xref: /petsc/src/ksp/pc/impls/gamg/gamg.c (revision 4da3aa2f3126a6cd4b4bbde88727b583cddd4ee7)
1 /*
2  GAMG geometric-algebraic multigrid PC - Mark Adams 2011
3  */
4 #include <petsc/private/matimpl.h>
5 #include <../src/ksp/pc/impls/gamg/gamg.h>           /*I "petscpc.h" I*/
6 #include <../src/ksp/pc/impls/bjacobi/bjacobi.h> /* Hack to access same_local_solves */
7 #include <../src/ksp/ksp/impls/cheby/chebyshevimpl.h>    /*I "petscksp.h" I*/
8 
9 #if defined PETSC_GAMG_USE_LOG
10 PetscLogEvent petsc_gamg_setup_events[NUM_SET];
11 #endif
12 
13 #if defined PETSC_USE_LOG
14 PetscLogEvent PC_GAMGGraph_AGG;
15 PetscLogEvent PC_GAMGGraph_GEO;
16 PetscLogEvent PC_GAMGCoarsen_AGG;
17 PetscLogEvent PC_GAMGCoarsen_GEO;
18 PetscLogEvent PC_GAMGProlongator_AGG;
19 PetscLogEvent PC_GAMGProlongator_GEO;
20 PetscLogEvent PC_GAMGOptProlongator_AGG;
21 #endif
22 
23 /* #define GAMG_STAGES */
24 #if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
25 static PetscLogStage gamg_stages[PETSC_MG_MAXLEVELS];
26 #endif
27 
/* registry of available GAMG subtypes (e.g. "agg", "geo"), populated at package initialization */
28 static PetscFunctionList GAMGList = 0;
/* guards one-time registration of the GAMG package (set on first init) */
29 static PetscBool PCGAMGPackageInitialized;
30 
31 /* ----------------------------------------------------------------------------- */
32 PetscErrorCode PCReset_GAMG(PC pc)
33 {
34   PetscErrorCode ierr, level;
35   PC_MG          *mg      = (PC_MG*)pc->data;
36   PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
37 
38   PetscFunctionBegin;
39   ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
40   pc_gamg->data_sz = 0;
41   ierr = PetscFree(pc_gamg->orig_data);CHKERRQ(ierr);
42   for (level = 0; level < PETSC_MG_MAXLEVELS ; level++) {
43     mg->min_eigen_DinvA[level] = 0;
44     mg->max_eigen_DinvA[level] = 0;
45   }
46   pc_gamg->emin = 0;
47   pc_gamg->emax = 0;
48   PetscFunctionReturn(0);
49 }
50 
51 /* -------------------------------------------------------------------------- */
52 /*
53    PCGAMGCreateLevel_GAMG: create coarse op with RAP.  repartition and/or reduce number
54      of active processors.
55 
56    Input Parameter:
57    . pc - parameters + side effect: coarse data in 'pc_gamg->data' and
58           'pc_gamg->data_sz' are changed via repartitioning/reduction.
59    . Amat_fine - matrix on this fine (k) level
60    . cr_bs - coarse block size
61    In/Output Parameter:
62    . a_P_inout - prolongation operator to the next level (k-->k-1)
63    . a_nactive_proc - number of active procs
64    Output Parameter:
65    . a_Amat_crs - coarse matrix that is created (k-1)
66 */
67 
68 static PetscErrorCode PCGAMGCreateLevel_GAMG(PC pc,Mat Amat_fine,PetscInt cr_bs,Mat *a_P_inout,Mat *a_Amat_crs,PetscMPIInt *a_nactive_proc,IS * Pcolumnperm, PetscBool is_last)
69 {
70   PetscErrorCode  ierr;
71   PC_MG           *mg         = (PC_MG*)pc->data;
72   PC_GAMG         *pc_gamg    = (PC_GAMG*)mg->innerctx;
73   Mat             Cmat,Pold=*a_P_inout;
74   MPI_Comm        comm;
75   PetscMPIInt     rank,size,new_size,nactive=*a_nactive_proc;
76   PetscInt        ncrs_eq,ncrs,f_bs;
77 
78   PetscFunctionBegin;
79   ierr = PetscObjectGetComm((PetscObject)Amat_fine,&comm);CHKERRQ(ierr);
80   ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
81   ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
82   ierr = MatGetBlockSize(Amat_fine, &f_bs);CHKERRQ(ierr);
  /* Galerkin coarse operator: Cmat = P^T A P (fill-ratio estimate 2.0) */
83   ierr = MatPtAP(Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat);CHKERRQ(ierr);
84 
85   if (Pcolumnperm) *Pcolumnperm = NULL;
86 
87   /* set 'ncrs' (nodes), 'ncrs_eq' (equations)*/
88   ierr = MatGetLocalSize(Cmat, &ncrs_eq, NULL);CHKERRQ(ierr);
89   if (pc_gamg->data_cell_rows>0) {
90     ncrs = pc_gamg->data_sz/pc_gamg->data_cell_cols/pc_gamg->data_cell_rows;
91   } else {
92     PetscInt  bs;
93     ierr = MatGetBlockSize(Cmat, &bs);CHKERRQ(ierr);
94     ncrs = ncrs_eq/bs;
95   }
96   /* get number of PEs to make active 'new_size', reduce, can be any integer 1-P */
97   if (is_last && !pc_gamg->use_parallel_coarse_grid_solver) new_size = 1;
98   else {
99     PetscInt ncrs_eq_glob;
100     ierr     = MatGetSize(Cmat, &ncrs_eq_glob, NULL);CHKERRQ(ierr);
101     new_size = (PetscMPIInt)((float)ncrs_eq_glob/(float)pc_gamg->min_eq_proc + 0.5); /* hardwire min. number of eq/proc */
102     if (!new_size) new_size = 1; /* not likely, possible? */
103     else if (new_size >= nactive) new_size = nactive; /* no change, rare */
104   }
105 
106   if (new_size==nactive) {
107     *a_Amat_crs = Cmat; /* output - no repartitioning or reduction - could bail here */
108     if (new_size < size) {
109       /* odd case where multiple coarse grids are on one processor or no coarsening ... */
110       ierr = PetscInfo1(pc,"reduced grid using same number of processors (%d) as last grid (use larger coarse grid)\n",nactive);CHKERRQ(ierr);
111       if (pc_gamg->cpu_pin_coarse_grids) {
112         ierr = MatPinToCPU(*a_Amat_crs,PETSC_TRUE);CHKERRQ(ierr);
113         ierr = MatPinToCPU(*a_P_inout,PETSC_TRUE);CHKERRQ(ierr);
114       }
115     }
116     /* we know that the grid structure can be reused in MatPtAP */
117   } else { /* reduce active processors - we know that the grid structure can NOT be reused in MatPtAP */
118     PetscInt       *counts,*newproc_idx,ii,jj,kk,strideNew,*tidx,ncrs_new,ncrs_eq_new,nloc_old,expand_factor=1,rfactor=1;
119     IS             is_eq_newproc,is_eq_num,is_eq_num_prim,new_eq_indices;
120     nloc_old = ncrs_eq/cr_bs;
121     if (ncrs_eq % cr_bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ncrs_eq %D not divisible by cr_bs %D",ncrs_eq,cr_bs);
122     /* get new_size and rfactor */
    /* rfactor = number of old ranks folded into each new active rank (a divisor of size);
       expand_factor = stride between active ranks: 1 packs them at the low ranks (COMPACT),
       rfactor spreads them across the communicator (SPREAD) */
123     if (pc_gamg->layout_type==PCGAMG_LAYOUT_SPREAD || !pc_gamg->repart) {
124       /* find factor */
125       if (new_size == 1) rfactor = size; /* don't modify */
126       else {
127         PetscReal best_fact = 0.;
128         jj = -1;
        /* scan divisors kk of size; pick the one whose active-PE count size/kk is closest to new_size */
129         for (kk = 1 ; kk <= size ; kk++) {
130           if (!(size%kk)) { /* a candidate */
131             PetscReal nactpe = (PetscReal)size/(PetscReal)kk, fact = nactpe/(PetscReal)new_size;
132             if (fact > 1.0) fact = 1./fact; /* keep fact < 1 */
133             if (fact > best_fact) {
134               best_fact = fact; jj = kk;
135             }
136           }
137         }
138         if (jj != -1) rfactor = jj;
139         else rfactor = 1; /* a prime */
140         if (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) expand_factor = 1;
141         else expand_factor = rfactor;
142       }
143       new_size = size/rfactor; /* make new size one that is factor */
144       if (new_size==nactive) { /* no repartitioning or reduction, bail out because nested here (rare) */
145         *a_Amat_crs = Cmat;
146         ierr = PetscInfo2(pc,"Finding factorable processor set stopped reduction: new_size=%d, neq(loc)=%D\n",new_size,ncrs_eq);CHKERRQ(ierr);
147         PetscFunctionReturn(0);
148       }
149     }
150 #if defined PETSC_GAMG_USE_LOG
151     ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
152 #endif
153     /* make 'is_eq_newproc' */
154     ierr = PetscMalloc1(size, &counts);CHKERRQ(ierr);
155     if (pc_gamg->repart) {
156       /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates of primal part accordingly */
157       Mat      adj;
158       ierr = PetscInfo4(pc,"Repartition: size (active): %d --> %d, %D local equations, using %s process layout\n",*a_nactive_proc, new_size, ncrs_eq, (pc_gamg->layout_type==PCGAMG_LAYOUT_COMPACT) ? "compact" : "spread");CHKERRQ(ierr);
159       /* get 'adj' */
160       if (cr_bs == 1) {
161         ierr = MatConvert(Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);CHKERRQ(ierr);
162       } else {
163         /* make a scalar matrix to partition (no Stokes here) */
164         Mat               tMat;
165         PetscInt          Istart_crs,Iend_crs,ncols,jj,Ii;
166         const PetscScalar *vals;
167         const PetscInt    *idx;
168         PetscInt          *d_nnz, *o_nnz, M, N;
169         static PetscInt   llev = 0; /* ugly but just used for debugging */
170         MatType           mtype;
171 
172         ierr = PetscMalloc2(ncrs, &d_nnz,ncrs, &o_nnz);CHKERRQ(ierr);
173         ierr = MatGetOwnershipRange(Cmat, &Istart_crs, &Iend_crs);CHKERRQ(ierr);
174         ierr = MatGetSize(Cmat, &M, &N);CHKERRQ(ierr);
        /* estimate nnz per node-row (one row sampled per block) and clamp to legal bounds */
175         for (Ii = Istart_crs, jj = 0; Ii < Iend_crs; Ii += cr_bs, jj++) {
176           ierr      = MatGetRow(Cmat,Ii,&ncols,0,0);CHKERRQ(ierr);
177           d_nnz[jj] = ncols/cr_bs;
178           o_nnz[jj] = ncols/cr_bs;
179           ierr      = MatRestoreRow(Cmat,Ii,&ncols,0,0);CHKERRQ(ierr);
180           if (d_nnz[jj] > ncrs) d_nnz[jj] = ncrs;
181           if (o_nnz[jj] > (M/cr_bs-ncrs)) o_nnz[jj] = M/cr_bs-ncrs;
182         }
183 
184         ierr = MatGetType(Amat_fine,&mtype);CHKERRQ(ierr);
185         ierr = MatCreate(comm, &tMat);CHKERRQ(ierr);
186         ierr = MatSetSizes(tMat, ncrs, ncrs,PETSC_DETERMINE, PETSC_DETERMINE);CHKERRQ(ierr);
187         ierr = MatSetType(tMat,mtype);CHKERRQ(ierr);
188         ierr = MatSeqAIJSetPreallocation(tMat,0,d_nnz);CHKERRQ(ierr);
189         ierr = MatMPIAIJSetPreallocation(tMat,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
190         ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);
191 
        /* collapse each cr_bs x cr_bs block of Cmat to a single scalar entry in tMat */
192         for (ii = Istart_crs; ii < Iend_crs; ii++) {
193           PetscInt dest_row = ii/cr_bs;
194           ierr = MatGetRow(Cmat,ii,&ncols,&idx,&vals);CHKERRQ(ierr);
195           for (jj = 0; jj < ncols; jj++) {
196             PetscInt    dest_col = idx[jj]/cr_bs;
197             PetscScalar v        = 1.0;
198             ierr = MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES);CHKERRQ(ierr);
199           }
200           ierr = MatRestoreRow(Cmat,ii,&ncols,&idx,&vals);CHKERRQ(ierr);
201         }
202         ierr = MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
203         ierr = MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
204 
        /* dead debug path: llev starts at 0 so 'llev++ == -1' is never true */
205         if (llev++ == -1) {
206           PetscViewer viewer; char fname[32];
207           ierr = PetscSNPrintf(fname,sizeof(fname),"part_mat_%D.mat",llev);CHKERRQ(ierr);
          /* NOTE(review): return value of PetscViewerBinaryOpen() is not error-checked (debug-only path) */
208           PetscViewerBinaryOpen(comm,fname,FILE_MODE_WRITE,&viewer);
209           ierr = MatView(tMat, viewer);CHKERRQ(ierr);
210           ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
211         }
212         ierr = MatConvert(tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);CHKERRQ(ierr);
213         ierr = MatDestroy(&tMat);CHKERRQ(ierr);
214       } /* create 'adj' */
215 
216       { /* partition: get newproc_idx */
217         char            prefix[256];
218         const char      *pcpre;
219         const PetscInt  *is_idx;
220         MatPartitioning mpart;
221         IS              proc_is;
222 
223         ierr = MatPartitioningCreate(comm, &mpart);CHKERRQ(ierr);
224         ierr = MatPartitioningSetAdjacency(mpart, adj);CHKERRQ(ierr);
225         ierr = PCGetOptionsPrefix(pc, &pcpre);CHKERRQ(ierr);
226         ierr = PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");CHKERRQ(ierr);
227         ierr = PetscObjectSetOptionsPrefix((PetscObject)mpart,prefix);CHKERRQ(ierr);
228         ierr = MatPartitioningSetFromOptions(mpart);CHKERRQ(ierr);
229         ierr = MatPartitioningSetNParts(mpart, new_size);CHKERRQ(ierr);
230         ierr = MatPartitioningApply(mpart, &proc_is);CHKERRQ(ierr);
231         ierr = MatPartitioningDestroy(&mpart);CHKERRQ(ierr);
232 
233         /* collect IS info */
234         ierr     = PetscMalloc1(ncrs_eq, &newproc_idx);CHKERRQ(ierr);
235         ierr     = ISGetIndices(proc_is, &is_idx);CHKERRQ(ierr);
        /* expand node partition to equation space: each node contributes cr_bs equations,
           all mapped to rank is_idx[kk]*expand_factor */
236         for (kk = jj = 0 ; kk < nloc_old ; kk++) {
237           for (ii = 0 ; ii < cr_bs ; ii++, jj++) {
238             newproc_idx[jj] = is_idx[kk] * expand_factor; /* distribution */
239           }
240         }
241         ierr = ISRestoreIndices(proc_is, &is_idx);CHKERRQ(ierr);
242         ierr = ISDestroy(&proc_is);CHKERRQ(ierr);
243       }
244       ierr = MatDestroy(&adj);CHKERRQ(ierr);
245 
246       ierr = ISCreateGeneral(comm, ncrs_eq, newproc_idx, PETSC_COPY_VALUES, &is_eq_newproc);CHKERRQ(ierr);
247       ierr = PetscFree(newproc_idx);CHKERRQ(ierr);
248     } else { /* simple aggregation of parts -- 'is_eq_newproc' */
249       PetscInt targetPE;
250       if (new_size==nactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"new_size==nactive. Should not happen");
251       ierr = PetscInfo1(pc,"Number of equations (loc) %D with simple aggregation\n",ncrs_eq);CHKERRQ(ierr);
      /* fold rfactor consecutive ranks onto one target, spaced by expand_factor */
252       targetPE = (rank/rfactor)*expand_factor;
253       ierr     = ISCreateStride(comm, ncrs_eq, targetPE, 0, &is_eq_newproc);CHKERRQ(ierr);
254     } /* end simple 'is_eq_newproc' */
255 
256     /*
257       Create an index set from the is_eq_newproc index set to indicate the mapping TO
258     */
259     ierr = ISPartitioningToNumbering(is_eq_newproc, &is_eq_num);CHKERRQ(ierr);
260     is_eq_num_prim = is_eq_num;
261     /*
262       Determine how many equations/vertices are assigned to each processor
263     */
264     ierr        = ISPartitioningCount(is_eq_newproc, size, counts);CHKERRQ(ierr);
265     ncrs_eq_new = counts[rank];
266     ierr        = ISDestroy(&is_eq_newproc);CHKERRQ(ierr);
267     ncrs_new = ncrs_eq_new/cr_bs;
268 
269     ierr = PetscFree(counts);CHKERRQ(ierr);
270     /* data movement scope -- this could be moved to subclasses so that we don't try to cram all auxiliary data into some complex abstracted thing */
271     {
272       Vec            src_crd, dest_crd;
273       const PetscInt *idx,ndata_rows=pc_gamg->data_cell_rows,ndata_cols=pc_gamg->data_cell_cols,node_data_sz=ndata_rows*ndata_cols;
274       VecScatter     vecscat;
275       PetscScalar    *array;
276       IS isscat;
277       /* move data (for primal equations only) */
278       /* Create a vector to contain the newly ordered element information */
279       ierr = VecCreate(comm, &dest_crd);CHKERRQ(ierr);
280       ierr = VecSetSizes(dest_crd, node_data_sz*ncrs_new, PETSC_DECIDE);CHKERRQ(ierr);
281       ierr = VecSetType(dest_crd,VECSTANDARD);CHKERRQ(ierr); /* this is needed! */
282       /*
283 	There are 'ndata_rows*ndata_cols' data items per node, (one can think of the vectors of having
284 	a block size of ...).  Note, ISs are expanded into equation space by 'cr_bs'.
285       */
286       ierr = PetscMalloc1(ncrs*node_data_sz, &tidx);CHKERRQ(ierr);
287       ierr = ISGetIndices(is_eq_num_prim, &idx);CHKERRQ(ierr);
288       for (ii=0,jj=0; ii<ncrs; ii++) {
289 	PetscInt id = idx[ii*cr_bs]/cr_bs; /* get node back */
290 	for (kk=0; kk<node_data_sz; kk++, jj++) tidx[jj] = id*node_data_sz + kk;
291       }
292       ierr = ISRestoreIndices(is_eq_num_prim, &idx);CHKERRQ(ierr);
293       ierr = ISCreateGeneral(comm, node_data_sz*ncrs, tidx, PETSC_COPY_VALUES, &isscat);CHKERRQ(ierr);
294       ierr = PetscFree(tidx);CHKERRQ(ierr);
295       /*
296 	Create a vector to contain the original vertex information for each element
297       */
298       ierr = VecCreateSeq(PETSC_COMM_SELF, node_data_sz*ncrs, &src_crd);CHKERRQ(ierr);
      /* repack pc_gamg->data (column-major, stride0 per data column) into node-interleaved order */
299       for (jj=0; jj<ndata_cols; jj++) {
300 	const PetscInt stride0=ncrs*pc_gamg->data_cell_rows;
301 	for (ii=0; ii<ncrs; ii++) {
302 	  for (kk=0; kk<ndata_rows; kk++) {
303 	    PetscInt    ix = ii*ndata_rows + kk + jj*stride0, jx = ii*node_data_sz + kk*ndata_cols + jj;
304 	    PetscScalar tt = (PetscScalar)pc_gamg->data[ix];
305 	    ierr = VecSetValues(src_crd, 1, &jx, &tt, INSERT_VALUES);CHKERRQ(ierr);
306 	  }
307 	}
308       }
309       ierr = VecAssemblyBegin(src_crd);CHKERRQ(ierr);
310       ierr = VecAssemblyEnd(src_crd);CHKERRQ(ierr);
311       /*
312 	Scatter the element vertex information (still in the original vertex ordering)
313 	to the correct processor
314       */
315       ierr = VecScatterCreate(src_crd, NULL, dest_crd, isscat, &vecscat);CHKERRQ(ierr);
316       ierr = ISDestroy(&isscat);CHKERRQ(ierr);
317       ierr = VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
318       ierr = VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
319       ierr = VecScatterDestroy(&vecscat);CHKERRQ(ierr);
320       ierr = VecDestroy(&src_crd);CHKERRQ(ierr);
321       /*
322 	Put the element vertex data into a new allocation of the gdata->ele
323       */
324       ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
325       ierr = PetscMalloc1(node_data_sz*ncrs_new, &pc_gamg->data);CHKERRQ(ierr);
326 
327       pc_gamg->data_sz = node_data_sz*ncrs_new;
328       strideNew        = ncrs_new*ndata_rows;
329 
      /* unpack the scattered, node-interleaved data back into column-major layout */
330       ierr = VecGetArray(dest_crd, &array);CHKERRQ(ierr);
331       for (jj=0; jj<ndata_cols; jj++) {
332 	for (ii=0; ii<ncrs_new; ii++) {
333 	  for (kk=0; kk<ndata_rows; kk++) {
334 	    PetscInt ix = ii*ndata_rows + kk + jj*strideNew, jx = ii*node_data_sz + kk*ndata_cols + jj;
335 	    pc_gamg->data[ix] = PetscRealPart(array[jx]);
336 	  }
337 	}
338       }
339       ierr = VecRestoreArray(dest_crd, &array);CHKERRQ(ierr);
340       ierr = VecDestroy(&dest_crd);CHKERRQ(ierr);
341     }
342     /* move A and P (columns) with new layout */
343 #if defined PETSC_GAMG_USE_LOG
344     ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET13],0,0,0,0);CHKERRQ(ierr);
345 #endif
346     /*
347       Invert for MatCreateSubMatrix
348     */
349     ierr = ISInvertPermutation(is_eq_num, ncrs_eq_new, &new_eq_indices);CHKERRQ(ierr);
350     ierr = ISSort(new_eq_indices);CHKERRQ(ierr); /* is this needed? */
351     ierr = ISSetBlockSize(new_eq_indices, cr_bs);CHKERRQ(ierr);
352     if (is_eq_num != is_eq_num_prim) {
353       ierr = ISDestroy(&is_eq_num_prim);CHKERRQ(ierr); /* could be same as 'is_eq_num' */
354     }
355     if (Pcolumnperm) {
356       ierr = PetscObjectReference((PetscObject)new_eq_indices);CHKERRQ(ierr);
357       *Pcolumnperm = new_eq_indices;
358     }
359     ierr = ISDestroy(&is_eq_num);CHKERRQ(ierr);
360 #if defined PETSC_GAMG_USE_LOG
361     ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET13],0,0,0,0);CHKERRQ(ierr);
362     ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET14],0,0,0,0);CHKERRQ(ierr);
363 #endif
364     /* 'a_Amat_crs' output */
365     {
366       Mat mat;
367       ierr        = MatCreateSubMatrix(Cmat, new_eq_indices, new_eq_indices, MAT_INITIAL_MATRIX, &mat);CHKERRQ(ierr);
368       *a_Amat_crs = mat;
369     }
370     ierr = MatDestroy(&Cmat);CHKERRQ(ierr);
371 
372 #if defined PETSC_GAMG_USE_LOG
373     ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET14],0,0,0,0);CHKERRQ(ierr);
374 #endif
375     /* prolongator */
376     {
377       IS       findices;
378       PetscInt Istart,Iend;
379       Mat      Pnew;
380 
381       ierr = MatGetOwnershipRange(Pold, &Istart, &Iend);CHKERRQ(ierr);
382 #if defined PETSC_GAMG_USE_LOG
383       ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET15],0,0,0,0);CHKERRQ(ierr);
384 #endif
      /* keep fine rows as-is, permute only the columns to the new coarse layout */
385       ierr = ISCreateStride(comm,Iend-Istart,Istart,1,&findices);CHKERRQ(ierr);
386       ierr = ISSetBlockSize(findices,f_bs);CHKERRQ(ierr);
387       ierr = MatCreateSubMatrix(Pold, findices, new_eq_indices, MAT_INITIAL_MATRIX, &Pnew);CHKERRQ(ierr);
388       ierr = ISDestroy(&findices);CHKERRQ(ierr);
389 
390 #if defined PETSC_GAMG_USE_LOG
391       ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET15],0,0,0,0);CHKERRQ(ierr);
392 #endif
393       ierr = MatDestroy(a_P_inout);CHKERRQ(ierr);
394 
395       /* output - repartitioned */
396       *a_P_inout = Pnew;
397     }
398     ierr = ISDestroy(&new_eq_indices);CHKERRQ(ierr);
399 
400     *a_nactive_proc = new_size; /* output */
401 
402     /* pinning on reduced grids, not a bad heuristic and optimization gets folded into process reduction optimization */
403     if (pc_gamg->cpu_pin_coarse_grids) {
404 #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
405       static PetscInt llev = 2;
406       ierr = PetscInfo1(pc,"Pinning level %D to the CPU\n",llev++);CHKERRQ(ierr);
407 #endif
408       ierr = MatPinToCPU(*a_Amat_crs,PETSC_TRUE);CHKERRQ(ierr);
409       ierr = MatPinToCPU(*a_P_inout,PETSC_TRUE);CHKERRQ(ierr);
410       if (1) { /* lvec is created, need to pin it, this is done in MatSetUpMultiply_MPIAIJ. Hack */
411         Mat         A = *a_Amat_crs, P = *a_P_inout;
412         PetscMPIInt size;
413         ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
414         if (size > 1) {
415           Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data, *p = (Mat_MPIAIJ*)P->data;
416           ierr = VecPinToCPU(a->lvec,PETSC_TRUE);CHKERRQ(ierr);
417           ierr = VecPinToCPU(p->lvec,PETSC_TRUE);CHKERRQ(ierr);
418         }
419       }
420     }
421 #if defined PETSC_GAMG_USE_LOG
422     ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
423 #endif
424   }
425   PetscFunctionReturn(0);
426 }
427 
428 /* -------------------------------------------------------------------------- */
429 /*
430    PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
431                     by setting data structures and options.
432 
433    Input Parameter:
434 .  pc - the preconditioner context
435 
436 */
437 PetscErrorCode PCSetUp_GAMG(PC pc)
438 {
439   PetscErrorCode ierr;
440   PC_MG          *mg      = (PC_MG*)pc->data;
441   PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
442   Mat            Pmat     = pc->pmat;
443   PetscInt       fine_level,level,level1,bs,M,N,qq,lidx,nASMBlocksArr[PETSC_MG_MAXLEVELS];
444   MPI_Comm       comm;
445   PetscMPIInt    rank,size,nactivepe;
446   Mat            Aarr[PETSC_MG_MAXLEVELS],Parr[PETSC_MG_MAXLEVELS];
447   IS             *ASMLocalIDsArr[PETSC_MG_MAXLEVELS];
448   PetscLogDouble nnz0=0.,nnztot=0.;
449   MatInfo        info;
450   PetscBool      is_last = PETSC_FALSE;
451 
452   PetscFunctionBegin;
453   ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
454   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
455   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
456 
457   if (pc_gamg->setup_count++ > 0) {
458     if ((PetscBool)(!pc_gamg->reuse_prol)) {
459       /* reset everything */
460       ierr = PCReset_MG(pc);CHKERRQ(ierr);
461       pc->setupcalled = 0;
462     } else {
463       PC_MG_Levels **mglevels = mg->levels;
464       /* just do Galerkin grids */
465       Mat          B,dA,dB;
466 
467       if (!pc->setupcalled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCSetUp() has not been called yet");
468       if (pc_gamg->Nlevels > 1) {
469         /* currently only handle case where mat and pmat are the same on coarser levels */
470         ierr = KSPGetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,&dA,&dB);CHKERRQ(ierr);
471         /* (re)set to get dirty flag */
472         ierr = KSPSetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,dA,dB);CHKERRQ(ierr);
473 
474         for (level=pc_gamg->Nlevels-2; level>=0; level--) {
475           /* 2nd solve, matrix structure can change from repartitioning or process reduction but don't know if we have process reduction here. Should fix */
476           if (pc_gamg->setup_count==2 /* && pc_gamg->repart||reduction */) {
477             ierr = PetscInfo2(pc,"new RAP after first solve level %D, %D setup\n",level,pc_gamg->setup_count);CHKERRQ(ierr);
478             ierr = MatPtAP(dB,mglevels[level+1]->interpolate,MAT_INITIAL_MATRIX,2.0,&B);CHKERRQ(ierr);
479             ierr = MatDestroy(&mglevels[level]->A);CHKERRQ(ierr);
480             mglevels[level]->A = B;
481           } else {
482             ierr = PetscInfo2(pc,"RAP after first solve reusing matrix level %D, %D setup\n",level,pc_gamg->setup_count);CHKERRQ(ierr);
483             ierr = KSPGetOperators(mglevels[level]->smoothd,NULL,&B);CHKERRQ(ierr);
484             ierr = MatPtAP(dB,mglevels[level+1]->interpolate,MAT_REUSE_MATRIX,1.0,&B);CHKERRQ(ierr);
485           }
486           ierr = KSPSetOperators(mglevels[level]->smoothd,B,B);CHKERRQ(ierr);
487           dB   = B;
488         }
489       }
490 
491       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
492       PetscFunctionReturn(0);
493     }
494   }
495 
496   if (!pc_gamg->data) {
497     if (pc_gamg->orig_data) {
498       ierr = MatGetBlockSize(Pmat, &bs);CHKERRQ(ierr);
499       ierr = MatGetLocalSize(Pmat, &qq, NULL);CHKERRQ(ierr);
500 
501       pc_gamg->data_sz        = (qq/bs)*pc_gamg->orig_data_cell_rows*pc_gamg->orig_data_cell_cols;
502       pc_gamg->data_cell_rows = pc_gamg->orig_data_cell_rows;
503       pc_gamg->data_cell_cols = pc_gamg->orig_data_cell_cols;
504 
505       ierr = PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data);CHKERRQ(ierr);
506       for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->data[qq] = pc_gamg->orig_data[qq];
507     } else {
508       if (!pc_gamg->ops->createdefaultdata) SETERRQ(comm,PETSC_ERR_PLIB,"'createdefaultdata' not set(?) need to support NULL data");
509       ierr = pc_gamg->ops->createdefaultdata(pc,Pmat);CHKERRQ(ierr);
510     }
511   }
512 
513   /* cache original data for reuse */
514   if (!pc_gamg->orig_data && (PetscBool)(!pc_gamg->reuse_prol)) {
515     ierr = PetscMalloc1(pc_gamg->data_sz, &pc_gamg->orig_data);CHKERRQ(ierr);
516     for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->orig_data[qq] = pc_gamg->data[qq];
517     pc_gamg->orig_data_cell_rows = pc_gamg->data_cell_rows;
518     pc_gamg->orig_data_cell_cols = pc_gamg->data_cell_cols;
519   }
520 
521   /* get basic dims */
522   ierr = MatGetBlockSize(Pmat, &bs);CHKERRQ(ierr);
523   ierr = MatGetSize(Pmat, &M, &N);CHKERRQ(ierr);
524 
525   ierr = MatGetInfo(Pmat,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr); /* global reduction */
526   nnz0   = info.nz_used;
527   nnztot = info.nz_used;
528   ierr = PetscInfo6(pc,"level %d) N=%D, n data rows=%d, n data cols=%d, nnz/row (ave)=%d, np=%d\n",0,M,pc_gamg->data_cell_rows,pc_gamg->data_cell_cols,(int)(nnz0/(PetscReal)M+0.5),size);CHKERRQ(ierr);
529 
530   /* Get A_i and R_i */
531   for (level=0, Aarr[0]=Pmat, nactivepe = size; level < (pc_gamg->Nlevels-1) && (!level || M>pc_gamg->coarse_eq_limit); level++) {
532     pc_gamg->current_level = level;
533     if (level >= PETSC_MG_MAXLEVELS) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Too many levels %D",level);
534     level1 = level + 1;
535 #if defined PETSC_GAMG_USE_LOG
536     ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
537 #if (defined GAMG_STAGES)
538     ierr = PetscLogStagePush(gamg_stages[level]);CHKERRQ(ierr);
539 #endif
540 #endif
541     { /* construct prolongator */
542       Mat              Gmat;
543       PetscCoarsenData *agg_lists;
544       Mat              Prol11;
545 
546       ierr = pc_gamg->ops->graph(pc,Aarr[level], &Gmat);CHKERRQ(ierr);
547       ierr = pc_gamg->ops->coarsen(pc, &Gmat, &agg_lists);CHKERRQ(ierr);
548       ierr = pc_gamg->ops->prolongator(pc,Aarr[level],Gmat,agg_lists,&Prol11);CHKERRQ(ierr);
549 
550       /* could have failed to create new level */
551       if (Prol11) {
552         /* get new block size of coarse matrices */
553         ierr = MatGetBlockSizes(Prol11, NULL, &bs);CHKERRQ(ierr);
554 
555         if (pc_gamg->ops->optprolongator) {
556           /* smooth */
557           ierr = pc_gamg->ops->optprolongator(pc, Aarr[level], &Prol11);CHKERRQ(ierr);
558         }
559 
560         if (pc_gamg->use_aggs_in_asm) {
561           PetscInt bs;
562           ierr = MatGetBlockSizes(Prol11, &bs, NULL);CHKERRQ(ierr);
563           ierr = PetscCDGetASMBlocks(agg_lists, bs, Gmat, &nASMBlocksArr[level], &ASMLocalIDsArr[level]);CHKERRQ(ierr);
564         }
565 
566         Parr[level1] = Prol11;
567       } else Parr[level1] = NULL; /* failed to coarsen */
568 
569       ierr = MatDestroy(&Gmat);CHKERRQ(ierr);
570       ierr = PetscCDDestroy(agg_lists);CHKERRQ(ierr);
571     } /* construct prolongator scope */
572 #if defined PETSC_GAMG_USE_LOG
573     ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
574 #endif
575     if (!level) Aarr[0] = Pmat; /* use Pmat for finest level setup */
576     if (!Parr[level1]) { /* failed to coarsen */
577       ierr =  PetscInfo1(pc,"Stop gridding, level %D\n",level);CHKERRQ(ierr);
578 #if defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES
579       ierr = PetscLogStagePop();CHKERRQ(ierr);
580 #endif
581       break;
582     }
583 #if defined PETSC_GAMG_USE_LOG
584     ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
585 #endif
586     ierr = MatGetSize(Parr[level1], &M, &N);CHKERRQ(ierr); /* N is next M, a loop test variables */
587     if (is_last) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Is last ????????");
588     if (N <= pc_gamg->coarse_eq_limit) is_last = PETSC_TRUE;
589     if (level1 == pc_gamg->Nlevels-1) is_last = PETSC_TRUE;
590     ierr = pc_gamg->ops->createlevel(pc, Aarr[level], bs, &Parr[level1], &Aarr[level1], &nactivepe, NULL, is_last);CHKERRQ(ierr);
591 
592 #if defined PETSC_GAMG_USE_LOG
593     ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
594 #endif
595     ierr = MatGetSize(Aarr[level1], &M, &N);CHKERRQ(ierr); /* M is loop test variables */
596     ierr = MatGetInfo(Aarr[level1], MAT_GLOBAL_SUM, &info);CHKERRQ(ierr);
597     nnztot += info.nz_used;
598     ierr = PetscInfo5(pc,"%d) N=%D, n data cols=%d, nnz/row (ave)=%d, %d active pes\n",level1,M,pc_gamg->data_cell_cols,(int)(info.nz_used/(PetscReal)M),nactivepe);CHKERRQ(ierr);
599 
600 #if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
601     ierr = PetscLogStagePop();CHKERRQ(ierr);
602 #endif
603     /* stop if one node or one proc -- could pull back for singular problems */
604     if ( (pc_gamg->data_cell_cols && M/pc_gamg->data_cell_cols < 2) || (!pc_gamg->data_cell_cols && M/bs < 2) ) {
605       ierr =  PetscInfo2(pc,"HARD stop of coarsening on level %D.  Grid too small: %D block nodes\n",level,M/bs);CHKERRQ(ierr);
606       level++;
607       break;
608     }
609   } /* levels */
610   ierr                  = PetscFree(pc_gamg->data);CHKERRQ(ierr);
611 
612   ierr = PetscInfo2(pc,"%D levels, grid complexity = %g\n",level+1,nnztot/nnz0);CHKERRQ(ierr);
613   pc_gamg->Nlevels = level + 1;
614   fine_level       = level;
615   ierr             = PCMGSetLevels(pc,pc_gamg->Nlevels,NULL);CHKERRQ(ierr);
616 
617   if (pc_gamg->Nlevels > 1) { /* don't setup MG if one level */
618     /* set default smoothers & set operators */
619     for (lidx = 1, level = pc_gamg->Nlevels-2; lidx <= fine_level; lidx++, level--) {
620       KSP smoother;
621       PC  subpc;
622 
623       ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
624       ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);
625 
626       ierr = KSPSetNormType(smoother, KSP_NORM_NONE);CHKERRQ(ierr);
627       /* set ops */
628       ierr = KSPSetOperators(smoother, Aarr[level], Aarr[level]);CHKERRQ(ierr);
629       ierr = PCMGSetInterpolation(pc, lidx, Parr[level+1]);CHKERRQ(ierr);
630 
631       /* set defaults */
632       ierr = KSPSetType(smoother, KSPCHEBYSHEV);CHKERRQ(ierr);
633 
634       /* set blocks for ASM smoother that uses the 'aggregates' */
635       if (pc_gamg->use_aggs_in_asm) {
636         PetscInt sz;
637         IS       *iss;
638 
639         sz   = nASMBlocksArr[level];
640         iss  = ASMLocalIDsArr[level];
641         ierr = PCSetType(subpc, PCASM);CHKERRQ(ierr);
642         ierr = PCASMSetOverlap(subpc, 0);CHKERRQ(ierr);
643         ierr = PCASMSetType(subpc,PC_ASM_BASIC);CHKERRQ(ierr);
644         if (!sz) {
645           IS       is;
646           ierr = ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);CHKERRQ(ierr);
647           ierr = PCASMSetLocalSubdomains(subpc, 1, NULL, &is);CHKERRQ(ierr);
648           ierr = ISDestroy(&is);CHKERRQ(ierr);
649         } else {
650           PetscInt kk;
651           ierr = PCASMSetLocalSubdomains(subpc, sz, NULL, iss);CHKERRQ(ierr);
652           for (kk=0; kk<sz; kk++) {
653             ierr = ISDestroy(&iss[kk]);CHKERRQ(ierr);
654           }
655           ierr = PetscFree(iss);CHKERRQ(ierr);
656         }
657         ASMLocalIDsArr[level] = NULL;
658         nASMBlocksArr[level]  = 0;
659       } else {
660         ierr = PCSetType(subpc, PCSOR);CHKERRQ(ierr);
661       }
662     }
663     {
664       /* coarse grid */
665       KSP smoother,*k2; PC subpc,pc2; PetscInt ii,first;
666       Mat Lmat = Aarr[(level=pc_gamg->Nlevels-1)]; lidx = 0;
667       ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
668       ierr = KSPSetOperators(smoother, Lmat, Lmat);CHKERRQ(ierr);
669       if (!pc_gamg->use_parallel_coarse_grid_solver) {
670         ierr = KSPSetNormType(smoother, KSP_NORM_NONE);CHKERRQ(ierr);
671         ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);
672         ierr = PCSetType(subpc, PCBJACOBI);CHKERRQ(ierr);
673         ierr = PCSetUp(subpc);CHKERRQ(ierr);
674         ierr = PCBJacobiGetSubKSP(subpc,&ii,&first,&k2);CHKERRQ(ierr);
675         if (ii != 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ii %D is not one",ii);
676         ierr = KSPGetPC(k2[0],&pc2);CHKERRQ(ierr);
677         ierr = PCSetType(pc2, PCLU);CHKERRQ(ierr);
678         ierr = PCFactorSetShiftType(pc2,MAT_SHIFT_INBLOCKS);CHKERRQ(ierr);
679         ierr = KSPSetTolerances(k2[0],PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,1);CHKERRQ(ierr);
680         ierr = KSPSetType(k2[0], KSPPREONLY);CHKERRQ(ierr);
681         /* This flag gets reset by PCBJacobiGetSubKSP(), but our BJacobi really does the same algorithm everywhere (and in
682          * fact, all but one process will have zero dofs), so we reset the flag to avoid having PCView_BJacobi attempt to
683          * view every subdomain as though they were different. */
684         ((PC_BJacobi*)subpc->data)->same_local_solves = PETSC_TRUE;
685       }
686     }
687 
688     /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
689     ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
690     ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr);
691     ierr = PetscOptionsEnd();CHKERRQ(ierr);
692     ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
693 
694     ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
695 
696     /* setup cheby eigen estimates from SA */
697     for (lidx = 1, level = pc_gamg->Nlevels-2; level >= 0 ; lidx++, level--) {
698       KSP       smoother;
699       PetscBool ischeb;
700       ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
701       ierr = PetscObjectTypeCompare((PetscObject)smoother,KSPCHEBYSHEV,&ischeb);CHKERRQ(ierr);
702       if (ischeb) {
703         KSP_Chebyshev  *cheb = (KSP_Chebyshev*)smoother->data;
704         if (mg->max_eigen_DinvA[level] > 0 && cheb->emax == 0.) { /* let command line emax override using SA's eigenvalues */
705           PC        subpc;
706           PetscBool isjac;
707           ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);
708           ierr = PetscObjectTypeCompare((PetscObject)subpc,PCJACOBI,&isjac);CHKERRQ(ierr);
709           if ( (isjac && pc_gamg->use_sa_esteig==-1) || pc_gamg->use_sa_esteig==1) {
710             PetscReal       emax,emin;
711             emin = mg->min_eigen_DinvA[level];
712             emax = mg->max_eigen_DinvA[level];
713             ierr = PetscInfo4(pc,"PCSetUp_GAMG: call KSPChebyshevSetEigenvalues on level %D (N=%D) with emax = %g emin = %g\n",level,Aarr[level]->rmap->N,(double)emax,(double)emin);CHKERRQ(ierr);
714             cheb->emin_computed = emin;
715             cheb->emax_computed = emax;
716             ierr = KSPChebyshevSetEigenvalues(smoother, cheb->tform[2]*emin + cheb->tform[3]*emax, cheb->tform[0]*emin + cheb->tform[1]*emax);CHKERRQ(ierr);
717           }
718         }
719       }
720     }
721 
722     /* clean up */
723     for (level=1; level<pc_gamg->Nlevels; level++) {
724       ierr = MatDestroy(&Parr[level]);CHKERRQ(ierr);
725       ierr = MatDestroy(&Aarr[level]);CHKERRQ(ierr);
726     }
727 
728   } else {
729     KSP smoother;
730     ierr = PetscInfo(pc,"One level solver used (system is seen as DD). Using default solver.\n");CHKERRQ(ierr);
731     ierr = PCMGGetSmoother(pc, 0, &smoother);CHKERRQ(ierr);
732     ierr = KSPSetOperators(smoother, Aarr[0], Aarr[0]);CHKERRQ(ierr);
733     ierr = KSPSetType(smoother, KSPPREONLY);CHKERRQ(ierr);
734     ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
735   }
736   PetscFunctionReturn(0);
737 }
738 
739 /* ------------------------------------------------------------------------- */
740 /*
741  PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
742    that was created with PCCreate_GAMG().
743 
744    Input Parameter:
745 .  pc - the preconditioner context
746 
747    Application Interface Routine: PCDestroy()
748 */
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg     = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg= (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  /* release level matrices/data first so the subtype destroy sees a reset state */
  ierr = PCReset_GAMG(pc);CHKERRQ(ierr);
  /* let the active subtype (agg/geo/classical) free its private data, if it set a destroy hook */
  if (pc_gamg->ops->destroy) {
    ierr = (*pc_gamg->ops->destroy)(pc);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_gamg->ops);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg->gamg_type_name);CHKERRQ(ierr);
  /* free the GAMG context before PCDestroy_MG, which frees the enclosing PC_MG (pc->data) */
  ierr = PetscFree(pc_gamg);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
766 
767 /*@
768    PCGAMGSetProcEqLim - Set number of equations to aim for per process on the coarse grids via processor reduction.
769 
770    Logically Collective on PC
771 
772    Input Parameters:
773 +  pc - the preconditioner context
774 -  n - the number of equations
775 
776 
777    Options Database Key:
778 .  -pc_gamg_process_eq_limit <limit>
779 
780    Notes:
781     GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit> equations on each process
782           that has degrees of freedom
783 
784    Level: intermediate
785 
786 .seealso: PCGAMGSetCoarseEqLim()
787 @*/
PetscErrorCode  PCGAMGSetProcEqLim(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetProcEqLim_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetProcEqLim_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
797 
798 static PetscErrorCode PCGAMGSetProcEqLim_GAMG(PC pc, PetscInt n)
799 {
800   PC_MG   *mg      = (PC_MG*)pc->data;
801   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
802 
803   PetscFunctionBegin;
804   if (n>0) pc_gamg->min_eq_proc = n;
805   PetscFunctionReturn(0);
806 }
807 
808 /*@
809    PCGAMGSetCoarseEqLim - Set maximum number of equations on coarsest grid.
810 
811  Collective on PC
812 
813    Input Parameters:
814 +  pc - the preconditioner context
815 -  n - maximum number of equations to aim for
816 
817    Options Database Key:
818 .  -pc_gamg_coarse_eq_limit <limit>
819 
820    Notes: For example -pc_gamg_coarse_eq_limit 1000 will stop coarsening once the coarse grid
821      has less than 1000 unknowns.
822 
823    Level: intermediate
824 
825 .seealso: PCGAMGSetProcEqLim()
826 @*/
PetscErrorCode PCGAMGSetCoarseEqLim(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetCoarseEqLim_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetCoarseEqLim_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
836 
837 static PetscErrorCode PCGAMGSetCoarseEqLim_GAMG(PC pc, PetscInt n)
838 {
839   PC_MG   *mg      = (PC_MG*)pc->data;
840   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
841 
842   PetscFunctionBegin;
843   if (n>0) pc_gamg->coarse_eq_limit = n;
844   PetscFunctionReturn(0);
845 }
846 
847 /*@
848    PCGAMGSetRepartition - Repartition the degrees of freedom across the processors on the coarser grids
849 
850    Collective on PC
851 
852    Input Parameters:
853 +  pc - the preconditioner context
854 -  n - PETSC_TRUE or PETSC_FALSE
855 
856    Options Database Key:
857 .  -pc_gamg_repartition <true,false>
858 
859    Notes:
    this will generally improve the load balancing of the work on each level
861 
862    Level: intermediate
863 
864 .seealso: ()
865 @*/
PetscErrorCode PCGAMGSetRepartition(PC pc, PetscBool n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetRepartition_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetRepartition_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
875 
876 static PetscErrorCode PCGAMGSetRepartition_GAMG(PC pc, PetscBool n)
877 {
878   PC_MG   *mg      = (PC_MG*)pc->data;
879   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
880 
881   PetscFunctionBegin;
882   pc_gamg->repart = n;
883   PetscFunctionReturn(0);
884 }
885 
886 /*@
887    PCGAMGSetEstEigKSPMaxIt - Set number of KSP iterations in eigen estimator (for Cheby)
888 
889    Collective on PC
890 
891    Input Parameters:
892 +  pc - the preconditioner context
-  n - number of iterations to run the eigen estimator KSP
894 
895    Options Database Key:
896 .  -pc_gamg_esteig_ksp_max_it <its>
897 
898    Notes:
899 
900    Level: intermediate
901 
902 .seealso: ()
903 @*/
PetscErrorCode PCGAMGSetEstEigKSPMaxIt(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetEstEigKSPMaxIt_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetEstEigKSPMaxIt_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
913 
914 static PetscErrorCode PCGAMGSetEstEigKSPMaxIt_GAMG(PC pc, PetscInt n)
915 {
916   PC_MG   *mg      = (PC_MG*)pc->data;
917   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
918 
919   PetscFunctionBegin;
920   pc_gamg->esteig_max_it = n;
921   PetscFunctionReturn(0);
922 }
923 
924 /*@
925    PCGAMGSetUseSAEstEig - Use eigen estimate from smoothed aggregation for Cheby smoother
926 
927    Collective on PC
928 
929    Input Parameters:
930 +  pc - the preconditioner context
-  n - PETSC_TRUE to use the eigen estimate from smoothed aggregation
932 
933    Options Database Key:
934 .  -pc_gamg_use_sa_esteig <true,false>
935 
936    Notes:
937 
938    Level: intermediate
939 
940 .seealso: ()
941 @*/
PetscErrorCode PCGAMGSetUseSAEstEig(PC pc, PetscBool n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetUseSAEstEig_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetUseSAEstEig_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
951 
952 static PetscErrorCode PCGAMGSetUseSAEstEig_GAMG(PC pc, PetscInt n)
953 {
954   PC_MG   *mg      = (PC_MG*)pc->data;
955   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
956 
957   PetscFunctionBegin;
958   pc_gamg->use_sa_esteig = n ? 1 : 0;
959   PetscFunctionReturn(0);
960 }
961 
962 /*@C
963    PCGAMGSetEstEigKSPType - Set type of KSP in eigen estimator (for Cheby)
964 
965    Collective on PC
966 
967    Input Parameters:
968 +  pc - the preconditioner context
969 -  t - ksp type
970 
971    Options Database Key:
972 .  -pc_gamg_esteig_ksp_type <type>
973 
974    Notes:
975 
976    Level: intermediate
977 
978 .seealso: ()
979 @*/
PetscErrorCode PCGAMGSetEstEigKSPType(PC pc, char t[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetEstEigKSPType_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetEstEigKSPType_C",(PC,char[]),(pc,t));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
989 
990 static PetscErrorCode PCGAMGSetEstEigKSPType_GAMG(PC pc, char t[])
991 {
992   PetscErrorCode ierr;
993   PC_MG   *mg      = (PC_MG*)pc->data;
994   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
995 
996   PetscFunctionBegin;
997   ierr = PetscStrcpy(pc_gamg->esteig_type,t);CHKERRQ(ierr);
998   PetscFunctionReturn(0);
999 }
1000 
1001 /*@
1002    PCGAMGSetEigenvalues - Set eigenvalues
1003 
1004    Collective on PC
1005 
1006    Input Parameters:
1007 +  pc - the preconditioner context
.  emax - max eigenvalue
-  emin - min eigenvalue
1010 
1011    Options Database Key:
1012 .  -pc_gamg_eigenvalues
1013 
1014    Level: intermediate
1015 
1016 .seealso: PCGAMGSetEstEigKSPMaxIt(), PCGAMGSetUseSAEstEig(), PCGAMGSetEstEigKSPType()
1017 @*/
PetscErrorCode PCGAMGSetEigenvalues(PC pc, PetscReal emax,PetscReal emin)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetEigenvalues_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetEigenvalues_C",(PC,PetscReal,PetscReal),(pc,emax,emin));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1027 static PetscErrorCode PCGAMGSetEigenvalues_GAMG(PC pc,PetscReal emax,PetscReal emin)
1028 {
1029   PC_MG          *mg      = (PC_MG*)pc->data;
1030   PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
1031 
1032   PetscFunctionBegin;
1033   if (emax <= emin) SETERRQ2(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_INCOMP,"Maximum eigenvalue must be larger than minimum: max %g min %g",(double)emax,(double)emin);
1034   if (emax*emin <= 0.0) SETERRQ2(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_INCOMP,"Both eigenvalues must be of the same sign: max %g min %g",(double)emax,(double)emin);
1035   pc_gamg->emax = emax;
1036   pc_gamg->emin = emin;
1037 
1038   PetscFunctionReturn(0);
1039 }
1040 
1041 /*@
1042    PCGAMGSetReuseInterpolation - Reuse prolongation when rebuilding algebraic multigrid preconditioner
1043 
1044    Collective on PC
1045 
1046    Input Parameters:
1047 +  pc - the preconditioner context
1048 -  n - PETSC_TRUE or PETSC_FALSE
1049 
1050    Options Database Key:
1051 .  -pc_gamg_reuse_interpolation <true,false>
1052 
1053    Level: intermediate
1054 
1055    Notes:
1056     this may negatively affect the convergence rate of the method on new matrices if the matrix entries change a great deal, but allows
1057           rebuilding the preconditioner quicker.
1058 
1059 .seealso: ()
1060 @*/
PetscErrorCode PCGAMGSetReuseInterpolation(PC pc, PetscBool n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetReuseInterpolation_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetReuseInterpolation_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1070 
1071 static PetscErrorCode PCGAMGSetReuseInterpolation_GAMG(PC pc, PetscBool n)
1072 {
1073   PC_MG   *mg      = (PC_MG*)pc->data;
1074   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1075 
1076   PetscFunctionBegin;
1077   pc_gamg->reuse_prol = n;
1078   PetscFunctionReturn(0);
1079 }
1080 
1081 /*@
1082    PCGAMGASMSetUseAggs - Have the PCGAMG smoother on each level use the aggregates defined by the coarsening process as the subdomains for the additive Schwarz preconditioner.
1083 
1084    Collective on PC
1085 
1086    Input Parameters:
1087 +  pc - the preconditioner context
1088 -  flg - PETSC_TRUE to use aggregates, PETSC_FALSE to not
1089 
1090    Options Database Key:
1091 .  -pc_gamg_asm_use_agg
1092 
1093    Level: intermediate
1094 
1095 .seealso: ()
1096 @*/
PetscErrorCode PCGAMGASMSetUseAggs(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGASMSetUseAggs_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGASMSetUseAggs_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1106 
1107 static PetscErrorCode PCGAMGASMSetUseAggs_GAMG(PC pc, PetscBool flg)
1108 {
1109   PC_MG   *mg      = (PC_MG*)pc->data;
1110   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1111 
1112   PetscFunctionBegin;
1113   pc_gamg->use_aggs_in_asm = flg;
1114   PetscFunctionReturn(0);
1115 }
1116 
1117 /*@
1118    PCGAMGSetUseParallelCoarseGridSolve - allow a parallel coarse grid solver
1119 
1120    Collective on PC
1121 
1122    Input Parameters:
1123 +  pc - the preconditioner context
1124 -  flg - PETSC_TRUE to not force coarse grid onto one processor
1125 
1126    Options Database Key:
1127 .  -pc_gamg_use_parallel_coarse_grid_solver
1128 
1129    Level: intermediate
1130 
1131 .seealso: PCGAMGSetCoarseGridLayoutType(), PCGAMGSetCpuPinCoarseGrids()
1132 @*/
PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetUseParallelCoarseGridSolve_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetUseParallelCoarseGridSolve_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1142 
1143 static PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve_GAMG(PC pc, PetscBool flg)
1144 {
1145   PC_MG   *mg      = (PC_MG*)pc->data;
1146   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1147 
1148   PetscFunctionBegin;
1149   pc_gamg->use_parallel_coarse_grid_solver = flg;
1150   PetscFunctionReturn(0);
1151 }
1152 
1153 /*@
1154    PCGAMGSetCpuPinCoarseGrids - pin reduced grids to CPU
1155 
1156    Collective on PC
1157 
1158    Input Parameters:
1159 +  pc - the preconditioner context
1160 -  flg - PETSC_TRUE to pin coarse grids to CPU
1161 
1162    Options Database Key:
1163 .  -pc_gamg_cpu_pin_coarse_grids
1164 
1165    Level: intermediate
1166 
1167 .seealso: PCGAMGSetCoarseGridLayoutType(), PCGAMGSetUseParallelCoarseGridSolve()
1168 @*/
PetscErrorCode PCGAMGSetCpuPinCoarseGrids(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetCpuPinCoarseGrids_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetCpuPinCoarseGrids_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1178 
1179 static PetscErrorCode PCGAMGSetCpuPinCoarseGrids_GAMG(PC pc, PetscBool flg)
1180 {
1181   PC_MG   *mg      = (PC_MG*)pc->data;
1182   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1183 
1184   PetscFunctionBegin;
1185   pc_gamg->cpu_pin_coarse_grids = flg;
1186   PetscFunctionReturn(0);
1187 }
1188 
1189 /*@
1190    PCGAMGSetCoarseGridLayoutType - place reduce grids on processors with natural order (compact type)
1191 
1192    Collective on PC
1193 
1194    Input Parameters:
1195 +  pc - the preconditioner context
1196 -  flg - Layout type
1197 
1198    Options Database Key:
1199 .  -pc_gamg_coarse_grid_layout_type
1200 
1201    Level: intermediate
1202 
1203 .seealso: PCGAMGSetUseParallelCoarseGridSolve(), PCGAMGSetCpuPinCoarseGrids()
1204 @*/
PetscErrorCode PCGAMGSetCoarseGridLayoutType(PC pc, PCGAMGLayoutType flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetCoarseGridLayoutType_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetCoarseGridLayoutType_C",(PC,PCGAMGLayoutType),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1214 
1215 static PetscErrorCode PCGAMGSetCoarseGridLayoutType_GAMG(PC pc, PCGAMGLayoutType flg)
1216 {
1217   PC_MG   *mg      = (PC_MG*)pc->data;
1218   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1219 
1220   PetscFunctionBegin;
1221   pc_gamg->layout_type = flg;
1222   PetscFunctionReturn(0);
1223 }
1224 
1225 /*@
1226    PCGAMGSetNlevels -  Sets the maximum number of levels PCGAMG will use
1227 
1228    Not collective on PC
1229 
1230    Input Parameters:
1231 +  pc - the preconditioner
1232 -  n - the maximum number of levels to use
1233 
1234    Options Database Key:
1235 .  -pc_mg_levels
1236 
1237    Level: intermediate
1238 
1239 .seealso: ()
1240 @*/
PetscErrorCode PCGAMGSetNlevels(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetNlevels_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetNlevels_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1250 
1251 static PetscErrorCode PCGAMGSetNlevels_GAMG(PC pc, PetscInt n)
1252 {
1253   PC_MG   *mg      = (PC_MG*)pc->data;
1254   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1255 
1256   PetscFunctionBegin;
1257   pc_gamg->Nlevels = n;
1258   PetscFunctionReturn(0);
1259 }
1260 
1261 /*@
1262    PCGAMGSetThreshold - Relative threshold to use for dropping edges in aggregation graph
1263 
1264    Not collective on PC
1265 
1266    Input Parameters:
1267 +  pc - the preconditioner context
1268 -  threshold - the threshold value, 0.0 means keep all nonzero entries in the graph; negative means keep even zero entries in the graph
1269 
1270    Options Database Key:
1271 .  -pc_gamg_threshold <threshold>
1272 
1273    Notes:
    Increasing the threshold decreases the rate of coarsening. Conversely reducing the threshold increases the rate of coarsening (aggressive coarsening) and thereby reduces the complexity of the coarse grids, and generally results in slower solver convergence rates. Reducing coarse grid complexity reduces the cost of Galerkin coarse grid construction considerably.
    Before coarsening or aggregating the graph, GAMG removes small values from the graph with this threshold, thus reducing the coupling in the graph and producing a different (perhaps better) coarser set of points.
1276 
1277    Level: intermediate
1278 
1279 .seealso: PCGAMGFilterGraph(), PCGAMGSetSquareGraph()
1280 @*/
PetscErrorCode PCGAMGSetThreshold(PC pc, PetscReal v[], PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetThreshold_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetThreshold_C",(PC,PetscReal[],PetscInt),(pc,v,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1290 
1291 static PetscErrorCode PCGAMGSetThreshold_GAMG(PC pc, PetscReal v[], PetscInt n)
1292 {
1293   PC_MG   *mg      = (PC_MG*)pc->data;
1294   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1295   PetscInt i;
1296   PetscFunctionBegin;
1297   for (i=0;i<n;i++) pc_gamg->threshold[i] = v[i];
1298   do {pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;} while (++i<PETSC_MG_MAXLEVELS);
1299   PetscFunctionReturn(0);
1300 }
1301 
1302 /*@
1303    PCGAMGSetThresholdScale - Relative threshold reduction at each level
1304 
1305    Not collective on PC
1306 
1307    Input Parameters:
1308 +  pc - the preconditioner context
-  scale - the threshold value reduction, usually < 1.0
1310 
1311    Options Database Key:
1312 .  -pc_gamg_threshold_scale <v>
1313 
1314    Level: advanced
1315 
1316 .seealso: ()
1317 @*/
PetscErrorCode PCGAMGSetThresholdScale(PC pc, PetscReal v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetThresholdScale_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetThresholdScale_C",(PC,PetscReal),(pc,v));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1327 
1328 static PetscErrorCode PCGAMGSetThresholdScale_GAMG(PC pc, PetscReal v)
1329 {
1330   PC_MG   *mg      = (PC_MG*)pc->data;
1331   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1332   PetscFunctionBegin;
1333   pc_gamg->threshold_scale = v;
1334   PetscFunctionReturn(0);
1335 }
1336 
1337 /*@C
1338    PCGAMGSetType - Set solution method
1339 
1340    Collective on PC
1341 
1342    Input Parameters:
1343 +  pc - the preconditioner context
1344 -  type - PCGAMGAGG, PCGAMGGEO, or PCGAMGCLASSICAL
1345 
1346    Options Database Key:
1347 .  -pc_gamg_type <agg,geo,classical> - type of algebraic multigrid to apply
1348 
1349    Level: intermediate
1350 
1351 .seealso: PCGAMGGetType(), PCGAMG, PCGAMGType
1352 @*/
PetscErrorCode PCGAMGSetType(PC pc, PCGAMGType type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* forward to the implementation composed as "PCGAMGSetType_C"; no-op for non-GAMG PCs */
  ierr = PetscTryMethod(pc,"PCGAMGSetType_C",(PC,PCGAMGType),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1362 
1363 /*@C
1364    PCGAMGGetType - Get solution method
1365 
1366    Collective on PC
1367 
1368    Input Parameter:
1369 .  pc - the preconditioner context
1370 
1371    Output Parameter:
1372 .  type - the type of algorithm used
1373 
1374    Level: intermediate
1375 
1376 .seealso: PCGAMGSetType(), PCGAMGType
1377 @*/
PetscErrorCode PCGAMGGetType(PC pc, PCGAMGType *type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  /* PetscUseMethod (not Try): errors if the PC does not provide "PCGAMGGetType_C" */
  ierr = PetscUseMethod(pc,"PCGAMGGetType_C",(PC,PCGAMGType*),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1387 
1388 static PetscErrorCode PCGAMGGetType_GAMG(PC pc, PCGAMGType *type)
1389 {
1390   PC_MG          *mg      = (PC_MG*)pc->data;
1391   PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
1392 
1393   PetscFunctionBegin;
1394   *type = pc_gamg->type;
1395   PetscFunctionReturn(0);
1396 }
1397 
/* GAMG implementation of PCGAMGSetType(): switch the active subtype, tearing
   down the previous subtype's state first and then running the new subtype's
   constructor. */
static PetscErrorCode PCGAMGSetType_GAMG(PC pc, PCGAMGType type)
{
  PetscErrorCode ierr,(*r)(PC);
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->type = type;
  /* look up the registered constructor for this subtype name */
  ierr = PetscFunctionListFind(GAMGList,type,&r);CHKERRQ(ierr);
  if (!r) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown GAMG type %s given",type);
  if (pc_gamg->ops->destroy) {
    /* a subtype was already active: destroy it and reset the ops table,
       keeping only the generic createlevel hook */
    ierr = (*pc_gamg->ops->destroy)(pc);CHKERRQ(ierr);
    ierr = PetscMemzero(pc_gamg->ops,sizeof(struct _PCGAMGOps));CHKERRQ(ierr);
    pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
    /* cleaning up common data in pc_gamg - this should disappear someday */
    pc_gamg->data_cell_cols = 0;
    pc_gamg->data_cell_rows = 0;
    pc_gamg->orig_data_cell_cols = 0;
    pc_gamg->orig_data_cell_rows = 0;
    ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
    pc_gamg->data_sz = 0;
  }
  ierr = PetscFree(pc_gamg->gamg_type_name);CHKERRQ(ierr);
  ierr = PetscStrallocpy(type,&pc_gamg->gamg_type_name);CHKERRQ(ierr);
  /* run the new subtype's constructor */
  ierr = (*r)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1425 
1426 /* -------------------------------------------------------------------------- */
1427 /*
1428    PCMGGetGridComplexity - compute coarse grid complexity of MG hierarchy
1429 
1430    Input Parameter:
1431 .  pc - the preconditioner context
1432 
1433    Output Parameter:
1434 .  gc - grid complexity = sum_i(nnz_i) / nnz_0
1435 
1436    Level: advanced
1437 */
1438 static PetscErrorCode PCMGGetGridComplexity(PC pc, PetscReal *gc)
1439 {
1440   PetscErrorCode ierr;
1441   PC_MG          *mg      = (PC_MG*)pc->data;
1442   PC_MG_Levels   **mglevels = mg->levels;
1443   PetscInt       lev;
1444   PetscLogDouble nnz0 = 0, sgc = 0;
1445   MatInfo        info;
1446 
1447   PetscFunctionBegin;
1448   if (!mg->nlevels) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MG has no levels");
1449   for (lev=0; lev<mg->nlevels; lev++) {
1450     Mat dB;
1451     ierr = KSPGetOperators(mglevels[lev]->smoothd,NULL,&dB);CHKERRQ(ierr);
1452     ierr = MatGetInfo(dB,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr); /* global reduction */
1453     sgc += info.nz_used;
1454     if (lev==mg->nlevels-1) nnz0 = info.nz_used;
1455   }
1456   if (nnz0 > 0) *gc = (PetscReal)(sgc/nnz0);
1457   else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number for grid points on finest level is not available");
1458   PetscFunctionReturn(0);
1459 }
1460 
1461 static PetscErrorCode PCView_GAMG(PC pc,PetscViewer viewer)
1462 {
1463   PetscErrorCode ierr,i;
1464   PC_MG          *mg      = (PC_MG*)pc->data;
1465   PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
1466   PetscReal       gc=0;
1467   PetscFunctionBegin;
1468   ierr = PetscViewerASCIIPrintf(viewer,"    GAMG specific options\n");CHKERRQ(ierr);
1469   ierr = PetscViewerASCIIPrintf(viewer,"      Threshold for dropping small values in graph on each level =");CHKERRQ(ierr);
1470   for (i=0;i<pc_gamg->current_level;i++) {
1471     ierr = PetscViewerASCIIPrintf(viewer," %g",(double)pc_gamg->threshold[i]);CHKERRQ(ierr);
1472   }
1473   ierr = PetscViewerASCIIPrintf(viewer,"\n");CHKERRQ(ierr);
1474   ierr = PetscViewerASCIIPrintf(viewer,"      Threshold scaling factor for each level not specified = %g\n",(double)pc_gamg->threshold_scale);CHKERRQ(ierr);
1475   if (pc_gamg->use_aggs_in_asm) {
1476     ierr = PetscViewerASCIIPrintf(viewer,"      Using aggregates from coarsening process to define subdomains for PCASM\n");CHKERRQ(ierr);
1477   }
1478   if (pc_gamg->use_parallel_coarse_grid_solver) {
1479     ierr = PetscViewerASCIIPrintf(viewer,"      Using parallel coarse grid solver (all coarse grid equations not put on one process)\n");CHKERRQ(ierr);
1480   }
1481 #if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
1482   if (pc_gamg->cpu_pin_coarse_grids) {
1483     /* ierr = PetscViewerASCIIPrintf(viewer,"      Pinning coarse grids to the CPU)\n");CHKERRQ(ierr); */
1484   }
1485 #endif
1486   /* if (pc_gamg->layout_type==PCGAMG_LAYOUT_COMPACT) { */
1487   /*   ierr = PetscViewerASCIIPrintf(viewer,"      Put reduced grids on processes in natural order (ie, 0,1,2...)\n");CHKERRQ(ierr); */
1488   /* } else { */
1489   /*   ierr = PetscViewerASCIIPrintf(viewer,"      Put reduced grids on whole machine (ie, 0,1*f,2*f...,np-f)\n");CHKERRQ(ierr); */
1490   /* } */
1491   if (pc_gamg->ops->view) {
1492     ierr = (*pc_gamg->ops->view)(pc,viewer);CHKERRQ(ierr);
1493   }
1494   ierr = PCMGGetGridComplexity(pc,&gc);CHKERRQ(ierr);
1495   ierr = PetscViewerASCIIPrintf(viewer,"      Complexity:    grid = %g\n",gc);CHKERRQ(ierr);
1496   PetscFunctionReturn(0);
1497 }
1498 
1499 PetscErrorCode PCSetFromOptions_GAMG(PetscOptionItems *PetscOptionsObject,PC pc)
1500 {
1501   PetscErrorCode ierr;
1502   PC_MG          *mg      = (PC_MG*)pc->data;
1503   PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
1504   PetscBool      flag,f2;
1505   MPI_Comm       comm;
1506   char           prefix[256],tname[32];
1507   PetscInt       i,n;
1508   const char     *pcpre;
1509   static const char *LayoutTypes[] = {"compact","spread","PCGAMGLayoutType","PC_GAMG_LAYOUT",0};
1510   PetscFunctionBegin;
1511   ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
1512   ierr = PetscOptionsHead(PetscOptionsObject,"GAMG options");CHKERRQ(ierr);
1513   ierr = PetscOptionsFList("-pc_gamg_type","Type of AMG method","PCGAMGSetType",GAMGList, pc_gamg->gamg_type_name, tname, sizeof(tname), &flag);CHKERRQ(ierr);
1514   if (flag) {
1515     ierr = PCGAMGSetType(pc,tname);CHKERRQ(ierr);
1516   }
1517   ierr = PetscOptionsFList("-pc_gamg_esteig_ksp_type","Krylov method for eigen estimator","PCGAMGSetEstEigKSPType",KSPList,pc_gamg->esteig_type,tname,sizeof(tname),&flag);CHKERRQ(ierr);
1518   if (flag) {
1519     ierr = PCGAMGSetEstEigKSPType(pc,tname);CHKERRQ(ierr);
1520   }
1521   ierr = PetscOptionsBool("-pc_gamg_repartition","Repartion coarse grids","PCGAMGSetRepartition",pc_gamg->repart,&pc_gamg->repart,NULL);CHKERRQ(ierr);
1522   f2 = PETSC_TRUE;
1523   ierr = PetscOptionsBool("-pc_gamg_use_sa_esteig","Use eigen estimate from Smoothed aggregation for smoother","PCGAMGSetUseSAEstEig",f2,&f2,&flag);CHKERRQ(ierr);
1524   if (flag) pc_gamg->use_sa_esteig = f2 ? 1 : 0;
1525   ierr = PetscOptionsBool("-pc_gamg_reuse_interpolation","Reuse prolongation operator","PCGAMGReuseInterpolation",pc_gamg->reuse_prol,&pc_gamg->reuse_prol,NULL);CHKERRQ(ierr);
1526   ierr = PetscOptionsBool("-pc_gamg_asm_use_agg","Use aggregation aggregates for ASM smoother","PCGAMGASMSetUseAggs",pc_gamg->use_aggs_in_asm,&pc_gamg->use_aggs_in_asm,NULL);CHKERRQ(ierr);
1527   ierr = PetscOptionsBool("-pc_gamg_use_parallel_coarse_grid_solver","Use parallel coarse grid solver (otherwise put last grid on one process)","PCGAMGSetUseParallelCoarseGridSolve",pc_gamg->use_parallel_coarse_grid_solver,&pc_gamg->use_parallel_coarse_grid_solver,NULL);CHKERRQ(ierr);
1528   ierr = PetscOptionsBool("-pc_gamg_cpu_pin_coarse_grids","Pin coarse grids to the CPU","PCGAMGSetCpuPinCoarseGrids",pc_gamg->cpu_pin_coarse_grids,&pc_gamg->cpu_pin_coarse_grids,NULL);CHKERRQ(ierr);
1529   ierr = PetscOptionsEnum("-pc_gamg_coarse_grid_layout_type","compact: place reduced grids on processes in natural order; spread: distribute to whole machine for more memory bandwidth","PCGAMGSetCoarseGridLayoutType",LayoutTypes,(PetscEnum)pc_gamg->layout_type,(PetscEnum*)&pc_gamg->layout_type,NULL);CHKERRQ(ierr);
1530   ierr = PetscOptionsInt("-pc_gamg_process_eq_limit","Limit (goal) on number of equations per process on coarse grids","PCGAMGSetProcEqLim",pc_gamg->min_eq_proc,&pc_gamg->min_eq_proc,NULL);CHKERRQ(ierr);
1531   ierr = PetscOptionsInt("-pc_gamg_esteig_ksp_max_it","Number of iterations of eigen estimator","PCGAMGSetEstEigKSPMaxIt",pc_gamg->esteig_max_it,&pc_gamg->esteig_max_it,NULL);CHKERRQ(ierr);
1532   ierr = PetscOptionsInt("-pc_gamg_coarse_eq_limit","Limit on number of equations for the coarse grid","PCGAMGSetCoarseEqLim",pc_gamg->coarse_eq_limit,&pc_gamg->coarse_eq_limit,NULL);CHKERRQ(ierr);
1533   ierr = PetscOptionsReal("-pc_gamg_threshold_scale","Scaling of threshold for each level not specified","PCGAMGSetThresholdScale",pc_gamg->threshold_scale,&pc_gamg->threshold_scale,NULL);CHKERRQ(ierr);
1534   n = PETSC_MG_MAXLEVELS;
1535   ierr = PetscOptionsRealArray("-pc_gamg_threshold","Relative threshold to use for dropping edges in aggregation graph","PCGAMGSetThreshold",pc_gamg->threshold,&n,&flag);CHKERRQ(ierr);
1536   if (!flag || n < PETSC_MG_MAXLEVELS) {
1537     if (!flag) n = 1;
1538     i = n;
1539     do {pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;} while (++i<PETSC_MG_MAXLEVELS);
1540   }
1541   ierr = PetscOptionsInt("-pc_mg_levels","Set number of MG levels","PCGAMGSetNlevels",pc_gamg->Nlevels,&pc_gamg->Nlevels,NULL);CHKERRQ(ierr);
1542   {
1543     PetscReal eminmax[2] = {0., 0.};
1544     n = 2;
1545     ierr = PetscOptionsRealArray("-pc_gamg_eigenvalues","extreme eigenvalues for smoothed aggregation","PCGAMGSetEigenvalues",eminmax,&n,&flag);CHKERRQ(ierr);
1546     if (flag) {
1547       if (n != 2) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_INCOMP,"-pc_gamg_eigenvalues: must specify 2 parameters, min and max eigenvalues");
1548       ierr = PCGAMGSetEigenvalues(pc, eminmax[1], eminmax[0]);CHKERRQ(ierr);
1549     }
1550   }
1551   /* set options for subtype */
1552   if (pc_gamg->ops->setfromoptions) {ierr = (*pc_gamg->ops->setfromoptions)(PetscOptionsObject,pc);CHKERRQ(ierr);}
1553 
1554   ierr = PCGetOptionsPrefix(pc, &pcpre);CHKERRQ(ierr);
1555   ierr = PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");CHKERRQ(ierr);
1556   ierr = PetscOptionsTail();CHKERRQ(ierr);
1557   PetscFunctionReturn(0);
1558 }
1559 
1560 /* -------------------------------------------------------------------------- */
1561 /*MC
1562      PCGAMG - Geometric algebraic multigrid (AMG) preconditioner
1563 
1564    Options Database Keys:
1565 +   -pc_gamg_type <type> - one of agg, geo, or classical
1566 .   -pc_gamg_repartition  <true,default=false> - repartition the degrees of freedom across the coarse grids as they are determined
1567 .   -pc_gamg_reuse_interpolation <true,default=false> - when rebuilding the algebraic multigrid preconditioner reuse the previously computed interpolations
1568 .   -pc_gamg_asm_use_agg <true,default=false> - use the aggregates from the coarsening process to define the subdomains on each level for the PCASM smoother
1569 .   -pc_gamg_process_eq_limit <limit, default=50> - GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit>
1570                                         equations on each process that has degrees of freedom
1571 .   -pc_gamg_coarse_eq_limit <limit, default=50> - Set maximum number of equations on coarsest grid to aim for.
1572 .   -pc_gamg_threshold[] <thresh,default=0> - Before aggregating the graph GAMG will remove small values from the graph on each level
1573 -   -pc_gamg_threshold_scale <scale,default=1> - Scaling of threshold on each coarser grid if not specified
1574 
1575    Options Database Keys for default Aggregation:
1576 +  -pc_gamg_agg_nsmooths <nsmooth, default=1> - number of smoothing steps to use with smooth aggregation
1577 .  -pc_gamg_sym_graph <true,default=false> - symmetrize the graph before computing the aggregation
1578 -  -pc_gamg_square_graph <n,default=1> - number of levels to square the graph before aggregating it
1579 
1580    Multigrid options:
1581 +  -pc_mg_cycles <v> - v or w, see PCMGSetCycleType()
1582 .  -pc_mg_distinct_smoothup - configure the up and down (pre and post) smoothers separately, see PCMGSetDistinctSmoothUp()
1583 .  -pc_mg_type <multiplicative> - (one of) additive multiplicative full kascade
1584 -  -pc_mg_levels <levels> - Number of levels of multigrid to use.
1585 
1586 
1587   Notes:
1588     In order to obtain good performance for PCGAMG for vector valued problems you must
1589        Call MatSetBlockSize() to indicate the number of degrees of freedom per grid point
1590        Call MatSetNearNullSpace() (or PCSetCoordinates() if solving the equations of elasticity) to indicate the near null space of the operator
1591        See the Users Manual Chapter 4 for more details
1592 
1593   Level: intermediate
1594 
1595 .seealso:  PCCreate(), PCSetType(), MatSetBlockSize(), PCMGType, PCSetCoordinates(), MatSetNearNullSpace(), PCGAMGSetType(), PCGAMGAGG, PCGAMGGEO, PCGAMGCLASSICAL, PCGAMGSetProcEqLim(),
1596            PCGAMGSetCoarseEqLim(), PCGAMGSetRepartition(), PCGAMGRegister(), PCGAMGSetReuseInterpolation(), PCGAMGASMSetUseAggs(), PCGAMGSetUseParallelCoarseGridSolve(), PCGAMGSetNlevels(), PCGAMGSetThreshold(), PCGAMGGetType(), PCGAMGSetReuseInterpolation(), PCGAMGSetUseSAEstEig(), PCGAMGSetEstEigKSPMaxIt(), PCGAMGSetEstEigKSPType()
1597 M*/
1598 
1599 PETSC_EXTERN PetscErrorCode PCCreate_GAMG(PC pc)
1600 {
1601   PetscErrorCode ierr,i;
1602   PC_GAMG        *pc_gamg;
1603   PC_MG          *mg;
1604 
1605   PetscFunctionBegin;
1606    /* register AMG type */
1607   ierr = PCGAMGInitializePackage();CHKERRQ(ierr);
1608 
1609   /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
1610   ierr = PCSetType(pc, PCMG);CHKERRQ(ierr);
1611   ierr = PetscObjectChangeTypeName((PetscObject)pc, PCGAMG);CHKERRQ(ierr);
1612 
1613   /* create a supporting struct and attach it to pc */
1614   ierr         = PetscNewLog(pc,&pc_gamg);CHKERRQ(ierr);
1615   ierr         = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
1616   mg           = (PC_MG*)pc->data;
1617   mg->innerctx = pc_gamg;
1618 
1619   ierr = PetscNewLog(pc,&pc_gamg->ops);CHKERRQ(ierr);
1620 
1621   pc_gamg->setup_count = 0;
1622   /* these should be in subctx but repartitioning needs simple arrays */
1623   pc_gamg->data_sz = 0;
1624   pc_gamg->data    = 0;
1625 
1626   /* overwrite the pointers of PCMG by the functions of base class PCGAMG */
1627   pc->ops->setfromoptions = PCSetFromOptions_GAMG;
1628   pc->ops->setup          = PCSetUp_GAMG;
1629   pc->ops->reset          = PCReset_GAMG;
1630   pc->ops->destroy        = PCDestroy_GAMG;
1631   mg->view                = PCView_GAMG;
1632 
1633   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCMGGetLevels_C",PCMGGetLevels_MG);CHKERRQ(ierr);
1634   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCMGSetLevels_C",PCMGSetLevels_MG);CHKERRQ(ierr);
1635   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetProcEqLim_C",PCGAMGSetProcEqLim_GAMG);CHKERRQ(ierr);
1636   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCoarseEqLim_C",PCGAMGSetCoarseEqLim_GAMG);CHKERRQ(ierr);
1637   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetRepartition_C",PCGAMGSetRepartition_GAMG);CHKERRQ(ierr);
1638   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetEstEigKSPType_C",PCGAMGSetEstEigKSPType_GAMG);CHKERRQ(ierr);
1639   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetEstEigKSPMaxIt_C",PCGAMGSetEstEigKSPMaxIt_GAMG);CHKERRQ(ierr);
1640   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetEigenvalues_C",PCGAMGSetEigenvalues_GAMG);CHKERRQ(ierr);
1641   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetUseSAEstEig_C",PCGAMGSetUseSAEstEig_GAMG);CHKERRQ(ierr);
1642   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetReuseInterpolation_C",PCGAMGSetReuseInterpolation_GAMG);CHKERRQ(ierr);
1643   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGASMSetUseAggs_C",PCGAMGASMSetUseAggs_GAMG);CHKERRQ(ierr);
1644   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetUseParallelCoarseGridSolve_C",PCGAMGSetUseParallelCoarseGridSolve_GAMG);CHKERRQ(ierr);
1645   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCpuPinCoarseGrids_C",PCGAMGSetCpuPinCoarseGrids_GAMG);CHKERRQ(ierr);
1646   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCoarseGridLayoutType_C",PCGAMGSetCoarseGridLayoutType_GAMG);CHKERRQ(ierr);
1647   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThreshold_C",PCGAMGSetThreshold_GAMG);CHKERRQ(ierr);
1648   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThresholdScale_C",PCGAMGSetThresholdScale_GAMG);CHKERRQ(ierr);
1649   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetType_C",PCGAMGSetType_GAMG);CHKERRQ(ierr);
1650   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGGetType_C",PCGAMGGetType_GAMG);CHKERRQ(ierr);
1651   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetNlevels_C",PCGAMGSetNlevels_GAMG);CHKERRQ(ierr);
1652   pc_gamg->repart           = PETSC_FALSE;
1653   pc_gamg->reuse_prol       = PETSC_FALSE;
1654   pc_gamg->use_aggs_in_asm  = PETSC_FALSE;
1655   pc_gamg->use_parallel_coarse_grid_solver = PETSC_FALSE;
1656   pc_gamg->cpu_pin_coarse_grids = PETSC_FALSE;
1657   pc_gamg->layout_type      = PCGAMG_LAYOUT_SPREAD;
1658   pc_gamg->min_eq_proc      = 50;
1659   pc_gamg->coarse_eq_limit  = 50;
1660   for (i=0;i<PETSC_MG_MAXLEVELS;i++) pc_gamg->threshold[i] = 0.;
1661   pc_gamg->threshold_scale = 1.;
1662   pc_gamg->Nlevels          = PETSC_MG_MAXLEVELS;
1663   pc_gamg->current_level    = 0; /* don't need to init really */
1664   ierr = PetscStrcpy(pc_gamg->esteig_type,KSPGMRES);CHKERRQ(ierr);
1665   pc_gamg->esteig_max_it    = 10;
1666   pc_gamg->use_sa_esteig    = -1;
1667   pc_gamg->emin             = 0;
1668   pc_gamg->emax             = 0;
1669 
1670   pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1671 
1672   /* PCSetUp_GAMG assumes that the type has been set, so set it to the default now */
1673   ierr = PCGAMGSetType(pc,PCGAMGAGG);CHKERRQ(ierr);
1674   PetscFunctionReturn(0);
1675 }
1676 
1677 /*@C
1678  PCGAMGInitializePackage - This function initializes everything in the PCGAMG package. It is called
1679     from PCInitializePackage().
1680 
1681  Level: developer
1682 
1683  .seealso: PetscInitialize()
1684 @*/
1685 PetscErrorCode PCGAMGInitializePackage(void)
1686 {
1687   PetscErrorCode ierr;
1688 
1689   PetscFunctionBegin;
1690   if (PCGAMGPackageInitialized) PetscFunctionReturn(0);
1691   PCGAMGPackageInitialized = PETSC_TRUE;
1692   ierr = PetscFunctionListAdd(&GAMGList,PCGAMGGEO,PCCreateGAMG_GEO);CHKERRQ(ierr);
1693   ierr = PetscFunctionListAdd(&GAMGList,PCGAMGAGG,PCCreateGAMG_AGG);CHKERRQ(ierr);
1694   ierr = PetscFunctionListAdd(&GAMGList,PCGAMGCLASSICAL,PCCreateGAMG_Classical);CHKERRQ(ierr);
1695   ierr = PetscRegisterFinalize(PCGAMGFinalizePackage);CHKERRQ(ierr);
1696 
1697   /* general events */
1698   ierr = PetscLogEventRegister("PCGAMGGraph_AGG", 0, &PC_GAMGGraph_AGG);CHKERRQ(ierr);
1699   ierr = PetscLogEventRegister("PCGAMGGraph_GEO", PC_CLASSID, &PC_GAMGGraph_GEO);CHKERRQ(ierr);
1700   ierr = PetscLogEventRegister("PCGAMGCoarse_AGG", PC_CLASSID, &PC_GAMGCoarsen_AGG);CHKERRQ(ierr);
1701   ierr = PetscLogEventRegister("PCGAMGCoarse_GEO", PC_CLASSID, &PC_GAMGCoarsen_GEO);CHKERRQ(ierr);
1702   ierr = PetscLogEventRegister("PCGAMGProl_AGG", PC_CLASSID, &PC_GAMGProlongator_AGG);CHKERRQ(ierr);
1703   ierr = PetscLogEventRegister("PCGAMGProl_GEO", PC_CLASSID, &PC_GAMGProlongator_GEO);CHKERRQ(ierr);
1704   ierr = PetscLogEventRegister("PCGAMGPOpt_AGG", PC_CLASSID, &PC_GAMGOptProlongator_AGG);CHKERRQ(ierr);
1705 
1706 #if defined PETSC_GAMG_USE_LOG
1707   ierr = PetscLogEventRegister("GAMG: createProl", PC_CLASSID, &petsc_gamg_setup_events[SET1]);CHKERRQ(ierr);
1708   ierr = PetscLogEventRegister("  Graph", PC_CLASSID, &petsc_gamg_setup_events[GRAPH]);CHKERRQ(ierr);
1709   /* PetscLogEventRegister("    G.Mat", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_MAT]); */
1710   /* PetscLogEventRegister("    G.Filter", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_FILTER]); */
1711   /* PetscLogEventRegister("    G.Square", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_SQR]); */
1712   ierr = PetscLogEventRegister("  MIS/Agg", PC_CLASSID, &petsc_gamg_setup_events[SET4]);CHKERRQ(ierr);
1713   ierr = PetscLogEventRegister("  geo: growSupp", PC_CLASSID, &petsc_gamg_setup_events[SET5]);CHKERRQ(ierr);
1714   ierr = PetscLogEventRegister("  geo: triangle", PC_CLASSID, &petsc_gamg_setup_events[SET6]);CHKERRQ(ierr);
1715   ierr = PetscLogEventRegister("    search-set", PC_CLASSID, &petsc_gamg_setup_events[FIND_V]);CHKERRQ(ierr);
1716   ierr = PetscLogEventRegister("  SA: col data", PC_CLASSID, &petsc_gamg_setup_events[SET7]);CHKERRQ(ierr);
1717   ierr = PetscLogEventRegister("  SA: frmProl0", PC_CLASSID, &petsc_gamg_setup_events[SET8]);CHKERRQ(ierr);
1718   ierr = PetscLogEventRegister("  SA: smooth", PC_CLASSID, &petsc_gamg_setup_events[SET9]);CHKERRQ(ierr);
1719   ierr = PetscLogEventRegister("GAMG: partLevel", PC_CLASSID, &petsc_gamg_setup_events[SET2]);CHKERRQ(ierr);
1720   ierr = PetscLogEventRegister("  repartition", PC_CLASSID, &petsc_gamg_setup_events[SET12]);CHKERRQ(ierr);
1721   ierr = PetscLogEventRegister("  Invert-Sort", PC_CLASSID, &petsc_gamg_setup_events[SET13]);CHKERRQ(ierr);
1722   ierr = PetscLogEventRegister("  Move A", PC_CLASSID, &petsc_gamg_setup_events[SET14]);CHKERRQ(ierr);
1723   ierr = PetscLogEventRegister("  Move P", PC_CLASSID, &petsc_gamg_setup_events[SET15]);CHKERRQ(ierr);
1724 
1725   /* PetscLogEventRegister(" PL move data", PC_CLASSID, &petsc_gamg_setup_events[SET13]); */
1726   /* PetscLogEventRegister("GAMG: fix", PC_CLASSID, &petsc_gamg_setup_events[SET10]); */
1727   /* PetscLogEventRegister("GAMG: set levels", PC_CLASSID, &petsc_gamg_setup_events[SET11]); */
1728   /* create timer stages */
1729 #if defined GAMG_STAGES
1730   {
1731     char     str[32];
1732     PetscInt lidx;
1733     sprintf(str,"MG Level %d (finest)",0);
1734     ierr = PetscLogStageRegister(str, &gamg_stages[0]);CHKERRQ(ierr);
1735     for (lidx=1; lidx<9; lidx++) {
1736       sprintf(str,"MG Level %d",lidx);
1737       ierr = PetscLogStageRegister(str, &gamg_stages[lidx]);CHKERRQ(ierr);
1738     }
1739   }
1740 #endif
1741 #endif
1742   PetscFunctionReturn(0);
1743 }
1744 
1745 /*@C
1746  PCGAMGFinalizePackage - This function frees everything from the PCGAMG package. It is
1747     called from PetscFinalize() automatically.
1748 
1749  Level: developer
1750 
1751  .seealso: PetscFinalize()
1752 @*/
1753 PetscErrorCode PCGAMGFinalizePackage(void)
1754 {
1755   PetscErrorCode ierr;
1756 
1757   PetscFunctionBegin;
1758   PCGAMGPackageInitialized = PETSC_FALSE;
1759   ierr = PetscFunctionListDestroy(&GAMGList);CHKERRQ(ierr);
1760   PetscFunctionReturn(0);
1761 }
1762 
/*@C
 PCGAMGRegister - Register a PCGAMG implementation.

 Input Parameters:
 + type - string that will be used as the name of the GAMG type.
 - create - function for creating the gamg context.

  Level: advanced

 .seealso: PCGAMGType, PCGAMG, PCGAMGSetType()
@*/
PetscErrorCode PCGAMGRegister(PCGAMGType type, PetscErrorCode (*create)(PC))
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* ensure the package (and GAMGList itself) exists before adding to it */
  ierr = PCGAMGInitializePackage();CHKERRQ(ierr);
  ierr = PetscFunctionListAdd(&GAMGList,type,create);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1783 
1784