xref: /petsc/src/ksp/pc/impls/gamg/gamg.c (revision ccbd7a3f2d0a2fa356a9878ab9796fd8d29170f0)
/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
 */
#include <petsc/private/matimpl.h>
#include <../src/ksp/pc/impls/gamg/gamg.h>           /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/bjacobi/bjacobi.h> /* Hack to access same_local_solves */
#include <../src/ksp/ksp/impls/cheby/chebyshevimpl.h>    /*I "petscksp.h" I*/

#if defined PETSC_GAMG_USE_LOG
PetscLogEvent petsc_gamg_setup_events[NUM_SET];
#endif

#if defined PETSC_USE_LOG
PetscLogEvent PC_GAMGGraph_AGG;
PetscLogEvent PC_GAMGGraph_GEO;
PetscLogEvent PC_GAMGCoarsen_AGG;
PetscLogEvent PC_GAMGCoarsen_GEO;
PetscLogEvent PC_GAMGProlongator_AGG;
PetscLogEvent PC_GAMGProlongator_GEO;
PetscLogEvent PC_GAMGOptProlongator_AGG;
#endif

/* #define GAMG_STAGES */
#if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
static PetscLogStage gamg_stages[PETSC_MG_MAXLEVELS];
#endif

static PetscFunctionList GAMGList = NULL;
static PetscBool PCGAMGPackageInitialized;

/* ----------------------------------------------------------------------------- */
PetscErrorCode PCReset_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       level;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
  pc_gamg->data_sz = 0;
  ierr = PetscFree(pc_gamg->orig_data);CHKERRQ(ierr);
  for (level = 0; level < PETSC_MG_MAXLEVELS ; level++) {
    mg->min_eigen_DinvA[level] = 0;
    mg->max_eigen_DinvA[level] = 0;
  }
  pc_gamg->emin = 0;
  pc_gamg->emax = 0;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCGAMGCreateLevel_GAMG: create the coarse grid operator with a Galerkin triple
     product (RAP), and repartition and/or reduce the number of active processes.

   Input Parameters:
   . pc - parameters + side effect: coarse data in 'pc_gamg->data' and
          'pc_gamg->data_sz' are changed via repartitioning/reduction
   . Amat_fine - matrix on this fine (k) level
   . cr_bs - coarse block size
   In/Output Parameters:
   . a_P_inout - prolongation operator to the next level (k-->k-1)
   . a_nactive_proc - number of active processes
   Output Parameter:
   . a_Amat_crs - coarse matrix that is created (k-1)
*/

static PetscErrorCode PCGAMGCreateLevel_GAMG(PC pc,Mat Amat_fine,PetscInt cr_bs,Mat *a_P_inout,Mat *a_Amat_crs,PetscMPIInt *a_nactive_proc,IS * Pcolumnperm, PetscBool is_last)
{
  PetscErrorCode  ierr;
  PC_MG           *mg         = (PC_MG*)pc->data;
  PC_GAMG         *pc_gamg    = (PC_GAMG*)mg->innerctx;
  Mat             Cmat,Pold=*a_P_inout;
  MPI_Comm        comm;
  PetscMPIInt     rank,size,new_size,nactive=*a_nactive_proc;
  PetscInt        ncrs_eq,ncrs,f_bs;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)Amat_fine,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
  ierr = MatGetBlockSize(Amat_fine, &f_bs);CHKERRQ(ierr);
  ierr = MatPtAP(Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat);CHKERRQ(ierr);

  if (Pcolumnperm) *Pcolumnperm = NULL;

  /* set 'ncrs' (nodes), 'ncrs_eq' (equations) */
  ierr = MatGetLocalSize(Cmat, &ncrs_eq, NULL);CHKERRQ(ierr);
  if (pc_gamg->data_cell_rows>0) {
    ncrs = pc_gamg->data_sz/pc_gamg->data_cell_cols/pc_gamg->data_cell_rows;
  } else {
    PetscInt  bs;
    ierr = MatGetBlockSize(Cmat, &bs);CHKERRQ(ierr);
    ncrs = ncrs_eq/bs;
  }
  /* get the number of processes to make active, 'new_size'; can be any integer in [1,P] */
  if (is_last && !pc_gamg->use_parallel_coarse_grid_solver) new_size = 1;
  else {
    PetscInt ncrs_eq_glob;
    ierr     = MatGetSize(Cmat, &ncrs_eq_glob, NULL);CHKERRQ(ierr);
    new_size = (PetscMPIInt)((float)ncrs_eq_glob/(float)pc_gamg->min_eq_proc + 0.5); /* hardwire min. number of eq/proc */
    if (!new_size) new_size = 1; /* not likely, possible? */
    else if (new_size >= nactive) new_size = nactive; /* no change, rare */
  }
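  /* illustrative example (values not from this source): 10,000 global coarse equations
     with min_eq_proc = 1000 rounds to new_size = 10 active processes */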

  if (new_size==nactive) {
    *a_Amat_crs = Cmat; /* output - no repartitioning or reduction - could bail here */
    if (new_size < size) {
      /* odd case where multiple coarse grids are on one processor or no coarsening ... */
      ierr = PetscInfo1(pc,"reduced grid using same number of processors (%d) as last grid (use larger coarse grid)\n",nactive);CHKERRQ(ierr);
      if (pc_gamg->cpu_pin_coarse_grids) {
        ierr = MatBindToCPU(*a_Amat_crs,PETSC_TRUE);CHKERRQ(ierr);
        ierr = MatBindToCPU(*a_P_inout,PETSC_TRUE);CHKERRQ(ierr);
      }
    }
    /* we know that the grid structure can be reused in MatPtAP */
  } else { /* reduce active processors - we know that the grid structure can NOT be reused in MatPtAP */
    PetscInt       *counts,*newproc_idx,ii,jj,kk,strideNew,*tidx,ncrs_new,ncrs_eq_new,nloc_old,expand_factor=1,rfactor=1;
    IS             is_eq_newproc,is_eq_num,is_eq_num_prim,new_eq_indices;
    nloc_old = ncrs_eq/cr_bs;
    if (ncrs_eq % cr_bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ncrs_eq %D not divisible by cr_bs %D",ncrs_eq,cr_bs);
    /* get new_size and rfactor */
    if (pc_gamg->layout_type==PCGAMG_LAYOUT_SPREAD || !pc_gamg->repart) {
      /* find factor */
      if (new_size == 1) rfactor = size; /* don't modify */
      else {
        PetscReal best_fact = 0.;
        jj = -1;
        for (kk = 1 ; kk <= size ; kk++) {
          if (!(size%kk)) { /* a candidate */
            PetscReal nactpe = (PetscReal)size/(PetscReal)kk, fact = nactpe/(PetscReal)new_size;
            if (fact > 1.0) fact = 1./fact; /* keep fact < 1 */
            if (fact > best_fact) {
              best_fact = fact; jj = kk;
            }
          }
        }
        if (jj != -1) rfactor = jj;
        else rfactor = 1; /* a prime */
        if (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) expand_factor = 1;
        else expand_factor = rfactor;
      }
      new_size = size/rfactor; /* make the new size one that divides the communicator size */
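      /* illustrative example (values not from this source): with size=24 and target new_size=5,
         the divisors of 24 give candidate active counts 24,12,8,6,4,3,2,1; 6 is closest to 5
         (fact = 5/6), so rfactor = 4 and new_size becomes 6 */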
      if (new_size==nactive) { /* no repartitioning or reduction, bail out because nested here (rare) */
        *a_Amat_crs = Cmat;
        ierr = PetscInfo2(pc,"Finding factorable processor set stopped reduction: new_size=%d, neq(loc)=%D\n",new_size,ncrs_eq);CHKERRQ(ierr);
        PetscFunctionReturn(0);
      }
    }
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
#endif
    /* make 'is_eq_newproc' */
    ierr = PetscMalloc1(size, &counts);CHKERRQ(ierr);
    if (pc_gamg->repart) {
      /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates of primal part accordingly */
      Mat      adj;
      ierr = PetscInfo4(pc,"Repartition: size (active): %d --> %d, %D local equations, using %s process layout\n",*a_nactive_proc, new_size, ncrs_eq, (pc_gamg->layout_type==PCGAMG_LAYOUT_COMPACT) ? "compact" : "spread");CHKERRQ(ierr);
      /* get 'adj' */
      if (cr_bs == 1) {
        ierr = MatConvert(Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);CHKERRQ(ierr);
      } else {
        /* make a scalar matrix to partition (no Stokes here) */
        Mat               tMat;
        PetscInt          Istart_crs,Iend_crs,ncols,jj,Ii;
        const PetscScalar *vals;
        const PetscInt    *idx;
        PetscInt          *d_nnz, *o_nnz, M, N;
        static PetscInt   llev = 0; /* ugly but just used for debugging */
        MatType           mtype;

        ierr = PetscMalloc2(ncrs, &d_nnz,ncrs, &o_nnz);CHKERRQ(ierr);
        ierr = MatGetOwnershipRange(Cmat, &Istart_crs, &Iend_crs);CHKERRQ(ierr);
        ierr = MatGetSize(Cmat, &M, &N);CHKERRQ(ierr);
        for (Ii = Istart_crs, jj = 0; Ii < Iend_crs; Ii += cr_bs, jj++) {
          ierr      = MatGetRow(Cmat,Ii,&ncols,NULL,NULL);CHKERRQ(ierr);
          d_nnz[jj] = ncols/cr_bs;
          o_nnz[jj] = ncols/cr_bs;
          ierr      = MatRestoreRow(Cmat,Ii,&ncols,NULL,NULL);CHKERRQ(ierr);
          if (d_nnz[jj] > ncrs) d_nnz[jj] = ncrs;
          if (o_nnz[jj] > (M/cr_bs-ncrs)) o_nnz[jj] = M/cr_bs-ncrs;
        }

        ierr = MatGetType(Amat_fine,&mtype);CHKERRQ(ierr);
        ierr = MatCreate(comm, &tMat);CHKERRQ(ierr);
        ierr = MatSetSizes(tMat, ncrs, ncrs,PETSC_DETERMINE, PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = MatSetType(tMat,mtype);CHKERRQ(ierr);
        ierr = MatSeqAIJSetPreallocation(tMat,0,d_nnz);CHKERRQ(ierr);
        ierr = MatMPIAIJSetPreallocation(tMat,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
        ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);

        for (ii = Istart_crs; ii < Iend_crs; ii++) {
          PetscInt dest_row = ii/cr_bs;
          ierr = MatGetRow(Cmat,ii,&ncols,&idx,&vals);CHKERRQ(ierr);
          for (jj = 0; jj < ncols; jj++) {
            PetscInt    dest_col = idx[jj]/cr_bs;
            PetscScalar v        = 1.0;
            ierr = MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES);CHKERRQ(ierr);
          }
          ierr = MatRestoreRow(Cmat,ii,&ncols,&idx,&vals);CHKERRQ(ierr);
        }
        ierr = MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        ierr = MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

        if (llev++ == -1) {
          PetscViewer viewer; char fname[32];
          ierr = PetscSNPrintf(fname,sizeof(fname),"part_mat_%D.mat",llev);CHKERRQ(ierr);
          ierr = PetscViewerBinaryOpen(comm,fname,FILE_MODE_WRITE,&viewer);CHKERRQ(ierr);
          ierr = MatView(tMat, viewer);CHKERRQ(ierr);
          ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);
        }
        ierr = MatConvert(tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj);CHKERRQ(ierr);
        ierr = MatDestroy(&tMat);CHKERRQ(ierr);
      } /* create 'adj' */

      { /* partition: get newproc_idx */
        char            prefix[256];
        const char      *pcpre;
        const PetscInt  *is_idx;
        MatPartitioning mpart;
        IS              proc_is;

        ierr = MatPartitioningCreate(comm, &mpart);CHKERRQ(ierr);
        ierr = MatPartitioningSetAdjacency(mpart, adj);CHKERRQ(ierr);
        ierr = PCGetOptionsPrefix(pc, &pcpre);CHKERRQ(ierr);
        ierr = PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");CHKERRQ(ierr);
        ierr = PetscObjectSetOptionsPrefix((PetscObject)mpart,prefix);CHKERRQ(ierr);
        ierr = MatPartitioningSetFromOptions(mpart);CHKERRQ(ierr);
        ierr = MatPartitioningSetNParts(mpart, new_size);CHKERRQ(ierr);
        ierr = MatPartitioningApply(mpart, &proc_is);CHKERRQ(ierr);
        ierr = MatPartitioningDestroy(&mpart);CHKERRQ(ierr);

        /* collect IS info */
        ierr     = PetscMalloc1(ncrs_eq, &newproc_idx);CHKERRQ(ierr);
        ierr     = ISGetIndices(proc_is, &is_idx);CHKERRQ(ierr);
        for (kk = jj = 0 ; kk < nloc_old ; kk++) {
          for (ii = 0 ; ii < cr_bs ; ii++, jj++) {
            newproc_idx[jj] = is_idx[kk] * expand_factor; /* distribution */
          }
        }
        ierr = ISRestoreIndices(proc_is, &is_idx);CHKERRQ(ierr);
        ierr = ISDestroy(&proc_is);CHKERRQ(ierr);
      }
      ierr = MatDestroy(&adj);CHKERRQ(ierr);

      ierr = ISCreateGeneral(comm, ncrs_eq, newproc_idx, PETSC_COPY_VALUES, &is_eq_newproc);CHKERRQ(ierr);
      ierr = PetscFree(newproc_idx);CHKERRQ(ierr);
    } else { /* simple aggregation of parts -- 'is_eq_newproc' */
      PetscInt targetPE;
      if (new_size==nactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"new_size==nactive. Should not happen");
      ierr = PetscInfo1(pc,"Number of equations (loc) %D with simple aggregation\n",ncrs_eq);CHKERRQ(ierr);
      targetPE = (rank/rfactor)*expand_factor;
      ierr     = ISCreateStride(comm, ncrs_eq, targetPE, 0, &is_eq_newproc);CHKERRQ(ierr);
    } /* end simple 'is_eq_newproc' */

    /*
      Create an index set from the is_eq_newproc index set to indicate the mapping TO
    */
    ierr = ISPartitioningToNumbering(is_eq_newproc, &is_eq_num);CHKERRQ(ierr);
    is_eq_num_prim = is_eq_num;
    /*
      Determine how many equations/vertices are assigned to each processor
    */
    ierr        = ISPartitioningCount(is_eq_newproc, size, counts);CHKERRQ(ierr);
    ncrs_eq_new = counts[rank];
    ierr        = ISDestroy(&is_eq_newproc);CHKERRQ(ierr);
    ncrs_new = ncrs_eq_new/cr_bs;

    ierr = PetscFree(counts);CHKERRQ(ierr);
    /* data movement scope -- this could be moved to subclasses so that we don't try to cram all auxiliary data into some complex abstracted thing */
    {
      Vec            src_crd, dest_crd;
      const PetscInt *idx,ndata_rows=pc_gamg->data_cell_rows,ndata_cols=pc_gamg->data_cell_cols,node_data_sz=ndata_rows*ndata_cols;
      VecScatter     vecscat;
      PetscScalar    *array;
      IS isscat;
      /* move data (for primal equations only) */
      /* Create a vector to contain the newly ordered element information */
      ierr = VecCreate(comm, &dest_crd);CHKERRQ(ierr);
      ierr = VecSetSizes(dest_crd, node_data_sz*ncrs_new, PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(dest_crd,VECSTANDARD);CHKERRQ(ierr); /* this is needed! */
      /*
        There are 'ndata_rows*ndata_cols' data items per node; one can think of the vectors as
        having a block size of that. Note, ISs are expanded into equation space by 'cr_bs'.
      */
      ierr = PetscMalloc1(ncrs*node_data_sz, &tidx);CHKERRQ(ierr);
      ierr = ISGetIndices(is_eq_num_prim, &idx);CHKERRQ(ierr);
      for (ii=0,jj=0; ii<ncrs; ii++) {
        PetscInt id = idx[ii*cr_bs]/cr_bs; /* get node back */
        for (kk=0; kk<node_data_sz; kk++, jj++) tidx[jj] = id*node_data_sz + kk;
      }
      ierr = ISRestoreIndices(is_eq_num_prim, &idx);CHKERRQ(ierr);
      ierr = ISCreateGeneral(comm, node_data_sz*ncrs, tidx, PETSC_COPY_VALUES, &isscat);CHKERRQ(ierr);
      ierr = PetscFree(tidx);CHKERRQ(ierr);
      /*
        Create a vector to contain the original vertex information for each element
      */
      ierr = VecCreateSeq(PETSC_COMM_SELF, node_data_sz*ncrs, &src_crd);CHKERRQ(ierr);
      for (jj=0; jj<ndata_cols; jj++) {
        const PetscInt stride0=ncrs*pc_gamg->data_cell_rows;
        for (ii=0; ii<ncrs; ii++) {
          for (kk=0; kk<ndata_rows; kk++) {
            PetscInt    ix = ii*ndata_rows + kk + jj*stride0, jx = ii*node_data_sz + kk*ndata_cols + jj;
            PetscScalar tt = (PetscScalar)pc_gamg->data[ix];
            ierr = VecSetValues(src_crd, 1, &jx, &tt, INSERT_VALUES);CHKERRQ(ierr);
          }
        }
      }
      ierr = VecAssemblyBegin(src_crd);CHKERRQ(ierr);
      ierr = VecAssemblyEnd(src_crd);CHKERRQ(ierr);
      /*
        Scatter the element vertex information (still in the original vertex ordering)
        to the correct processor
      */
      ierr = VecScatterCreate(src_crd, NULL, dest_crd, isscat, &vecscat);CHKERRQ(ierr);
      ierr = ISDestroy(&isscat);CHKERRQ(ierr);
      ierr = VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterDestroy(&vecscat);CHKERRQ(ierr);
      ierr = VecDestroy(&src_crd);CHKERRQ(ierr);
      /*
        Put the element vertex data into a new allocation of the gdata->ele
      */
      ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
      ierr = PetscMalloc1(node_data_sz*ncrs_new, &pc_gamg->data);CHKERRQ(ierr);

      pc_gamg->data_sz = node_data_sz*ncrs_new;
      strideNew        = ncrs_new*ndata_rows;

      ierr = VecGetArray(dest_crd, &array);CHKERRQ(ierr);
      for (jj=0; jj<ndata_cols; jj++) {
        for (ii=0; ii<ncrs_new; ii++) {
          for (kk=0; kk<ndata_rows; kk++) {
            PetscInt ix = ii*ndata_rows + kk + jj*strideNew, jx = ii*node_data_sz + kk*ndata_cols + jj;
            pc_gamg->data[ix] = PetscRealPart(array[jx]);
          }
        }
      }
      ierr = VecRestoreArray(dest_crd, &array);CHKERRQ(ierr);
      ierr = VecDestroy(&dest_crd);CHKERRQ(ierr);
    }
    /* move A and P (columns) with new layout */
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET13],0,0,0,0);CHKERRQ(ierr);
#endif
    /*
      Invert for MatCreateSubMatrix
    */
    ierr = ISInvertPermutation(is_eq_num, ncrs_eq_new, &new_eq_indices);CHKERRQ(ierr);
    ierr = ISSort(new_eq_indices);CHKERRQ(ierr); /* is this needed? */
    ierr = ISSetBlockSize(new_eq_indices, cr_bs);CHKERRQ(ierr);
    if (is_eq_num != is_eq_num_prim) {
      ierr = ISDestroy(&is_eq_num_prim);CHKERRQ(ierr); /* could be same as 'is_eq_num' */
    }
    if (Pcolumnperm) {
      ierr = PetscObjectReference((PetscObject)new_eq_indices);CHKERRQ(ierr);
      *Pcolumnperm = new_eq_indices;
    }
    ierr = ISDestroy(&is_eq_num);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET13],0,0,0,0);CHKERRQ(ierr);
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET14],0,0,0,0);CHKERRQ(ierr);
#endif
    /* 'a_Amat_crs' output */
    {
      Mat mat;
      ierr        = MatCreateSubMatrix(Cmat, new_eq_indices, new_eq_indices, MAT_INITIAL_MATRIX, &mat);CHKERRQ(ierr);
      *a_Amat_crs = mat;
    }
    ierr = MatDestroy(&Cmat);CHKERRQ(ierr);

#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET14],0,0,0,0);CHKERRQ(ierr);
#endif
    /* prolongator */
    {
      IS       findices;
      PetscInt Istart,Iend;
      Mat      Pnew;

      ierr = MatGetOwnershipRange(Pold, &Istart, &Iend);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG
      ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET15],0,0,0,0);CHKERRQ(ierr);
#endif
      ierr = ISCreateStride(comm,Iend-Istart,Istart,1,&findices);CHKERRQ(ierr);
      ierr = ISSetBlockSize(findices,f_bs);CHKERRQ(ierr);
      ierr = MatCreateSubMatrix(Pold, findices, new_eq_indices, MAT_INITIAL_MATRIX, &Pnew);CHKERRQ(ierr);
      ierr = ISDestroy(&findices);CHKERRQ(ierr);

#if defined PETSC_GAMG_USE_LOG
      ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET15],0,0,0,0);CHKERRQ(ierr);
#endif
      ierr = MatDestroy(a_P_inout);CHKERRQ(ierr);

      /* output - repartitioned */
      *a_P_inout = Pnew;
    }
    ierr = ISDestroy(&new_eq_indices);CHKERRQ(ierr);

    *a_nactive_proc = new_size; /* output */

    /* pinning on reduced grids, not a bad heuristic and optimization gets folded into process reduction optimization */
    if (pc_gamg->cpu_pin_coarse_grids) {
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
      static PetscInt llev = 2;
      ierr = PetscInfo1(pc,"Pinning level %D to the CPU\n",llev++);CHKERRQ(ierr);
#endif
      ierr = MatBindToCPU(*a_Amat_crs,PETSC_TRUE);CHKERRQ(ierr);
      ierr = MatBindToCPU(*a_P_inout,PETSC_TRUE);CHKERRQ(ierr);
      if (1) { /* lvec is created in MatSetUpMultiply_MPIAIJ and needs to be pinned as well. Hack */
        Mat         A = *a_Amat_crs, P = *a_P_inout;
        PetscMPIInt size;
        ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
        if (size > 1) {
          Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data, *p = (Mat_MPIAIJ*)P->data;
          ierr = VecBindToCPU(a->lvec,PETSC_TRUE);CHKERRQ(ierr);
          ierr = VecBindToCPU(p->lvec,PETSC_TRUE);CHKERRQ(ierr);
        }
      }
    }
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
#endif
  }
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

*/
PetscErrorCode PCSetUp_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  Mat            Pmat     = pc->pmat;
  PetscInt       fine_level,level,level1,bs,M,N,qq,lidx,nASMBlocksArr[PETSC_MG_MAXLEVELS];
  MPI_Comm       comm;
  PetscMPIInt    rank,size,nactivepe;
  Mat            Aarr[PETSC_MG_MAXLEVELS],Parr[PETSC_MG_MAXLEVELS];
  IS             *ASMLocalIDsArr[PETSC_MG_MAXLEVELS];
  PetscLogDouble nnz0=0.,nnztot=0.;
  MatInfo        info;
  PetscBool      is_last = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  if (pc_gamg->setup_count++ > 0) {
    if ((PetscBool)(!pc_gamg->reuse_prol)) {
      /* reset everything */
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
      pc->setupcalled = 0;
    } else {
      PC_MG_Levels **mglevels = mg->levels;
      /* just do Galerkin grids */
      Mat          B,dA,dB;

      if (!pc->setupcalled) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"PCSetUp() has not been called yet");
      if (pc_gamg->Nlevels > 1) {
        /* currently only handle case where mat and pmat are the same on coarser levels */
        ierr = KSPGetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,&dA,&dB);CHKERRQ(ierr);
        /* (re)set to get dirty flag */
        ierr = KSPSetOperators(mglevels[pc_gamg->Nlevels-1]->smoothd,dA,dB);CHKERRQ(ierr);

        for (level=pc_gamg->Nlevels-2; level>=0; level--) {
          /* 2nd solve: the matrix structure can change from repartitioning or process reduction, but we don't know whether process reduction happened here. Should fix */
          if (pc_gamg->setup_count==2 /* && pc_gamg->repart||reduction */) {
            ierr = PetscInfo2(pc,"new RAP after first solve level %D, %D setup\n",level,pc_gamg->setup_count);CHKERRQ(ierr);
            ierr = MatPtAP(dB,mglevels[level+1]->interpolate,MAT_INITIAL_MATRIX,2.0,&B);CHKERRQ(ierr);
            ierr = MatDestroy(&mglevels[level]->A);CHKERRQ(ierr);
            mglevels[level]->A = B;
          } else {
            ierr = PetscInfo2(pc,"RAP after first solve reusing matrix level %D, %D setup\n",level,pc_gamg->setup_count);CHKERRQ(ierr);
            ierr = KSPGetOperators(mglevels[level]->smoothd,NULL,&B);CHKERRQ(ierr);
            ierr = MatPtAP(dB,mglevels[level+1]->interpolate,MAT_REUSE_MATRIX,1.0,&B);CHKERRQ(ierr);
          }
          ierr = KSPSetOperators(mglevels[level]->smoothd,B,B);CHKERRQ(ierr);
          dB   = B;
        }
      }

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }
  }

  if (!pc_gamg->data) {
    if (pc_gamg->orig_data) {
      ierr = MatGetBlockSize(Pmat, &bs);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Pmat, &qq, NULL);CHKERRQ(ierr);

      pc_gamg->data_sz        = (qq/bs)*pc_gamg->orig_data_cell_rows*pc_gamg->orig_data_cell_cols;
      pc_gamg->data_cell_rows = pc_gamg->orig_data_cell_rows;
      pc_gamg->data_cell_cols = pc_gamg->orig_data_cell_cols;

      ierr = PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data);CHKERRQ(ierr);
      for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->data[qq] = pc_gamg->orig_data[qq];
    } else {
      if (!pc_gamg->ops->createdefaultdata) SETERRQ(comm,PETSC_ERR_PLIB,"'createdefaultdata' not set(?) need to support NULL data");
      ierr = pc_gamg->ops->createdefaultdata(pc,Pmat);CHKERRQ(ierr);
    }
  }

  /* cache original data for reuse */
  if (!pc_gamg->orig_data && (PetscBool)(!pc_gamg->reuse_prol)) {
    ierr = PetscMalloc1(pc_gamg->data_sz, &pc_gamg->orig_data);CHKERRQ(ierr);
    for (qq=0; qq<pc_gamg->data_sz; qq++) pc_gamg->orig_data[qq] = pc_gamg->data[qq];
    pc_gamg->orig_data_cell_rows = pc_gamg->data_cell_rows;
    pc_gamg->orig_data_cell_cols = pc_gamg->data_cell_cols;
  }

  /* get basic dims */
  ierr = MatGetBlockSize(Pmat, &bs);CHKERRQ(ierr);
  ierr = MatGetSize(Pmat, &M, &N);CHKERRQ(ierr);

  ierr = MatGetInfo(Pmat,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr); /* global reduction */
  nnz0   = info.nz_used;
  nnztot = info.nz_used;
  ierr = PetscInfo6(pc,"level %d) N=%D, n data rows=%d, n data cols=%d, nnz/row (ave)=%d, np=%d\n",0,M,pc_gamg->data_cell_rows,pc_gamg->data_cell_cols,(int)(nnz0/(PetscReal)M+0.5),size);CHKERRQ(ierr);

  /* Get A_i and R_i */
  for (level=0, Aarr[0]=Pmat, nactivepe = size; level < (pc_gamg->Nlevels-1) && (!level || M>pc_gamg->coarse_eq_limit); level++) {
    pc_gamg->current_level = level;
    if (level >= PETSC_MG_MAXLEVELS) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Too many levels %D",level);
    level1 = level + 1;
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
#if (defined GAMG_STAGES)
    ierr = PetscLogStagePush(gamg_stages[level]);CHKERRQ(ierr);
#endif
#endif
    { /* construct prolongator */
      Mat              Gmat;
      PetscCoarsenData *agg_lists;
      Mat              Prol11;

      ierr = pc_gamg->ops->graph(pc,Aarr[level], &Gmat);CHKERRQ(ierr);
      ierr = pc_gamg->ops->coarsen(pc, &Gmat, &agg_lists);CHKERRQ(ierr);
      ierr = pc_gamg->ops->prolongator(pc,Aarr[level],Gmat,agg_lists,&Prol11);CHKERRQ(ierr);

      /* could have failed to create new level */
      if (Prol11) {
        const char *prefix;
        char       addp[32];

        /* get new block size of coarse matrices */
        ierr = MatGetBlockSizes(Prol11, NULL, &bs);CHKERRQ(ierr);

        if (pc_gamg->ops->optprolongator) {
          /* smooth */
          ierr = pc_gamg->ops->optprolongator(pc, Aarr[level], &Prol11);CHKERRQ(ierr);
        }

        if (pc_gamg->use_aggs_in_asm) {
          PetscInt bs;
          ierr = MatGetBlockSizes(Prol11, &bs, NULL);CHKERRQ(ierr);
          ierr = PetscCDGetASMBlocks(agg_lists, bs, Gmat, &nASMBlocksArr[level], &ASMLocalIDsArr[level]);CHKERRQ(ierr);
        }

        ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
        ierr = MatSetOptionsPrefix(Prol11,prefix);CHKERRQ(ierr);
        ierr = PetscSNPrintf(addp,sizeof(addp),"pc_gamg_prolongator_%d_",level);CHKERRQ(ierr);
        ierr = MatAppendOptionsPrefix(Prol11,addp);CHKERRQ(ierr);
        /* if we reuse the prolongator, then it is better to generate the transpose by default with CUDA
           Such behaviour can be adapted with -pc_gamg_prolongator_ prefixed options */
#if defined(PETSC_HAVE_CUDA)
        {
          PetscBool ismpiaij;

          ierr = PetscObjectBaseTypeCompare((PetscObject)Prol11,MATMPIAIJ,&ismpiaij);CHKERRQ(ierr);
          if (ismpiaij) {
            Mat Prol_d,Prol_o;

            ierr = MatMPIAIJGetSeqAIJ(Prol11,&Prol_d,&Prol_o,NULL);CHKERRQ(ierr);
            ierr = MatSeqAIJCUSPARSESetGenerateTranspose(Prol_d,pc_gamg->reuse_prol);CHKERRQ(ierr);
            ierr = MatSeqAIJCUSPARSESetGenerateTranspose(Prol_o,pc_gamg->reuse_prol);CHKERRQ(ierr);
          } else {
            ierr = MatSeqAIJCUSPARSESetGenerateTranspose(Prol11,pc_gamg->reuse_prol);CHKERRQ(ierr);
          }
        }
#endif
        ierr = MatSetFromOptions(Prol11);CHKERRQ(ierr);
        Parr[level1] = Prol11;
      } else Parr[level1] = NULL; /* failed to coarsen */

      ierr = MatDestroy(&Gmat);CHKERRQ(ierr);
      ierr = PetscCDDestroy(agg_lists);CHKERRQ(ierr);
    } /* construct prolongator scope */
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
#endif
    if (!level) Aarr[0] = Pmat; /* use Pmat for finest level setup */
    if (!Parr[level1]) { /* failed to coarsen */
      ierr = PetscInfo1(pc,"Stop gridding, level %D\n",level);CHKERRQ(ierr);
#if defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES
      ierr = PetscLogStagePop();CHKERRQ(ierr);
#endif
      break;
    }
#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventBegin(petsc_gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
#endif
    ierr = MatGetSize(Parr[level1], &M, &N);CHKERRQ(ierr); /* N is the next M, a loop test variable */
    if (is_last) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_last was already set; should not coarsen past the last level");
    if (N <= pc_gamg->coarse_eq_limit) is_last = PETSC_TRUE;
    if (level1 == pc_gamg->Nlevels-1) is_last = PETSC_TRUE;
    ierr = pc_gamg->ops->createlevel(pc, Aarr[level], bs, &Parr[level1], &Aarr[level1], &nactivepe, NULL, is_last);CHKERRQ(ierr);

#if defined PETSC_GAMG_USE_LOG
    ierr = PetscLogEventEnd(petsc_gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
#endif
    ierr = MatGetSize(Aarr[level1], &M, &N);CHKERRQ(ierr); /* M is the loop test variable */
    ierr = MatGetInfo(Aarr[level1], MAT_GLOBAL_SUM, &info);CHKERRQ(ierr);
    nnztot += info.nz_used;
    ierr = PetscInfo5(pc,"%d) N=%D, n data cols=%d, nnz/row (ave)=%d, %d active pes\n",level1,M,pc_gamg->data_cell_cols,(int)(info.nz_used/(PetscReal)M),nactivepe);CHKERRQ(ierr);

#if (defined PETSC_GAMG_USE_LOG && defined GAMG_STAGES)
    ierr = PetscLogStagePop();CHKERRQ(ierr);
#endif
    /* stop if one node or one proc -- could pull back for singular problems */
    if ((pc_gamg->data_cell_cols && M/pc_gamg->data_cell_cols < 2) || (!pc_gamg->data_cell_cols && M/bs < 2)) {
      ierr = PetscInfo2(pc,"HARD stop of coarsening on level %D.  Grid too small: %D block nodes\n",level,M/bs);CHKERRQ(ierr);
      level++;
      break;
    }
  } /* levels */
  ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);

  ierr = PetscInfo2(pc,"%D levels, grid complexity = %g\n",level+1,nnztot/nnz0);CHKERRQ(ierr);
  pc_gamg->Nlevels = level + 1;
  fine_level       = level;
  ierr             = PCMGSetLevels(pc,pc_gamg->Nlevels,NULL);CHKERRQ(ierr);

  if (pc_gamg->Nlevels > 1) { /* don't setup MG if one level */
    /* set default smoothers & set operators */
    for (lidx = 1, level = pc_gamg->Nlevels-2; lidx <= fine_level; lidx++, level--) {
      KSP smoother;
      PC  subpc;

      ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);

      ierr = KSPSetNormType(smoother, KSP_NORM_NONE);CHKERRQ(ierr);
      /* set ops */
      ierr = KSPSetOperators(smoother, Aarr[level], Aarr[level]);CHKERRQ(ierr);
      ierr = PCMGSetInterpolation(pc, lidx, Parr[level+1]);CHKERRQ(ierr);

      /* set defaults */
      ierr = KSPSetType(smoother, KSPCHEBYSHEV);CHKERRQ(ierr);

      /* set blocks for ASM smoother that uses the 'aggregates' */
      if (pc_gamg->use_aggs_in_asm) {
        PetscInt sz;
        IS       *iss;

        sz   = nASMBlocksArr[level];
        iss  = ASMLocalIDsArr[level];
        ierr = PCSetType(subpc, PCASM);CHKERRQ(ierr);
        ierr = PCASMSetOverlap(subpc, 0);CHKERRQ(ierr);
        ierr = PCASMSetType(subpc,PC_ASM_BASIC);CHKERRQ(ierr);
        if (!sz) {
          IS       is;
          ierr = ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is);CHKERRQ(ierr);
          ierr = PCASMSetLocalSubdomains(subpc, 1, NULL, &is);CHKERRQ(ierr);
          ierr = ISDestroy(&is);CHKERRQ(ierr);
        } else {
          PetscInt kk;
          ierr = PCASMSetLocalSubdomains(subpc, sz, NULL, iss);CHKERRQ(ierr);
          for (kk=0; kk<sz; kk++) {
            ierr = ISDestroy(&iss[kk]);CHKERRQ(ierr);
          }
          ierr = PetscFree(iss);CHKERRQ(ierr);
        }
        ASMLocalIDsArr[level] = NULL;
        nASMBlocksArr[level]  = 0;
      } else {
        ierr = PCSetType(subpc, PCSOR);CHKERRQ(ierr);
      }
    }
    {
      /* coarse grid */
      KSP smoother,*k2; PC subpc,pc2; PetscInt ii,first;
      Mat Lmat = Aarr[(level=pc_gamg->Nlevels-1)]; lidx = 0;
      ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
      ierr = KSPSetOperators(smoother, Lmat, Lmat);CHKERRQ(ierr);
      if (!pc_gamg->use_parallel_coarse_grid_solver) {
        ierr = KSPSetNormType(smoother, KSP_NORM_NONE);CHKERRQ(ierr);
        ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);
        ierr = PCSetType(subpc, PCBJACOBI);CHKERRQ(ierr);
        ierr = PCSetUp(subpc);CHKERRQ(ierr);
        ierr = PCBJacobiGetSubKSP(subpc,&ii,&first,&k2);CHKERRQ(ierr);
        if (ii != 1) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"ii %D is not one",ii);
        ierr = KSPGetPC(k2[0],&pc2);CHKERRQ(ierr);
        ierr = PCSetType(pc2, PCLU);CHKERRQ(ierr);
        ierr = PCFactorSetShiftType(pc2,MAT_SHIFT_INBLOCKS);CHKERRQ(ierr);
        ierr = KSPSetTolerances(k2[0],PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,1);CHKERRQ(ierr);
        ierr = KSPSetType(k2[0], KSPPREONLY);CHKERRQ(ierr);
        /* This flag gets reset by PCBJacobiGetSubKSP(), but our BJacobi really does the same algorithm everywhere (and in
         * fact, all but one process will have zero dofs), so we reset the flag to avoid having PCView_BJacobi attempt to
         * view every subdomain as though they were different. */
        ((PC_BJacobi*)subpc->data)->same_local_solves = PETSC_TRUE;
      }
    }

    /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
    ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
    ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);
    ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);

    ierr = PCSetUp_MG(pc);CHKERRQ(ierr);

    /* setup cheby eigen estimates from SA */
    for (lidx = 1, level = pc_gamg->Nlevels-2; level >= 0 ; lidx++, level--) {
      KSP       smoother;
      PetscBool ischeb;
      ierr = PCMGGetSmoother(pc, lidx, &smoother);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject)smoother,KSPCHEBYSHEV,&ischeb);CHKERRQ(ierr);
      if (ischeb) {
        KSP_Chebyshev  *cheb = (KSP_Chebyshev*)smoother->data;
        if (mg->max_eigen_DinvA[level] > 0 && cheb->emax == 0.) { /* let command line emax override using SA's eigenvalues */
          PC        subpc;
          PetscBool isjac;
          ierr = KSPGetPC(smoother, &subpc);CHKERRQ(ierr);
          ierr = PetscObjectTypeCompare((PetscObject)subpc,PCJACOBI,&isjac);CHKERRQ(ierr);
          if ((isjac && pc_gamg->use_sa_esteig==-1) || pc_gamg->use_sa_esteig==1) {
            PetscReal       emax,emin;
            emin = mg->min_eigen_DinvA[level];
            emax = mg->max_eigen_DinvA[level];
            ierr = PetscInfo4(pc,"PCSetUp_GAMG: call KSPChebyshevSetEigenvalues on level %D (N=%D) with emax = %g emin = %g\n",level,Aarr[level]->rmap->N,(double)emax,(double)emin);CHKERRQ(ierr);
            cheb->emin_computed = emin;
            cheb->emax_computed = emax;
            ierr = KSPChebyshevSetEigenvalues(smoother, cheb->tform[2]*emin + cheb->tform[3]*emax, cheb->tform[0]*emin + cheb->tform[1]*emax);CHKERRQ(ierr);
          }
        }
      }
    }

    /* clean up */
    for (level=1; level<pc_gamg->Nlevels; level++) {
      ierr = MatDestroy(&Parr[level]);CHKERRQ(ierr);
      ierr = MatDestroy(&Aarr[level]);CHKERRQ(ierr);
    }

  } else {
    KSP smoother;
    ierr = PetscInfo(pc,"One level solver used (system is seen as DD). Using default solver.\n");CHKERRQ(ierr);
    ierr = PCMGGetSmoother(pc, 0, &smoother);CHKERRQ(ierr);
    ierr = KSPSetOperators(smoother, Aarr[0], Aarr[0]);CHKERRQ(ierr);
    ierr = KSPSetType(smoother, KSPPREONLY);CHKERRQ(ierr);
    ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/* ------------------------------------------------------------------------- */
/*
 PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
   that was created with PCCreate_GAMG().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg     = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg= (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_GAMG(pc);CHKERRQ(ierr);
  if (pc_gamg->ops->destroy) {
    ierr = (*pc_gamg->ops->destroy)(pc);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_gamg->ops);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg->gamg_type_name);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetProcEqLim - Set the number of equations to aim for per process on the coarse grids via processor reduction.

   Logically Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - the number of equations

   Options Database Key:
.  -pc_gamg_process_eq_limit <limit>

   Notes:
    GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are
    around <limit> equations on each process that has degrees of freedom

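   Example usage (a minimal sketch; error checking omitted):
.vb
   PCSetType(pc,PCGAMG);
   PCGAMGSetProcEqLim(pc,1000); /* aim for roughly 1000 equations per active process on coarse grids */
.ve
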
   Level: intermediate

.seealso: PCGAMGSetCoarseEqLim()
@*/
PetscErrorCode  PCGAMGSetProcEqLim(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetProcEqLim_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetProcEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  if (n>0) pc_gamg->min_eq_proc = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetCoarseEqLim - Set the maximum number of equations on the coarsest grid.

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - maximum number of equations to aim for

   Options Database Key:
.  -pc_gamg_coarse_eq_limit <limit>

   Notes: For example -pc_gamg_coarse_eq_limit 1000 will stop coarsening once the coarse grid
     has fewer than 1000 unknowns.

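   Example usage (a minimal sketch; error checking omitted):
.vb
   PCSetType(pc,PCGAMG);
   PCGAMGSetCoarseEqLim(pc,1000); /* coarsening stops near 1000 global equations */
.ve
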
   Level: intermediate

.seealso: PCGAMGSetProcEqLim()
@*/
PetscErrorCode PCGAMGSetCoarseEqLim(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetCoarseEqLim_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetCoarseEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  if (n>0) pc_gamg->coarse_eq_limit = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetRepartition - Repartition the degrees of freedom across the processors on the coarser grids

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - PETSC_TRUE or PETSC_FALSE

   Options Database Key:
.  -pc_gamg_repartition <true,false>

   Notes:
    this will generally improve the load balancing of the work on each level

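   Example usage (a minimal sketch; error checking omitted):
.vb
   PCGAMGSetRepartition(pc,PETSC_TRUE); /* rebalance coarse grids, at some extra setup cost */
.ve
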
   Level: intermediate

.seealso: PCGAMGSetProcEqLim(), PCGAMGSetCoarseEqLim()
@*/
PetscErrorCode PCGAMGSetRepartition(PC pc, PetscBool n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetRepartition_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetRepartition_GAMG(PC pc, PetscBool n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->repart = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetEstEigKSPMaxIt - Set the number of KSP iterations used by the eigenvalue estimator (for the Chebyshev smoothers)

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - number of iterations

   Options Database Key:
.  -pc_gamg_esteig_ksp_max_it <its>

   Level: intermediate

.seealso: PCGAMGSetEstEigKSPType(), PCGAMGSetUseSAEstEig()
@*/
PetscErrorCode PCGAMGSetEstEigKSPMaxIt(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetEstEigKSPMaxIt_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetEstEigKSPMaxIt_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->esteig_max_it = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetUseSAEstEig - Use the eigenvalue estimate from the smoothed aggregation setup for the Chebyshev smoothers

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - PETSC_TRUE to use the smoothed aggregation eigenvalue estimate

   Options Database Key:
.  -pc_gamg_use_sa_esteig <true,false>

   Notes:
    The eigenvalue estimate is computed while constructing the smoothed prolongator and can be reused by
    the Chebyshev smoothers, avoiding a separate estimation solve.

   Level: intermediate

.seealso: PCGAMGSetEstEigKSPMaxIt(), PCGAMGSetEstEigKSPType()
@*/
PetscErrorCode PCGAMGSetUseSAEstEig(PC pc, PetscBool n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetUseSAEstEig_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetUseSAEstEig_GAMG(PC pc, PetscBool n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_sa_esteig = n ? 1 : 0;
  PetscFunctionReturn(0);
}

/*@C
   PCGAMGSetEstEigKSPType - Set the type of KSP used by the eigenvalue estimator (for the Chebyshev smoothers)

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  t - the KSP type

   Options Database Key:
.  -pc_gamg_esteig_ksp_type <type>

   Level: intermediate

.seealso: PCGAMGSetEstEigKSPMaxIt(), PCGAMGSetUseSAEstEig()
@*/
PetscErrorCode PCGAMGSetEstEigKSPType(PC pc, char t[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetEstEigKSPType_C",(PC,char[]),(pc,t));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetEstEigKSPType_GAMG(PC pc, char t[])
{
  PetscErrorCode ierr;
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscStrcpy(pc_gamg->esteig_type,t);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetEigenvalues - Set the user-provided eigenvalue estimates (maximum and minimum) to use in the smoothers, instead of computing them

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
.  emax - max eigenvalue
-  emin - min eigenvalue

   Options Database Key:
.  -pc_gamg_eigenvalues

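   Example usage (a minimal sketch; the bounds are illustrative, not recommended values):
.vb
   PCGAMGSetEigenvalues(pc,2.0,0.1); /* emax = 2.0, emin = 0.1: user-supplied spectral bounds */
.ve
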
   Level: intermediate

.seealso: PCGAMGSetEstEigKSPMaxIt(), PCGAMGSetUseSAEstEig(), PCGAMGSetEstEigKSPType()
@*/
PetscErrorCode PCGAMGSetEigenvalues(PC pc, PetscReal emax,PetscReal emin)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetEigenvalues_C",(PC,PetscReal,PetscReal),(pc,emax,emin));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
static PetscErrorCode PCGAMGSetEigenvalues_GAMG(PC pc,PetscReal emax,PetscReal emin)
{
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  if (emax <= emin) SETERRQ2(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_INCOMP,"Maximum eigenvalue must be larger than minimum: max %g min %g",(double)emax,(double)emin);
  if (emax*emin <= 0.0) SETERRQ2(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_INCOMP,"Both eigenvalues must be of the same sign: max %g min %g",(double)emax,(double)emin);
  pc_gamg->emax = emax;
  pc_gamg->emin = emin;

  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetReuseInterpolation - Reuse the prolongation operators when rebuilding the algebraic multigrid preconditioner

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  n - PETSC_TRUE or PETSC_FALSE

   Options Database Key:
.  -pc_gamg_reuse_interpolation <true,false>

   Level: intermediate

   Notes:
    this may negatively affect the convergence rate of the method on new matrices if the matrix entries change
    a great deal, but allows rebuilding the preconditioner more quickly.

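   Example usage (a minimal sketch; error checking omitted):
.vb
   PCGAMGSetReuseInterpolation(pc,PETSC_TRUE); /* keep the prolongators; only the Galerkin products are redone */
.ve
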
1086 @*/
1087 PetscErrorCode PCGAMGSetReuseInterpolation(PC pc, PetscBool n)
1088 {
1089   PetscErrorCode ierr;
1090 
1091   PetscFunctionBegin;
1092   PetscValidHeaderSpecific(pc,PC_CLASSID,1);
1093   ierr = PetscTryMethod(pc,"PCGAMGSetReuseInterpolation_C",(PC,PetscBool),(pc,n));CHKERRQ(ierr);
1094   PetscFunctionReturn(0);
1095 }
1096 
1097 static PetscErrorCode PCGAMGSetReuseInterpolation_GAMG(PC pc, PetscBool n)
1098 {
1099   PC_MG   *mg      = (PC_MG*)pc->data;
1100   PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
1101 
1102   PetscFunctionBegin;
1103   pc_gamg->reuse_prol = n;
1104   PetscFunctionReturn(0);
1105 }

/*@
   PCGAMGASMSetUseAggs - Have the PCGAMG smoother on each level use the aggregates defined by the coarsening process as the subdomains for the additive Schwarz preconditioner.

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  flg - PETSC_TRUE to use aggregates, PETSC_FALSE to not

   Options Database Key:
.  -pc_gamg_asm_use_agg

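   Example usage (a minimal sketch; error checking omitted):
.vb
   PCGAMGASMSetUseAggs(pc,PETSC_TRUE); /* the level smoothers become PCASM with aggregate subdomains */
.ve
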
   Level: intermediate

.seealso: ()
@*/
PetscErrorCode PCGAMGASMSetUseAggs(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGASMSetUseAggs_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGASMSetUseAggs_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_aggs_in_asm = flg;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetUseParallelCoarseGridSolve - allow a parallel coarse grid solver

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  flg - PETSC_TRUE to not force coarse grid onto one processor

   Options Database Key:
.  -pc_gamg_use_parallel_coarse_grid_solver

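   Example usage (a minimal sketch; error checking omitted):
.vb
   PCGAMGSetUseParallelCoarseGridSolve(pc,PETSC_TRUE); /* do not force the coarsest grid onto one process */
.ve
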
   Level: intermediate

.seealso: PCGAMGSetCoarseGridLayoutType(), PCGAMGSetCpuPinCoarseGrids()
@*/
PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetUseParallelCoarseGridSolve_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetUseParallelCoarseGridSolve_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_parallel_coarse_grid_solver = flg;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetCpuPinCoarseGrids - pin the reduced (coarse) grids to the CPU

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  flg - PETSC_TRUE to pin the coarse grids to the CPU

   Options Database Key:
.  -pc_gamg_cpu_pin_coarse_grids

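   Example usage (a minimal sketch; error checking omitted):
.vb
   PCGAMGSetCpuPinCoarseGrids(pc,PETSC_TRUE); /* keep the small coarse grids on the CPU when using GPUs */
.ve
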
   Level: intermediate

.seealso: PCGAMGSetCoarseGridLayoutType(), PCGAMGSetUseParallelCoarseGridSolve()
@*/
PetscErrorCode PCGAMGSetCpuPinCoarseGrids(PC pc, PetscBool flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetCpuPinCoarseGrids_C",(PC,PetscBool),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetCpuPinCoarseGrids_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->cpu_pin_coarse_grids = flg;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetCoarseGridLayoutType - set the layout of the reduced coarse grids: on processes in natural order (compact) or spread out over the communicator (spread)

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  flg - the layout type (PCGAMG_LAYOUT_COMPACT or PCGAMG_LAYOUT_SPREAD)

   Options Database Key:
.  -pc_gamg_coarse_grid_layout_type

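   Example usage (a minimal sketch; error checking omitted):
.vb
   PCGAMGSetCoarseGridLayoutType(pc,PCGAMG_LAYOUT_SPREAD); /* spread reduced grids across the communicator */
.ve
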
   Level: intermediate

.seealso: PCGAMGSetUseParallelCoarseGridSolve(), PCGAMGSetCpuPinCoarseGrids()
@*/
PetscErrorCode PCGAMGSetCoarseGridLayoutType(PC pc, PCGAMGLayoutType flg)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetCoarseGridLayoutType_C",(PC,PCGAMGLayoutType),(pc,flg));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetCoarseGridLayoutType_GAMG(PC pc, PCGAMGLayoutType flg)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->layout_type = flg;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetNlevels -  Sets the maximum number of levels PCGAMG will use

   Not collective on PC

   Input Parameters:
+  pc - the preconditioner
-  n - the maximum number of levels to use

   Options Database Key:
.  -pc_mg_levels

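   Example usage (a minimal sketch; error checking omitted):
.vb
   PCGAMGSetNlevels(pc,4); /* cap the hierarchy at 4 levels */
.ve
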
   Level: intermediate

.seealso: ()
@*/
PetscErrorCode PCGAMGSetNlevels(PC pc, PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetNlevels_C",(PC,PetscInt),(pc,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetNlevels_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->Nlevels = n;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetThreshold - Relative threshold to use for dropping edges in the aggregation graph

   Not collective on PC

   Input Parameters:
+  pc - the preconditioner context
.  threshold - array of threshold values for the finest n levels; 0.0 means keep all nonzero entries in the graph; negative means keep even zero entries in the graph
-  n - number of threshold values provided in the array

   Options Database Key:
.  -pc_gamg_threshold <threshold>

   Notes:
    Increasing the threshold decreases the rate of coarsening. Conversely, reducing the threshold increases the rate of coarsening (aggressive coarsening), which reduces the complexity of the coarse grids but generally results in slower solver convergence rates. Reducing coarse grid complexity also reduces the cost of Galerkin coarse grid construction considerably.
    Before coarsening or aggregating the graph, GAMG removes small values from the graph with this threshold, thus reducing the coupling in the graph and giving a different (perhaps better) coarse set of points.

    If n is less than the total number of coarsenings (see PCGAMGSetNlevels()), then threshold scaling (see PCGAMGSetThresholdScale()) is used for each successive coarsening.
    In this case, PCGAMGSetThresholdScale() must be called before PCGAMGSetThreshold().
    If n is greater than the total number of levels, the excess entries in threshold will not be used.

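   Example usage (a minimal sketch; the values are illustrative):
.vb
   PetscReal th[2] = {0.01,0.005};
   PCGAMGSetThreshold(pc,th,2); /* finest two levels; coarser levels follow the threshold scale */
.ve
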
   Level: intermediate

.seealso: PCGAMGFilterGraph(), PCGAMGSetSquareGraph()
@*/
PetscErrorCode PCGAMGSetThreshold(PC pc, PetscReal v[], PetscInt n)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  if (n) PetscValidRealPointer(v,2);
  ierr = PetscTryMethod(pc,"PCGAMGSetThreshold_C",(PC,PetscReal[],PetscInt),(pc,v,n));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetThreshold_GAMG(PC pc, PetscReal v[], PetscInt n)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscInt i;
  PetscFunctionBegin;
  for (i=0; i<PetscMin(n,PETSC_MG_MAXLEVELS); i++) pc_gamg->threshold[i] = v[i];
  for (; i<PETSC_MG_MAXLEVELS; i++) pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;
  PetscFunctionReturn(0);
}

/*@
   PCGAMGSetThresholdScale - Relative threshold reduction at each level

   Not collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  scale - the threshold value reduction, usually < 1.0

   Options Database Key:
.  -pc_gamg_threshold_scale <v>

   Notes:
   The initial threshold (for an arbitrary number of levels starting from the finest) can be set with PCGAMGSetThreshold().
   This scaling is used for each subsequent coarsening, but must be called before PCGAMGSetThreshold().

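   Example usage (a minimal sketch; the values are illustrative):
.vb
   PetscReal th[1] = {0.02};
   PCGAMGSetThresholdScale(pc,0.5); /* must precede PCGAMGSetThreshold() */
   PCGAMGSetThreshold(pc,th,1);     /* unset levels get 0.02*0.5, 0.02*0.25, ... */
.ve
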
   Level: advanced

.seealso: PCGAMGSetThreshold()
@*/
PetscErrorCode PCGAMGSetThresholdScale(PC pc, PetscReal v)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetThresholdScale_C",(PC,PetscReal),(pc,v));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGSetThresholdScale_GAMG(PC pc, PetscReal v)
{
  PC_MG   *mg      = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscFunctionBegin;
  pc_gamg->threshold_scale = v;
  PetscFunctionReturn(0);
}
1372 
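/*
  A small ordering sketch (not part of the original source): the scale must be
  set before the thresholds so that unspecified coarser levels inherit a scaled
  value. Values are illustrative.

    PetscReal th[1] = {0.01};

    ierr = PCGAMGSetThresholdScale(pc,0.5);CHKERRQ(ierr); // each coarser level halves the threshold
    ierr = PCGAMGSetThreshold(pc,th,1);CHKERRQ(ierr);     // th[0] applies to the finest level

  This yields thresholds 0.01, 0.005, 0.0025, ... on successively coarser levels.
*/
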
/*@C
   PCGAMGSetType - Set the type of algorithm GAMG should use

   Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  type - PCGAMGAGG, PCGAMGGEO, or PCGAMGCLASSICAL

   Options Database Key:
.  -pc_gamg_type <agg,geo,classical> - type of algebraic multigrid to apply

   Level: intermediate

.seealso: PCGAMGGetType(), PCGAMG, PCGAMGType
@*/
PetscErrorCode PCGAMGSetType(PC pc, PCGAMGType type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscTryMethod(pc,"PCGAMGSetType_C",(PC,PCGAMGType),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   PCGAMGGetType - Get the type of algorithm GAMG uses

   Collective on PC

   Input Parameter:
.  pc - the preconditioner context

   Output Parameter:
.  type - the type of algorithm used

   Level: intermediate

.seealso: PCGAMGSetType(), PCGAMGType
@*/
PetscErrorCode PCGAMGGetType(PC pc, PCGAMGType *type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  ierr = PetscUseMethod(pc,"PCGAMGGetType_C",(PC,PCGAMGType*),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode PCGAMGGetType_GAMG(PC pc, PCGAMGType *type)
{
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  *type = pc_gamg->type;
  PetscFunctionReturn(0);
}

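/*
  Illustrative sketch (not part of the original source): select the smoothed
  aggregation coarsening and read the choice back. "pc" is assumed to already
  be of type PCGAMG.

    PCGAMGType t;

    ierr = PCGAMGSetType(pc,PCGAMGAGG);CHKERRQ(ierr);
    ierr = PCGAMGGetType(pc,&t);CHKERRQ(ierr); // t is now PCGAMGAGG

  The command-line equivalent is -pc_gamg_type agg.
*/
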
static PetscErrorCode PCGAMGSetType_GAMG(PC pc, PCGAMGType type)
{
  PetscErrorCode ierr,(*r)(PC);
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->type = type;
  ierr = PetscFunctionListFind(GAMGList,type,&r);CHKERRQ(ierr);
  if (!r) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_UNKNOWN_TYPE,"Unknown GAMG type %s given",type);
  if (pc_gamg->ops->destroy) {
    ierr = (*pc_gamg->ops->destroy)(pc);CHKERRQ(ierr);
    ierr = PetscMemzero(pc_gamg->ops,sizeof(struct _PCGAMGOps));CHKERRQ(ierr);
    pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
    /* cleaning up common data in pc_gamg - this should disappear someday */
    pc_gamg->data_cell_cols = 0;
    pc_gamg->data_cell_rows = 0;
    pc_gamg->orig_data_cell_cols = 0;
    pc_gamg->orig_data_cell_rows = 0;
    ierr = PetscFree(pc_gamg->data);CHKERRQ(ierr);
    pc_gamg->data_sz = 0;
  }
  ierr = PetscFree(pc_gamg->gamg_type_name);CHKERRQ(ierr);
  ierr = PetscStrallocpy(type,&pc_gamg->gamg_type_name);CHKERRQ(ierr);
  ierr = (*r)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCMGGetGridComplexity - compute coarse grid complexity of MG hierarchy

   Input Parameter:
.  pc - the preconditioner context

   Output Parameter:
.  gc - grid complexity = sum_i(nnz_i) / nnz_0

   Level: advanced
*/
static PetscErrorCode PCMGGetGridComplexity(PC pc, PetscReal *gc)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_MG_Levels   **mglevels = mg->levels;
  PetscInt       lev;
  PetscLogDouble nnz0 = 0, sgc = 0;
  MatInfo        info;

  PetscFunctionBegin;
  if (!pc->setupcalled) {
    *gc = 0;
    PetscFunctionReturn(0);
  }
  if (!mg->nlevels) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MG has no levels");
  for (lev=0; lev<mg->nlevels; lev++) {
    Mat dB;
    ierr = KSPGetOperators(mglevels[lev]->smoothd,NULL,&dB);CHKERRQ(ierr);
    ierr = MatGetInfo(dB,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr); /* global reduction */
    sgc += info.nz_used;
    if (lev==mg->nlevels-1) nnz0 = info.nz_used;
  }
  if (nnz0 > 0) *gc = (PetscReal)(sgc/nnz0);
  else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of nonzeros on the finest level is not available");
  PetscFunctionReturn(0);
}

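/*
  Worked example for the formula above (illustrative numbers): a three-level
  hierarchy whose operators have nnz counts 40, 200, and 1000 (coarsest to
  finest) has grid complexity (40 + 200 + 1000) / 1000 = 1.24. Values close
  to 1 indicate coarse grids that are cheap relative to the fine operator.
*/
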
static PetscErrorCode PCView_GAMG(PC pc,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscInt       i;
  PetscReal      gc=0;
  PetscFunctionBegin;
  ierr = PetscViewerASCIIPrintf(viewer,"    GAMG specific options\n");CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer,"      Threshold for dropping small values in graph on each level =");CHKERRQ(ierr);
  for (i=0;i<pc_gamg->current_level;i++) {
    ierr = PetscViewerASCIIPrintf(viewer," %g",(double)pc_gamg->threshold[i]);CHKERRQ(ierr);
  }
  ierr = PetscViewerASCIIPrintf(viewer,"\n");CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer,"      Threshold scaling factor for each level not specified = %g\n",(double)pc_gamg->threshold_scale);CHKERRQ(ierr);
  if (pc_gamg->use_aggs_in_asm) {
    ierr = PetscViewerASCIIPrintf(viewer,"      Using aggregates from coarsening process to define subdomains for PCASM\n");CHKERRQ(ierr);
  }
  if (pc_gamg->use_parallel_coarse_grid_solver) {
    ierr = PetscViewerASCIIPrintf(viewer,"      Using parallel coarse grid solver (all coarse grid equations not put on one process)\n");CHKERRQ(ierr);
  }
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
  if (pc_gamg->cpu_pin_coarse_grids) {
    /* ierr = PetscViewerASCIIPrintf(viewer,"      Pinning coarse grids to the CPU)\n");CHKERRQ(ierr); */
  }
#endif
  /* if (pc_gamg->layout_type==PCGAMG_LAYOUT_COMPACT) { */
  /*   ierr = PetscViewerASCIIPrintf(viewer,"      Put reduced grids on processes in natural order (ie, 0,1,2...)\n");CHKERRQ(ierr); */
  /* } else { */
  /*   ierr = PetscViewerASCIIPrintf(viewer,"      Put reduced grids on whole machine (ie, 0,1*f,2*f...,np-f)\n");CHKERRQ(ierr); */
  /* } */
  if (pc_gamg->ops->view) {
    ierr = (*pc_gamg->ops->view)(pc,viewer);CHKERRQ(ierr);
  }
  ierr = PCMGGetGridComplexity(pc,&gc);CHKERRQ(ierr);
  ierr = PetscViewerASCIIPrintf(viewer,"      Complexity:    grid = %g\n",(double)gc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

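/*
  For orientation (a sketch; values illustrative): the printf calls above
  produce -ksp_view output along the lines of

      GAMG specific options
        Threshold for dropping small values in graph on each level = 0.01 0.005
        Threshold scaling factor for each level not specified = 0.5
        Complexity:    grid = 1.24
*/
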
PetscErrorCode PCSetFromOptions_GAMG(PetscOptionItems *PetscOptionsObject,PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg      = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscBool      flag,f2;
  MPI_Comm       comm;
  char           prefix[256],tname[32];
  PetscInt       i,n;
  const char     *pcpre;
  static const char *LayoutTypes[] = {"compact","spread","PCGAMGLayoutType","PC_GAMG_LAYOUT",NULL};
  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
  ierr = PetscOptionsHead(PetscOptionsObject,"GAMG options");CHKERRQ(ierr);
  ierr = PetscOptionsFList("-pc_gamg_type","Type of AMG method","PCGAMGSetType",GAMGList, pc_gamg->gamg_type_name, tname, sizeof(tname), &flag);CHKERRQ(ierr);
  if (flag) {
    ierr = PCGAMGSetType(pc,tname);CHKERRQ(ierr);
  }
  ierr = PetscOptionsFList("-pc_gamg_esteig_ksp_type","Krylov method for eigen estimator","PCGAMGSetEstEigKSPType",KSPList,pc_gamg->esteig_type,tname,sizeof(tname),&flag);CHKERRQ(ierr);
  if (flag) {
    ierr = PCGAMGSetEstEigKSPType(pc,tname);CHKERRQ(ierr);
  }
  ierr = PetscOptionsBool("-pc_gamg_repartition","Repartition coarse grids","PCGAMGSetRepartition",pc_gamg->repart,&pc_gamg->repart,NULL);CHKERRQ(ierr);
  f2 = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_gamg_use_sa_esteig","Use eigen estimate from smoothed aggregation for smoother","PCGAMGSetUseSAEstEig",f2,&f2,&flag);CHKERRQ(ierr);
  if (flag) pc_gamg->use_sa_esteig = f2 ? 1 : 0;
  ierr = PetscOptionsBool("-pc_gamg_reuse_interpolation","Reuse prolongation operator","PCGAMGSetReuseInterpolation",pc_gamg->reuse_prol,&pc_gamg->reuse_prol,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_gamg_asm_use_agg","Use aggregation aggregates for ASM smoother","PCGAMGASMSetUseAggs",pc_gamg->use_aggs_in_asm,&pc_gamg->use_aggs_in_asm,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_gamg_use_parallel_coarse_grid_solver","Use parallel coarse grid solver (otherwise put last grid on one process)","PCGAMGSetUseParallelCoarseGridSolve",pc_gamg->use_parallel_coarse_grid_solver,&pc_gamg->use_parallel_coarse_grid_solver,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_gamg_cpu_pin_coarse_grids","Pin coarse grids to the CPU","PCGAMGSetCpuPinCoarseGrids",pc_gamg->cpu_pin_coarse_grids,&pc_gamg->cpu_pin_coarse_grids,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_gamg_coarse_grid_layout_type","compact: place reduced grids on processes in natural order; spread: distribute to whole machine for more memory bandwidth","PCGAMGSetCoarseGridLayoutType",LayoutTypes,(PetscEnum)pc_gamg->layout_type,(PetscEnum*)&pc_gamg->layout_type,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_gamg_process_eq_limit","Limit (goal) on number of equations per process on coarse grids","PCGAMGSetProcEqLim",pc_gamg->min_eq_proc,&pc_gamg->min_eq_proc,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_gamg_esteig_ksp_max_it","Number of iterations of eigen estimator","PCGAMGSetEstEigKSPMaxIt",pc_gamg->esteig_max_it,&pc_gamg->esteig_max_it,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_gamg_coarse_eq_limit","Limit on number of equations for the coarse grid","PCGAMGSetCoarseEqLim",pc_gamg->coarse_eq_limit,&pc_gamg->coarse_eq_limit,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_gamg_threshold_scale","Scaling of threshold for each level not specified","PCGAMGSetThresholdScale",pc_gamg->threshold_scale,&pc_gamg->threshold_scale,NULL);CHKERRQ(ierr);
  n = PETSC_MG_MAXLEVELS;
  ierr = PetscOptionsRealArray("-pc_gamg_threshold","Relative threshold to use for dropping edges in aggregation graph","PCGAMGSetThreshold",pc_gamg->threshold,&n,&flag);CHKERRQ(ierr);
  if (!flag || n < PETSC_MG_MAXLEVELS) {
    if (!flag || n < 1) n = 1; /* also guards reading threshold[-1] when the option is given with no values */
    i = n;
    do {pc_gamg->threshold[i] = pc_gamg->threshold[i-1]*pc_gamg->threshold_scale;} while (++i<PETSC_MG_MAXLEVELS);
  }
  ierr = PetscOptionsInt("-pc_mg_levels","Set number of MG levels","PCGAMGSetNlevels",pc_gamg->Nlevels,&pc_gamg->Nlevels,NULL);CHKERRQ(ierr);
  {
    PetscReal eminmax[2] = {0., 0.};
    n = 2;
    ierr = PetscOptionsRealArray("-pc_gamg_eigenvalues","extreme eigenvalues for smoothed aggregation","PCGAMGSetEigenvalues",eminmax,&n,&flag);CHKERRQ(ierr);
    if (flag) {
      if (n != 2) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_INCOMP,"-pc_gamg_eigenvalues: must specify 2 parameters, min and max eigenvalues");
      ierr = PCGAMGSetEigenvalues(pc, eminmax[1], eminmax[0]);CHKERRQ(ierr);
    }
  }
  /* set options for subtype */
  if (pc_gamg->ops->setfromoptions) {ierr = (*pc_gamg->ops->setfromoptions)(PetscOptionsObject,pc);CHKERRQ(ierr);}

  ierr = PCGetOptionsPrefix(pc, &pcpre);CHKERRQ(ierr);
  ierr = PetscSNPrintf(prefix,sizeof(prefix),"%spc_gamg_",pcpre ? pcpre : "");CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

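/*
  Typical command-line usage (a sketch; the options are those registered
  above, the program name and values are illustrative):

    ./app -ksp_type cg -pc_type gamg -pc_gamg_type agg \
          -pc_gamg_threshold 0.01 -pc_gamg_threshold_scale 0.5 \
          -pc_gamg_process_eq_limit 200 -pc_mg_levels 4
*/
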
/* -------------------------------------------------------------------------- */
/*MC
     PCGAMG - Geometric algebraic multigrid (AMG) preconditioner

   Options Database Keys:
+   -pc_gamg_type <type> - one of agg, geo, or classical
.   -pc_gamg_repartition  <true,default=false> - repartition the degrees of freedom across the coarse grids as they are determined
.   -pc_gamg_reuse_interpolation <true,default=false> - when rebuilding the algebraic multigrid preconditioner reuse the previously computed interpolations
.   -pc_gamg_asm_use_agg <true,default=false> - use the aggregates from the coarsening process to define the subdomains on each level for the PCASM smoother
.   -pc_gamg_process_eq_limit <limit, default=50> - GAMG will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit>
                                        equations on each process that has degrees of freedom
.   -pc_gamg_coarse_eq_limit <limit, default=50> - Set maximum number of equations on coarsest grid to aim for.
.   -pc_gamg_threshold[] <thresh,default=0> - Before aggregating the graph GAMG will remove small values from the graph on each level
-   -pc_gamg_threshold_scale <scale,default=1> - Scaling of threshold on each coarser grid if not specified

   Options Database Keys for default Aggregation:
+  -pc_gamg_agg_nsmooths <nsmooth, default=1> - number of smoothing steps to use with smoothed aggregation
.  -pc_gamg_sym_graph <true,default=false> - symmetrize the graph before computing the aggregation
-  -pc_gamg_square_graph <n,default=1> - number of levels to square the graph before aggregating it

   Multigrid options:
+  -pc_mg_cycles <v> - v or w, see PCMGSetCycleType()
.  -pc_mg_distinct_smoothup - configure the up and down (pre and post) smoothers separately, see PCMGSetDistinctSmoothUp()
.  -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
-  -pc_mg_levels <levels> - Number of levels of multigrid to use.

  Notes:
    In order to obtain good performance for PCGAMG for vector valued problems you must
       Call MatSetBlockSize() to indicate the number of degrees of freedom per grid point
       Call MatSetNearNullSpace() (or PCSetCoordinates() if solving the equations of elasticity) to indicate the near null space of the operator
       See the Users Manual Chapter 4 for more details
    A minimal setup sketch follows this manual page in the source.

  Level: intermediate

.seealso:  PCCreate(), PCSetType(), MatSetBlockSize(), PCMGType, PCSetCoordinates(), MatSetNearNullSpace(), PCGAMGSetType(), PCGAMGAGG, PCGAMGGEO, PCGAMGCLASSICAL, PCGAMGSetProcEqLim(),
           PCGAMGSetCoarseEqLim(), PCGAMGSetRepartition(), PCGAMGRegister(), PCGAMGSetReuseInterpolation(), PCGAMGASMSetUseAggs(), PCGAMGSetUseParallelCoarseGridSolve(), PCGAMGSetNlevels(), PCGAMGSetThreshold(), PCGAMGGetType(), PCGAMGSetUseSAEstEig(), PCGAMGSetEstEigKSPMaxIt(), PCGAMGSetEstEigKSPType()
M*/

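/*
  Minimal setup sketch referenced in the PCGAMG notes above (illustrative
  only; "A" is assumed to be an assembled matrix for a problem with 3 dofs
  per grid point, and "nlocal"/"coords" describe the local vertex coordinates):

    KSP ksp;
    PC  pc;

    ierr = MatSetBlockSize(A,3);CHKERRQ(ierr);                 // 3 dofs per grid point
    ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
    ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
    ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
    ierr = PCSetType(pc,PCGAMG);CHKERRQ(ierr);
    ierr = PCSetCoordinates(pc,3,nlocal,coords);CHKERRQ(ierr); // elasticity-style near null space
    ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
    ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
*/
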
PETSC_EXTERN PetscErrorCode PCCreate_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       i;
  PC_GAMG        *pc_gamg;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* register AMG type */
  ierr = PCGAMGInitializePackage();CHKERRQ(ierr);

  /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc, PCMG);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)pc, PCGAMG);CHKERRQ(ierr);

  /* create a supporting struct and attach it to pc */
  ierr         = PetscNewLog(pc,&pc_gamg);CHKERRQ(ierr);
  ierr         = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
  mg           = (PC_MG*)pc->data;
  mg->innerctx = pc_gamg;

  ierr = PetscNewLog(pc,&pc_gamg->ops);CHKERRQ(ierr);

  pc_gamg->setup_count = 0;
  /* these should be in subctx but repartitioning needs simple arrays */
  pc_gamg->data_sz = 0;
  pc_gamg->data    = NULL;

  /* overwrite the pointers of PCMG by the functions of base class PCGAMG */
  pc->ops->setfromoptions = PCSetFromOptions_GAMG;
  pc->ops->setup          = PCSetUp_GAMG;
  pc->ops->reset          = PCReset_GAMG;
  pc->ops->destroy        = PCDestroy_GAMG;
  mg->view                = PCView_GAMG;

  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCMGGetLevels_C",PCMGGetLevels_MG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCMGSetLevels_C",PCMGSetLevels_MG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetProcEqLim_C",PCGAMGSetProcEqLim_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCoarseEqLim_C",PCGAMGSetCoarseEqLim_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetRepartition_C",PCGAMGSetRepartition_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetEstEigKSPType_C",PCGAMGSetEstEigKSPType_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetEstEigKSPMaxIt_C",PCGAMGSetEstEigKSPMaxIt_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetEigenvalues_C",PCGAMGSetEigenvalues_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetUseSAEstEig_C",PCGAMGSetUseSAEstEig_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetReuseInterpolation_C",PCGAMGSetReuseInterpolation_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGASMSetUseAggs_C",PCGAMGASMSetUseAggs_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetUseParallelCoarseGridSolve_C",PCGAMGSetUseParallelCoarseGridSolve_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCpuPinCoarseGrids_C",PCGAMGSetCpuPinCoarseGrids_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetCoarseGridLayoutType_C",PCGAMGSetCoarseGridLayoutType_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThreshold_C",PCGAMGSetThreshold_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetThresholdScale_C",PCGAMGSetThresholdScale_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetType_C",PCGAMGSetType_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGGetType_C",PCGAMGGetType_GAMG);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)pc,"PCGAMGSetNlevels_C",PCGAMGSetNlevels_GAMG);CHKERRQ(ierr);
  pc_gamg->repart           = PETSC_FALSE;
  pc_gamg->reuse_prol       = PETSC_FALSE;
  pc_gamg->use_aggs_in_asm  = PETSC_FALSE;
  pc_gamg->use_parallel_coarse_grid_solver = PETSC_FALSE;
  pc_gamg->cpu_pin_coarse_grids = PETSC_FALSE;
  pc_gamg->layout_type      = PCGAMG_LAYOUT_SPREAD;
  pc_gamg->min_eq_proc      = 50;
  pc_gamg->coarse_eq_limit  = 50;
  for (i=0;i<PETSC_MG_MAXLEVELS;i++) pc_gamg->threshold[i] = 0.;
  pc_gamg->threshold_scale = 1.;
  pc_gamg->Nlevels          = PETSC_MG_MAXLEVELS;
  pc_gamg->current_level    = 0; /* don't need to init really */
  ierr = PetscStrcpy(pc_gamg->esteig_type,NULL);CHKERRQ(ierr);
  pc_gamg->esteig_max_it    = 10;
  pc_gamg->use_sa_esteig    = -1;
  pc_gamg->emin             = 0;
  pc_gamg->emax             = 0;

  pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;

  /* PCSetUp_GAMG assumes that the type has been set, so set it to the default now */
  ierr = PCGAMGSetType(pc,PCGAMGAGG);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
 PCGAMGInitializePackage - This function initializes everything in the PCGAMG package. It is called
    from PCInitializePackage().

 Level: developer

 .seealso: PetscInitialize()
@*/
PetscErrorCode PCGAMGInitializePackage(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (PCGAMGPackageInitialized) PetscFunctionReturn(0);
  PCGAMGPackageInitialized = PETSC_TRUE;
  ierr = PetscFunctionListAdd(&GAMGList,PCGAMGGEO,PCCreateGAMG_GEO);CHKERRQ(ierr);
  ierr = PetscFunctionListAdd(&GAMGList,PCGAMGAGG,PCCreateGAMG_AGG);CHKERRQ(ierr);
  ierr = PetscFunctionListAdd(&GAMGList,PCGAMGCLASSICAL,PCCreateGAMG_Classical);CHKERRQ(ierr);
  ierr = PetscRegisterFinalize(PCGAMGFinalizePackage);CHKERRQ(ierr);

  /* general events */
  ierr = PetscLogEventRegister("PCGAMGGraph_AGG", PC_CLASSID, &PC_GAMGGraph_AGG);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("PCGAMGGraph_GEO", PC_CLASSID, &PC_GAMGGraph_GEO);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("PCGAMGCoarse_AGG", PC_CLASSID, &PC_GAMGCoarsen_AGG);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("PCGAMGCoarse_GEO", PC_CLASSID, &PC_GAMGCoarsen_GEO);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("PCGAMGProl_AGG", PC_CLASSID, &PC_GAMGProlongator_AGG);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("PCGAMGProl_GEO", PC_CLASSID, &PC_GAMGProlongator_GEO);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("PCGAMGPOpt_AGG", PC_CLASSID, &PC_GAMGOptProlongator_AGG);CHKERRQ(ierr);

#if defined PETSC_GAMG_USE_LOG
  ierr = PetscLogEventRegister("GAMG: createProl", PC_CLASSID, &petsc_gamg_setup_events[SET1]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  Graph", PC_CLASSID, &petsc_gamg_setup_events[GRAPH]);CHKERRQ(ierr);
  /* PetscLogEventRegister("    G.Mat", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_MAT]); */
  /* PetscLogEventRegister("    G.Filter", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_FILTER]); */
  /* PetscLogEventRegister("    G.Square", PC_CLASSID, &petsc_gamg_setup_events[GRAPH_SQR]); */
  ierr = PetscLogEventRegister("  MIS/Agg", PC_CLASSID, &petsc_gamg_setup_events[SET4]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  geo: growSupp", PC_CLASSID, &petsc_gamg_setup_events[SET5]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  geo: triangle", PC_CLASSID, &petsc_gamg_setup_events[SET6]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("    search-set", PC_CLASSID, &petsc_gamg_setup_events[FIND_V]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  SA: col data", PC_CLASSID, &petsc_gamg_setup_events[SET7]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  SA: frmProl0", PC_CLASSID, &petsc_gamg_setup_events[SET8]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  SA: smooth", PC_CLASSID, &petsc_gamg_setup_events[SET9]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG: partLevel", PC_CLASSID, &petsc_gamg_setup_events[SET2]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  repartition", PC_CLASSID, &petsc_gamg_setup_events[SET12]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  Invert-Sort", PC_CLASSID, &petsc_gamg_setup_events[SET13]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  Move A", PC_CLASSID, &petsc_gamg_setup_events[SET14]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("  Move P", PC_CLASSID, &petsc_gamg_setup_events[SET15]);CHKERRQ(ierr);

  /* PetscLogEventRegister(" PL move data", PC_CLASSID, &petsc_gamg_setup_events[SET13]); */
  /* PetscLogEventRegister("GAMG: fix", PC_CLASSID, &petsc_gamg_setup_events[SET10]); */
  /* PetscLogEventRegister("GAMG: set levels", PC_CLASSID, &petsc_gamg_setup_events[SET11]); */
  /* create timer stages */
#if defined GAMG_STAGES
  {
    char     str[32];
    PetscInt lidx;
    sprintf(str,"MG Level %d (finest)",0);
    ierr = PetscLogStageRegister(str, &gamg_stages[0]);CHKERRQ(ierr);
    for (lidx=1; lidx<9; lidx++) {
      sprintf(str,"MG Level %d",(int)lidx);
      ierr = PetscLogStageRegister(str, &gamg_stages[lidx]);CHKERRQ(ierr);
    }
  }
#endif
#endif
  PetscFunctionReturn(0);
}

/*@C
 PCGAMGFinalizePackage - This function frees everything from the PCGAMG package. It is
    called from PetscFinalize() automatically.

 Level: developer

 .seealso: PetscFinalize()
@*/
PetscErrorCode PCGAMGFinalizePackage(void)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PCGAMGPackageInitialized = PETSC_FALSE;
  ierr = PetscFunctionListDestroy(&GAMGList);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
 PCGAMGRegister - Register a PCGAMG implementation.

 Input Parameters:
 + type - string that will be used as the name of the GAMG type.
 - create - function for creating the GAMG context.

  Level: advanced

 .seealso: PCGAMGType, PCGAMG, PCGAMGSetType()
@*/
PetscErrorCode PCGAMGRegister(PCGAMGType type, PetscErrorCode (*create)(PC))
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PCGAMGInitializePackage();CHKERRQ(ierr);
  ierr = PetscFunctionListAdd(&GAMGList,type,create);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
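
/*
  Registration sketch (hypothetical names: "mytype" and PCCreateGAMG_MyType;
  the creation routine is expected to fill in pc_gamg->ops for the new type):

    PETSC_EXTERN PetscErrorCode PCCreateGAMG_MyType(PC);

    ierr = PCGAMGRegister("mytype",PCCreateGAMG_MyType);CHKERRQ(ierr);

  The new type can then be selected with -pc_gamg_type mytype or
  PCGAMGSetType(pc,"mytype").
*/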