/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
 */
#include <../src/ksp/pc/impls/gamg/gamg.h>            /*I "petscpc.h" I*/
#include <../src/ksp/ksp/impls/cheby/chebyshevimpl.h> /*I "petscksp.h" I*/

#if defined(PETSC_HAVE_CUDA)
  #include <cuda_runtime.h>
#endif

#if defined(PETSC_HAVE_HIP)
  #include <hip/hip_runtime.h>
#endif

PetscLogEvent petsc_gamg_setup_events[GAMG_NUM_SET];
PetscLogEvent petsc_gamg_setup_matmat_events[PETSC_MG_MAXLEVELS][3];

// #define GAMG_STAGES
#if defined(GAMG_STAGES)
static PetscLogStage gamg_stages[PETSC_MG_MAXLEVELS];
#endif

static PetscFunctionList GAMGList = NULL;
static PetscBool         PCGAMGPackageInitialized;

static PetscErrorCode PCReset_GAMG(PC pc)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCall(PetscFree(pc_gamg->data));
  pc_gamg->data_sz = 0;
  PetscCall(PetscFree(pc_gamg->orig_data));
  for (PetscInt level = 0; level < PETSC_MG_MAXLEVELS; level++) {
    mg->min_eigen_DinvA[level] = 0;
    mg->max_eigen_DinvA[level] = 0;
  }
  pc_gamg->emin = 0;
  pc_gamg->emax = 0;
  PetscCall(PCReset_MG(pc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   PCGAMGCreateLevel_GAMG: create the coarse operator with RAP (Galerkin product), then
     repartition and/or reduce the number of active processes.

   Input Parameters:
   . pc - parameters + side effect: coarse data in 'pc_gamg->data' and
          'pc_gamg->data_sz' are changed via repartitioning/reduction.
   . Amat_fine - matrix on this fine (k) level
   . cr_bs - coarse block size
   In/Output Parameters:
   . a_P_inout - prolongation operator to the next level (k-->k-1)
   . a_nactive_proc - number of active processes
   Output Parameter:
   . a_Amat_crs - coarse matrix that is created (k-1)
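
   A minimal sketch (illustrative only) of the Galerkin triple product this routine
   computes below with MatPtAP():
     Mat Acrs;
     PetscCall(MatPtAP(Amat_fine, P, MAT_INITIAL_MATRIX, 2.0, &Acrs)); // Acrs = P^T * Amat_fine * P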
*/
static PetscErrorCode PCGAMGCreateLevel_GAMG(PC pc, Mat Amat_fine, PetscInt cr_bs, Mat *a_P_inout, Mat *a_Amat_crs, PetscMPIInt *a_nactive_proc, IS *Pcolumnperm, PetscBool is_last)
{
  PC_MG      *mg      = (PC_MG *)pc->data;
  PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;
  Mat         Cmat, Pold = *a_P_inout;
  MPI_Comm    comm;
  PetscMPIInt rank, size, new_size, nactive = *a_nactive_proc;
  PetscInt    ncrs_eq, ncrs, f_bs;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)Amat_fine, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(MatGetBlockSize(Amat_fine, &f_bs));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_PTAP], 0, 0, 0, 0));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[pc_gamg->current_level][1], 0, 0, 0, 0));
  PetscCall(MatPtAP(Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat));
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[pc_gamg->current_level][1], 0, 0, 0, 0));
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_PTAP], 0, 0, 0, 0));

  if (Pcolumnperm) *Pcolumnperm = NULL;

  /* set 'ncrs' (nodes), 'ncrs_eq' (equations) */
  PetscCall(MatGetLocalSize(Cmat, &ncrs_eq, NULL));
  if (pc_gamg->data_cell_rows > 0) {
    ncrs = pc_gamg->data_sz / pc_gamg->data_cell_cols / pc_gamg->data_cell_rows;
  } else {
    PetscInt bs;
    PetscCall(MatGetBlockSize(Cmat, &bs));
    ncrs = ncrs_eq / bs;
  }
  /* get number of PEs to make active 'new_size', reduce, can be any integer 1-P */
  if (pc_gamg->level_reduction_factors[pc_gamg->current_level] == 0 && PetscDefined(HAVE_CUDA) && pc_gamg->current_level == 0) { /* 0 turns reducing to 1 process/device on; do for HIP, etc. */
#if defined(PETSC_HAVE_CUDA)
    PetscShmComm pshmcomm;
    PetscMPIInt  locrank;
    MPI_Comm     loccomm;
    PetscInt     s_nnodes, r_nnodes, new_new_size;
    cudaError_t  cerr;
    int          devCount;
    PetscCall(PetscShmCommGet(comm, &pshmcomm));
    PetscCall(PetscShmCommGetMpiShmComm(pshmcomm, &loccomm));
    PetscCallMPI(MPI_Comm_rank(loccomm, &locrank));
    s_nnodes = !locrank;
    PetscCall(MPIU_Allreduce(&s_nnodes, &r_nnodes, 1, MPIU_INT, MPI_SUM, comm));
    PetscCheck((size % r_nnodes) == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "odd number of nodes np=%d nnodes=%" PetscInt_FMT, size, r_nnodes);
    devCount = 0;
    cerr     = cudaGetDeviceCount(&devCount);
    cudaGetLastError();                         /* Reset the last error */
    if (cerr == cudaSuccess && devCount >= 1) { /* There are devices, else go to heuristic */
      new_new_size = r_nnodes * devCount;
      new_size     = new_new_size;
      PetscCall(PetscInfo(pc, "%s: Fine grid with Cuda. %" PetscInt_FMT " nodes. Change new active set size %d --> %d (devCount=%d #nodes=%" PetscInt_FMT ")\n", ((PetscObject)pc)->prefix, r_nnodes, nactive, new_size, devCount, r_nnodes));
    } else {
      PetscCall(PetscInfo(pc, "%s: With Cuda but no device. Use heuristics.\n", ((PetscObject)pc)->prefix));
      goto HEURISTIC;
    }
#else
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "should not be here");
#endif
  } else if (pc_gamg->level_reduction_factors[pc_gamg->current_level] > 0) {
    if (nactive < pc_gamg->level_reduction_factors[pc_gamg->current_level]) {
      new_size = 1;
      PetscCall(PetscInfo(pc, "%s: reduction factor larger than %d active processes: reduce to one process\n", ((PetscObject)pc)->prefix, nactive));
    } else {
      PetscCheck(nactive % pc_gamg->level_reduction_factors[pc_gamg->current_level] == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "odd number of active process %d wrt reduction factor %" PetscInt_FMT, nactive, pc_gamg->level_reduction_factors[pc_gamg->current_level]);
      new_size = nactive / pc_gamg->level_reduction_factors[pc_gamg->current_level];
      PetscCall(PetscInfo(pc, "%s: Manually setting reduction to %d active processes (%d/%" PetscInt_FMT ")\n", ((PetscObject)pc)->prefix, new_size, nactive, pc_gamg->level_reduction_factors[pc_gamg->current_level]));
    }
  } else if (is_last && !pc_gamg->use_parallel_coarse_grid_solver) {
    new_size = 1;
    PetscCall(PetscInfo(pc, "%s: Force coarsest grid reduction to %d active processes\n", ((PetscObject)pc)->prefix, new_size));
  } else {
    PetscInt ncrs_eq_glob;
#if defined(PETSC_HAVE_CUDA)
  HEURISTIC:
#endif
    PetscCall(MatGetSize(Cmat, &ncrs_eq_glob, NULL));
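    /* illustrative arithmetic (numbers hypothetical): ncrs_eq_glob = 10000 with min_eq_proc = 50 gives new_size = 200 */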
    new_size = (PetscMPIInt)((float)ncrs_eq_glob / (float)pc_gamg->min_eq_proc + 0.5); /* hardwire min. number of eq/proc */
    if (!new_size) new_size = 1;                                                       /* not likely, possible? */
    else if (new_size >= nactive) new_size = nactive;                                  /* no change, rare */
    PetscCall(PetscInfo(pc, "%s: Coarse grid reduction from %d to %d active processes\n", ((PetscObject)pc)->prefix, nactive, new_size));
  }
  if (new_size == nactive) {
    *a_Amat_crs = Cmat; /* output - no repartitioning or reduction - could bail here */
    if (new_size < size) {
      /* odd case where multiple coarse grids are on one processor or no coarsening ... */
      PetscCall(PetscInfo(pc, "%s: reduced grid using same number of processors (%d) as last grid (use larger coarse grid)\n", ((PetscObject)pc)->prefix, nactive));
      if (pc_gamg->cpu_pin_coarse_grids) {
        PetscCall(MatBindToCPU(*a_Amat_crs, PETSC_TRUE));
        PetscCall(MatBindToCPU(*a_P_inout, PETSC_TRUE));
      }
    }
    /* we know that the grid structure can be reused in MatPtAP */
  } else { /* reduce active processors - we know that the grid structure can NOT be reused in MatPtAP */
    PetscInt *counts, *newproc_idx, ii, jj, kk, strideNew, *tidx, ncrs_new, ncrs_eq_new, nloc_old, expand_factor = 1, rfactor = 1;
    IS        is_eq_newproc, is_eq_num, is_eq_num_prim, new_eq_indices;
    PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
    nloc_old = ncrs_eq / cr_bs;
    PetscCheck(ncrs_eq % cr_bs == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "ncrs_eq %" PetscInt_FMT " not divisible by cr_bs %" PetscInt_FMT, ncrs_eq, cr_bs);
    /* get new_size and rfactor */
    if (pc_gamg->layout_type == PCGAMG_LAYOUT_SPREAD || !pc_gamg->repart) {
      /* find factor */
      if (new_size == 1) rfactor = size; /* don't modify */
      else {
        PetscReal best_fact = 0.;
        jj                  = -1;
        for (kk = 1; kk <= size; kk++) {
          if (!(size % kk)) { /* a candidate */
            PetscReal nactpe = (PetscReal)size / (PetscReal)kk, fact = nactpe / (PetscReal)new_size;
            if (fact > 1.0) fact = 1. / fact; /* keep fact < 1 */
            if (fact > best_fact) {
              best_fact = fact;
              jj        = kk;
            }
          }
        }
        if (jj != -1) rfactor = jj;
        else rfactor = 1; /* a prime */
        if (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) expand_factor = 1;
        else expand_factor = rfactor;
      }
      new_size = size / rfactor; /* make new_size one that divides the communicator size */
      if (new_size == nactive) { /* no repartitioning or reduction, bail out because nested here (rare) */
        *a_Amat_crs = Cmat;
        PetscCall(PetscInfo(pc, "%s: Finding factorable processor set stopped reduction: new_size=%d, neq(loc)=%" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, new_size, ncrs_eq));
        PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
        PetscFunctionReturn(PETSC_SUCCESS);
      }
    }
    /* make 'is_eq_newproc' */
    PetscCall(PetscMalloc1(size, &counts));
    if (pc_gamg->repart) { /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates of primal part accordingly */
      Mat adj;
      PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_REPART], 0, 0, 0, 0));
      PetscCall(PetscInfo(pc, "%s: Repartition: size (active): %d --> %d, %" PetscInt_FMT " local equations, using %s process layout\n", ((PetscObject)pc)->prefix, *a_nactive_proc, new_size, ncrs_eq, (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) ? "compact" : "spread"));
      /* get 'adj' */
      if (cr_bs == 1) {
        PetscCall(MatConvert(Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj));
      } else {
        /* make a scalar matrix to partition (no Stokes here) */
        Mat                tMat;
        PetscInt           Istart_crs, Iend_crs, ncols, jj, Ii;
        const PetscScalar *vals;
        const PetscInt    *idx;
        PetscInt          *d_nnz, *o_nnz, M, N;
        static PetscInt    llev = 0; /* ugly but just used for debugging */
        MatType            mtype;

        PetscCall(PetscMalloc2(ncrs, &d_nnz, ncrs, &o_nnz));
        PetscCall(MatGetOwnershipRange(Cmat, &Istart_crs, &Iend_crs));
        PetscCall(MatGetSize(Cmat, &M, &N));
        for (Ii = Istart_crs, jj = 0; Ii < Iend_crs; Ii += cr_bs, jj++) {
          PetscCall(MatGetRow(Cmat, Ii, &ncols, NULL, NULL));
          d_nnz[jj] = ncols / cr_bs;
          o_nnz[jj] = ncols / cr_bs;
          PetscCall(MatRestoreRow(Cmat, Ii, &ncols, NULL, NULL));
          if (d_nnz[jj] > ncrs) d_nnz[jj] = ncrs;
          if (o_nnz[jj] > (M / cr_bs - ncrs)) o_nnz[jj] = M / cr_bs - ncrs;
        }

        PetscCall(MatGetType(Amat_fine, &mtype));
        PetscCall(MatCreate(comm, &tMat));
        PetscCall(MatSetSizes(tMat, ncrs, ncrs, PETSC_DETERMINE, PETSC_DETERMINE));
        PetscCall(MatSetType(tMat, mtype));
        PetscCall(MatSeqAIJSetPreallocation(tMat, 0, d_nnz));
        PetscCall(MatMPIAIJSetPreallocation(tMat, 0, d_nnz, 0, o_nnz));
        PetscCall(PetscFree2(d_nnz, o_nnz));

        for (ii = Istart_crs; ii < Iend_crs; ii++) {
          PetscInt dest_row = ii / cr_bs;
          PetscCall(MatGetRow(Cmat, ii, &ncols, &idx, &vals));
          for (jj = 0; jj < ncols; jj++) {
            PetscInt    dest_col = idx[jj] / cr_bs;
            PetscScalar v        = 1.0;
            PetscCall(MatSetValues(tMat, 1, &dest_row, 1, &dest_col, &v, ADD_VALUES));
          }
          PetscCall(MatRestoreRow(Cmat, ii, &ncols, &idx, &vals));
        }
        PetscCall(MatAssemblyBegin(tMat, MAT_FINAL_ASSEMBLY));
        PetscCall(MatAssemblyEnd(tMat, MAT_FINAL_ASSEMBLY));

        if (llev++ == -1) {
          PetscViewer viewer;
          char        fname[32];
          PetscCall(PetscSNPrintf(fname, sizeof(fname), "part_mat_%" PetscInt_FMT ".mat", llev));
          PetscCall(PetscViewerBinaryOpen(comm, fname, FILE_MODE_WRITE, &viewer));
          PetscCall(MatView(tMat, viewer));
          PetscCall(PetscViewerDestroy(&viewer));
        }
        PetscCall(MatConvert(tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj));
        PetscCall(MatDestroy(&tMat));
      } /* create 'adj' */

      { /* partition: get newproc_idx */
        char            prefix[256];
        const char     *pcpre;
        const PetscInt *is_idx;
        MatPartitioning mpart;
        IS              proc_is;

        PetscCall(MatPartitioningCreate(comm, &mpart));
        PetscCall(MatPartitioningSetAdjacency(mpart, adj));
        PetscCall(PCGetOptionsPrefix(pc, &pcpre));
        PetscCall(PetscSNPrintf(prefix, sizeof(prefix), "%spc_gamg_", pcpre ? pcpre : ""));
        PetscCall(PetscObjectSetOptionsPrefix((PetscObject)mpart, prefix));
        PetscCall(MatPartitioningSetFromOptions(mpart));
        PetscCall(MatPartitioningSetNParts(mpart, new_size));
        PetscCall(MatPartitioningApply(mpart, &proc_is));
        PetscCall(MatPartitioningDestroy(&mpart));

        /* collect IS info */
        PetscCall(PetscMalloc1(ncrs_eq, &newproc_idx));
        PetscCall(ISGetIndices(proc_is, &is_idx));
        for (kk = jj = 0; kk < nloc_old; kk++) {
          for (ii = 0; ii < cr_bs; ii++, jj++) { newproc_idx[jj] = is_idx[kk] * expand_factor; /* distribution */ }
        }
        PetscCall(ISRestoreIndices(proc_is, &is_idx));
        PetscCall(ISDestroy(&proc_is));
      }
      PetscCall(MatDestroy(&adj));

      PetscCall(ISCreateGeneral(comm, ncrs_eq, newproc_idx, PETSC_COPY_VALUES, &is_eq_newproc));
      PetscCall(PetscFree(newproc_idx));
      PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REPART], 0, 0, 0, 0));
    } else { /* simple aggregation of parts -- 'is_eq_newproc' */
      PetscInt targetPE;
      PetscCheck(new_size != nactive, PETSC_COMM_SELF, PETSC_ERR_PLIB, "new_size==nactive. Should not happen");
      PetscCall(PetscInfo(pc, "%s: Number of equations (loc) %" PetscInt_FMT " with simple aggregation\n", ((PetscObject)pc)->prefix, ncrs_eq));
      targetPE = (rank / rfactor) * expand_factor;
      PetscCall(ISCreateStride(comm, ncrs_eq, targetPE, 0, &is_eq_newproc));
    } /* end simple 'is_eq_newproc' */

    /*
      Create an index set from the is_eq_newproc index set to indicate the mapping TO
    */
    PetscCall(ISPartitioningToNumbering(is_eq_newproc, &is_eq_num));
    is_eq_num_prim = is_eq_num;
    /*
      Determine how many equations/vertices are assigned to each processor
    */
    PetscCall(ISPartitioningCount(is_eq_newproc, size, counts));
    ncrs_eq_new = counts[rank];
    PetscCall(ISDestroy(&is_eq_newproc));
    ncrs_new = ncrs_eq_new / cr_bs;

    PetscCall(PetscFree(counts));
    /* data movement scope -- this could be moved to subclasses so that we don't try to cram all auxiliary data into some complex abstracted thing */
    {
      Vec             src_crd, dest_crd;
      const PetscInt *idx, ndata_rows = pc_gamg->data_cell_rows, ndata_cols = pc_gamg->data_cell_cols, node_data_sz = ndata_rows * ndata_cols;
      VecScatter      vecscat;
      PetscScalar    *array;
      IS              isscat;
      /* move data (for primal equations only) */
      /* Create a vector to contain the newly ordered element information */
      PetscCall(VecCreate(comm, &dest_crd));
      PetscCall(VecSetSizes(dest_crd, node_data_sz * ncrs_new, PETSC_DECIDE));
      PetscCall(VecSetType(dest_crd, VECSTANDARD)); /* this is needed! */
      /*
        There are 'ndata_rows*ndata_cols' data items per node (one can think of the vectors as having
        a block size of 'node_data_sz').  Note, ISs are expanded into equation space by 'cr_bs'.
      */
      PetscCall(PetscMalloc1(ncrs * node_data_sz, &tidx));
      PetscCall(ISGetIndices(is_eq_num_prim, &idx));
      for (ii = 0, jj = 0; ii < ncrs; ii++) {
        PetscInt id = idx[ii * cr_bs] / cr_bs; /* get node back */
        for (kk = 0; kk < node_data_sz; kk++, jj++) tidx[jj] = id * node_data_sz + kk;
      }
      PetscCall(ISRestoreIndices(is_eq_num_prim, &idx));
      PetscCall(ISCreateGeneral(comm, node_data_sz * ncrs, tidx, PETSC_COPY_VALUES, &isscat));
      PetscCall(PetscFree(tidx));
      /*
        Create a vector to contain the original vertex information for each element
      */
      PetscCall(VecCreateSeq(PETSC_COMM_SELF, node_data_sz * ncrs, &src_crd));
      for (jj = 0; jj < ndata_cols; jj++) {
        const PetscInt stride0 = ncrs * pc_gamg->data_cell_rows;
        for (ii = 0; ii < ncrs; ii++) {
          for (kk = 0; kk < ndata_rows; kk++) {
            PetscInt    ix = ii * ndata_rows + kk + jj * stride0, jx = ii * node_data_sz + kk * ndata_cols + jj;
            PetscScalar tt = (PetscScalar)pc_gamg->data[ix];
            PetscCall(VecSetValues(src_crd, 1, &jx, &tt, INSERT_VALUES));
          }
        }
      }
      PetscCall(VecAssemblyBegin(src_crd));
      PetscCall(VecAssemblyEnd(src_crd));
      /*
        Scatter the element vertex information (still in the original vertex ordering)
        to the correct processor
      */
      PetscCall(VecScatterCreate(src_crd, NULL, dest_crd, isscat, &vecscat));
      PetscCall(ISDestroy(&isscat));
      PetscCall(VecScatterBegin(vecscat, src_crd, dest_crd, INSERT_VALUES, SCATTER_FORWARD));
      PetscCall(VecScatterEnd(vecscat, src_crd, dest_crd, INSERT_VALUES, SCATTER_FORWARD));
      PetscCall(VecScatterDestroy(&vecscat));
      PetscCall(VecDestroy(&src_crd));
      /*
        Put the element vertex data into a new allocation of the gdata->ele
      */
      PetscCall(PetscFree(pc_gamg->data));
      PetscCall(PetscMalloc1(node_data_sz * ncrs_new, &pc_gamg->data));

      pc_gamg->data_sz = node_data_sz * ncrs_new;
      strideNew        = ncrs_new * ndata_rows;

      PetscCall(VecGetArray(dest_crd, &array));
      for (jj = 0; jj < ndata_cols; jj++) {
        for (ii = 0; ii < ncrs_new; ii++) {
          for (kk = 0; kk < ndata_rows; kk++) {
            PetscInt ix = ii * ndata_rows + kk + jj * strideNew, jx = ii * node_data_sz + kk * ndata_cols + jj;
            pc_gamg->data[ix] = PetscRealPart(array[jx]);
          }
        }
      }
      PetscCall(VecRestoreArray(dest_crd, &array));
      PetscCall(VecDestroy(&dest_crd));
    }
    /* move A and P (columns) with new layout */
    /*
      Invert for MatCreateSubMatrix
    */
    PetscCall(ISInvertPermutation(is_eq_num, ncrs_eq_new, &new_eq_indices));
    PetscCall(ISSort(new_eq_indices)); /* is this needed? */
    PetscCall(ISSetBlockSize(new_eq_indices, cr_bs));
    if (is_eq_num != is_eq_num_prim) { PetscCall(ISDestroy(&is_eq_num_prim)); /* could be same as 'is_eq_num' */ }
    if (Pcolumnperm) {
      PetscCall(PetscObjectReference((PetscObject)new_eq_indices));
      *Pcolumnperm = new_eq_indices;
    }
    PetscCall(ISDestroy(&is_eq_num));

    /* 'a_Amat_crs' output */
    {
      Mat       mat;
      PetscBool isset, isspd, isher;
#if !defined(PETSC_USE_COMPLEX)
      PetscBool issym;
#endif

      PetscCall(MatCreateSubMatrix(Cmat, new_eq_indices, new_eq_indices, MAT_INITIAL_MATRIX, &mat));
      PetscCall(MatIsSPDKnown(Cmat, &isset, &isspd)); // like MatPropagateSymmetryOptions, but should set MAT_STRUCTURALLY_SYMMETRIC ?
      if (isset) PetscCall(MatSetOption(mat, MAT_SPD, isspd));
      else {
        PetscCall(MatIsHermitianKnown(Cmat, &isset, &isher));
        if (isset) PetscCall(MatSetOption(mat, MAT_HERMITIAN, isher));
        else {
#if !defined(PETSC_USE_COMPLEX)
          PetscCall(MatIsSymmetricKnown(Cmat, &isset, &issym));
          if (isset) PetscCall(MatSetOption(mat, MAT_SYMMETRIC, issym));
#endif
        }
      }
      *a_Amat_crs = mat;
    }
    PetscCall(MatDestroy(&Cmat));

    /* prolongator */
    {
      IS       findices;
      PetscInt Istart, Iend;
      Mat      Pnew;

      PetscCall(MatGetOwnershipRange(Pold, &Istart, &Iend));
      PetscCall(ISCreateStride(comm, Iend - Istart, Istart, 1, &findices));
      PetscCall(ISSetBlockSize(findices, f_bs));
      PetscCall(MatCreateSubMatrix(Pold, findices, new_eq_indices, MAT_INITIAL_MATRIX, &Pnew));
      PetscCall(ISDestroy(&findices));
      PetscCall(MatSetOption(Pnew, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));

      PetscCall(MatDestroy(a_P_inout));

      /* output - repartitioned */
      *a_P_inout = Pnew;
    }
    PetscCall(ISDestroy(&new_eq_indices));

    *a_nactive_proc = new_size; /* output */

    /* pinning on reduced grids, not a bad heuristic and optimization gets folded into process reduction optimization */
    if (pc_gamg->cpu_pin_coarse_grids) {
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
      static PetscInt llev = 2;
      PetscCall(PetscInfo(pc, "%s: Pinning level %" PetscInt_FMT " to the CPU\n", ((PetscObject)pc)->prefix, llev++));
#endif
      PetscCall(MatBindToCPU(*a_Amat_crs, PETSC_TRUE));
      PetscCall(MatBindToCPU(*a_P_inout, PETSC_TRUE));
      if (1) { /* HACK: move this to MatBindCPU_MPIAIJXXX; lvec is created, need to pin it, this is done in MatSetUpMultiply_MPIAIJ. Hack */
        Mat         A = *a_Amat_crs, P = *a_P_inout;
        PetscMPIInt size;
        PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
        if (size > 1) {
          Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *p = (Mat_MPIAIJ *)P->data;
          PetscCall(VecBindToCPU(a->lvec, PETSC_TRUE));
          PetscCall(VecBindToCPU(p->lvec, PETSC_TRUE));
        }
      }
    }
    PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
  } // processor reduce
  PetscFunctionReturn(PETSC_SUCCESS);
}

// used in GEO
PetscErrorCode PCGAMGSquareGraph_GAMG(PC a_pc, Mat Gmat1, Mat *Gmat2)
{
  const char *prefix;
  char        addp[32];
  PC_MG      *mg      = (PC_MG *)a_pc->data;
  PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCall(PCGetOptionsPrefix(a_pc, &prefix));
  PetscCall(PetscInfo(a_pc, "%s: Square Graph on level %" PetscInt_FMT "\n", ((PetscObject)a_pc)->prefix, pc_gamg->current_level + 1));
  PetscCall(MatProductCreate(Gmat1, Gmat1, NULL, Gmat2));
  PetscCall(MatSetOptionsPrefix(*Gmat2, prefix));
  PetscCall(PetscSNPrintf(addp, sizeof(addp), "pc_gamg_square_%" PetscInt_FMT "_", pc_gamg->current_level));
  PetscCall(MatAppendOptionsPrefix(*Gmat2, addp));
  if ((*Gmat2)->structurally_symmetric == PETSC_BOOL3_TRUE) {
    PetscCall(MatProductSetType(*Gmat2, MATPRODUCT_AB));
  } else {
    PetscCall(MatSetOption(Gmat1, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
    PetscCall(MatProductSetType(*Gmat2, MATPRODUCT_AtB));
  }
  PetscCall(MatProductSetFromOptions(*Gmat2));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[pc_gamg->current_level][0], 0, 0, 0, 0));
  PetscCall(MatProductSymbolic(*Gmat2));
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[pc_gamg->current_level][0], 0, 0, 0, 0));
  PetscCall(MatProductClear(*Gmat2));
  /* we only need the sparsity, cheat and tell PETSc the matrix has been assembled */
  (*Gmat2)->assembled = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

*/
static PetscErrorCode PCSetUp_GAMG(PC pc)
{
  PC_MG      *mg      = (PC_MG *)pc->data;
  PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;
  Mat         Pmat    = pc->pmat;
  PetscInt    fine_level, level, level1, bs, M, N, qq, lidx, nASMBlocksArr[PETSC_MG_MAXLEVELS], cr_bs;
  MPI_Comm    comm;
  PetscMPIInt rank, size, nactivepe;
  Mat         Aarr[PETSC_MG_MAXLEVELS], Parr[PETSC_MG_MAXLEVELS];
  IS         *ASMLocalIDsArr[PETSC_MG_MAXLEVELS];
  PetscBool   is_last = PETSC_FALSE;
#if defined(PETSC_USE_INFO)
  PetscLogDouble nnz0 = 0., nnztot = 0.;
  MatInfo        info;
#endif

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)pc, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
  if (pc->setupcalled) {
    if (!pc_gamg->reuse_prol || pc->flag == DIFFERENT_NONZERO_PATTERN) {
      /* reset everything */
      PetscCall(PCReset_MG(pc));
      pc->setupcalled = 0;
    } else {
      PC_MG_Levels **mglevels = mg->levels;
      /* just do Galerkin grids */
      Mat B, dA, dB;
      if (pc_gamg->Nlevels > 1) {
        PetscInt gl;
        /* currently only handle case where mat and pmat are the same on coarser levels */
        PetscCall(KSPGetOperators(mglevels[pc_gamg->Nlevels - 1]->smoothd, &dA, &dB));
        /* (re)set to get dirty flag */
        PetscCall(KSPSetOperators(mglevels[pc_gamg->Nlevels - 1]->smoothd, dA, dB));

        for (level = pc_gamg->Nlevels - 2, gl = 0; level >= 0; level--, gl++) {
          MatReuse reuse = MAT_INITIAL_MATRIX;
#if defined(GAMG_STAGES)
          PetscCall(PetscLogStagePush(gamg_stages[gl]));
#endif
          /* matrix structure can change from repartitioning or process reduction but don't know if we have process reduction here. Should fix */
          PetscCall(KSPGetOperators(mglevels[level]->smoothd, NULL, &B));
          if (B->product) {
            if (B->product->A == dB && B->product->B == mglevels[level + 1]->interpolate) reuse = MAT_REUSE_MATRIX;
          }
          if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDestroy(&mglevels[level]->A));
          if (reuse == MAT_REUSE_MATRIX) {
            PetscCall(PetscInfo(pc, "%s: RAP after initial setup, reuse matrix level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
          } else {
            PetscCall(PetscInfo(pc, "%s: RAP after initial setup, with repartitioning (new matrix) level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
          }
          PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[gl][1], 0, 0, 0, 0));
          PetscCall(MatPtAP(dB, mglevels[level + 1]->interpolate, reuse, PETSC_DEFAULT, &B));
          PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[gl][1], 0, 0, 0, 0));
          if (reuse == MAT_INITIAL_MATRIX) mglevels[level]->A = B;
          PetscCall(KSPSetOperators(mglevels[level]->smoothd, B, B));
          // check for redoing eigen estimates
          if (pc_gamg->recompute_esteig) {
            PetscBool ischeb;
            KSP       smoother;
            PetscCall(PCMGGetSmoother(pc, level + 1, &smoother));
            PetscCall(PetscObjectTypeCompare((PetscObject)smoother, KSPCHEBYSHEV, &ischeb));
            if (ischeb) {
              KSP_Chebyshev *cheb = (KSP_Chebyshev *)smoother->data;
              cheb->emin_provided = 0;
              cheb->emax_provided = 0;
            }
            /* we could call PetscCall(KSPChebyshevSetEigenvalues(smoother, 0, 0)); but the logic does not work properly */
          }
          // inc
          dB = B;
#if defined(GAMG_STAGES)
          PetscCall(PetscLogStagePop());
#endif
        }
      }

      PetscCall(PCSetUp_MG(pc));
      PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
      PetscFunctionReturn(PETSC_SUCCESS);
    }
  }

  if (!pc_gamg->data) {
    if (pc_gamg->orig_data) {
      PetscCall(MatGetBlockSize(Pmat, &bs));
      PetscCall(MatGetLocalSize(Pmat, &qq, NULL));

      pc_gamg->data_sz        = (qq / bs) * pc_gamg->orig_data_cell_rows * pc_gamg->orig_data_cell_cols;
      pc_gamg->data_cell_rows = pc_gamg->orig_data_cell_rows;
      pc_gamg->data_cell_cols = pc_gamg->orig_data_cell_cols;

      PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data));
      for (qq = 0; qq < pc_gamg->data_sz; qq++) pc_gamg->data[qq] = pc_gamg->orig_data[qq];
    } else {
      PetscCheck(pc_gamg->ops->createdefaultdata, comm, PETSC_ERR_PLIB, "'createdefaultdata' not set(?) need to support NULL data");
      PetscCall(pc_gamg->ops->createdefaultdata(pc, Pmat));
    }
  }

  /* cache original data for reuse */
  if (!pc_gamg->orig_data && (PetscBool)(!pc_gamg->reuse_prol)) {
    PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->orig_data));
    for (qq = 0; qq < pc_gamg->data_sz; qq++) pc_gamg->orig_data[qq] = pc_gamg->data[qq];
    pc_gamg->orig_data_cell_rows = pc_gamg->data_cell_rows;
    pc_gamg->orig_data_cell_cols = pc_gamg->data_cell_cols;
  }

  /* get basic dims */
  PetscCall(MatGetBlockSize(Pmat, &bs));
  PetscCall(MatGetSize(Pmat, &M, NULL));

#if defined(PETSC_USE_INFO)
  PetscCall(MatGetInfo(Pmat, MAT_GLOBAL_SUM, &info)); /* global reduction */
  nnz0   = info.nz_used;
  nnztot = info.nz_used;
#endif
  PetscCall(PetscInfo(pc, "%s: level %d) N=%" PetscInt_FMT ", n data rows=%" PetscInt_FMT ", n data cols=%" PetscInt_FMT ", nnz/row (ave)=%" PetscInt_FMT ", block size %d, np=%d\n", ((PetscObject)pc)->prefix, 0, M, pc_gamg->data_cell_rows, pc_gamg->data_cell_cols, (PetscInt)(nnz0 / (PetscReal)M + 0.5), (int)bs, size));

  /* Get A_i and R_i */
  for (level = 0, Aarr[0] = Pmat, nactivepe = size; level < (pc_gamg->Nlevels - 1) && (level == 0 || M > pc_gamg->coarse_eq_limit); level++) {
    pc_gamg->current_level = level;
    level1                 = level + 1;
#if defined(GAMG_STAGES)
    if (!gamg_stages[level]) {
      char str[32];
      PetscCall(PetscSNPrintf(str, PETSC_STATIC_ARRAY_LENGTH(str), "GAMG Level %d", (int)level));
      PetscCall(PetscLogStageRegister(str, &gamg_stages[level]));
    }
    PetscCall(PetscLogStagePush(gamg_stages[level]));
#endif
    /* construct prolongator - Parr[level1] */
    if (level == 0 && pc_gamg->injection_index_size > 0) {
      Mat      Prol;
      MatType  mtype;
      PetscInt prol_m, prol_n, Prol_N = (M / bs) * pc_gamg->injection_index_size, Istart, Iend, nn, row;
      PetscCall(PetscInfo(pc, "Create fine grid injection space prolongation %" PetscInt_FMT " x %" PetscInt_FMT ". %s\n", M, Prol_N, pc_gamg->data ? "delete null space data" : ""));
      PetscCall(MatGetOwnershipRange(Pmat, &Istart, &Iend));
      PetscCall(MatGetLocalSize(Pmat, &prol_m, NULL)); // rows m x n
      prol_n = (prol_m / bs) * pc_gamg->injection_index_size;
      PetscCheck(pc_gamg->injection_index_size < bs, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Injection size %d must be less than block size %d", (int)pc_gamg->injection_index_size, (int)bs);
      PetscCall(MatGetType(Pmat, &mtype));
      PetscCall(MatCreate(PetscObjectComm((PetscObject)pc), &Prol));
      PetscCall(MatSetBlockSizes(Prol, bs, pc_gamg->injection_index_size));
      PetscCall(MatSetSizes(Prol, prol_m, prol_n, M, Prol_N));
      PetscCall(MatSetType(Prol, mtype));
#if PetscDefined(HAVE_DEVICE)
      PetscBool flg;
      PetscCall(MatBoundToCPU(Pmat, &flg));
      PetscCall(MatBindToCPU(Prol, flg));
      if (flg) PetscCall(MatSetBindingPropagates(Prol, PETSC_TRUE));
#endif
      PetscCall(MatSeqAIJSetPreallocation(Prol, 1, NULL));
      PetscCall(MatMPIAIJSetPreallocation(Prol, 1, NULL, 0, NULL));
      // set I \kron [1, 1, ... ]^T
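      // e.g. (illustrative): with bs = 3 and injection_index = {0}, each block row ii gets a single
      // unit entry at row ii + 0, so P restricts the fine space to the selected dof of every node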
      for (PetscInt ii = Istart, col = (Istart / bs) * pc_gamg->injection_index_size; ii < Iend; ii += bs) {
        const PetscScalar one = 1;
        for (PetscInt jj = 0; jj < pc_gamg->injection_index_size; jj++, col++) {
          PetscInt row = ii + pc_gamg->injection_index[jj];
          PetscCall(MatSetValues(Prol, 1, &row, 1, &col, &one, INSERT_VALUES));
        }
      }
      PetscCall(MatAssemblyBegin(Prol, MAT_FINAL_ASSEMBLY));
      PetscCall(MatAssemblyEnd(Prol, MAT_FINAL_ASSEMBLY));
      PetscCall(MatViewFromOptions(Prol, NULL, "-mat_view_injection"));
      PetscCall(MatGetBlockSizes(Prol, NULL, &cr_bs)); // column size
      Parr[level1] = Prol;
      // can not deal with null space -- with array of 'injection cols' we could take 'injection rows and 'injection cols' to 'data'
      if (pc_gamg->data) {
        pc_gamg->data_cell_cols      = pc_gamg->injection_index_size;
        pc_gamg->data_cell_rows      = pc_gamg->injection_index_size;
        pc_gamg->orig_data_cell_cols = 0;
        pc_gamg->orig_data_cell_rows = 0;
        PetscCall(PetscFree(pc_gamg->data));
        pc_gamg->data_sz = pc_gamg->injection_index_size * prol_n;
        PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data));
        for (row = nn = 0; row < prol_n; row += pc_gamg->injection_index_size) {
          for (int jj = 0; jj < pc_gamg->injection_index_size; jj++) {
            int idx = row * pc_gamg->injection_index_size + jj * pc_gamg->injection_index_size;
            for (int kk = 0; kk < pc_gamg->injection_index_size; kk++, nn++) { pc_gamg->data[idx + kk] = (jj == kk) ? 1 : 0; }
          }
        }
        PetscCheck(nn == pc_gamg->data_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "nn != pc_gamg->data_sz %" PetscInt_FMT " %" PetscInt_FMT, pc_gamg->data_sz, nn);
      }
    } else {
      Mat               Gmat, mat;
      PetscCoarsenData *agg_lists;
      Mat               Prol11;

      PetscCall(PCGAMGCreateGraph(pc, Aarr[level], &Gmat));
      PetscCall(pc_gamg->ops->coarsen(pc, &Gmat, &agg_lists)); // Gmat may have ghosts for QR aggregates not in matrix
      PetscCall(PetscCDGetMat(agg_lists, &mat));
      if (!mat) PetscCall(PetscCDSetMat(agg_lists, Gmat));
      PetscCall(pc_gamg->ops->prolongator(pc, Aarr[level], agg_lists, &Prol11));
      /* could have failed to create new level */
      if (Prol11) {
        const char *prefix;
        char        addp[32];

        /* get new block size of coarse matrices */
        PetscCall(MatGetBlockSizes(Prol11, NULL, &cr_bs)); // column size

        if (pc_gamg->ops->optprolongator) {
          /* smooth */
          PetscCall(pc_gamg->ops->optprolongator(pc, Aarr[level], &Prol11));
        }

        if (pc_gamg->use_aggs_in_asm) {
          PetscInt bs;
          PetscCall(MatGetBlockSizes(Prol11, &bs, NULL)); // row block size
          PetscCall(PetscCDGetASMBlocks(agg_lists, bs, &nASMBlocksArr[level], &ASMLocalIDsArr[level]));
          PetscCall(PetscInfo(pc, "%d: %" PetscInt_FMT " ASM local domains,  bs = %d\n", (int)level, nASMBlocksArr[level], (int)bs));
        } else if (pc_gamg->asm_hem_aggs) {
          MatCoarsen  crs;
          const char *prefix;
          PetscInt    bs;
          PetscCall(PetscCDGetMat(agg_lists, &mat));
          if (mat == Gmat) PetscCall(PetscCDClearMat(agg_lists)); // take the Mat away from the list (yuck)
          PetscCall(PetscCDDestroy(agg_lists));
          PetscCall(PetscInfo(pc, "HEM ASM passes = %d\n", (int)pc_gamg->asm_hem_aggs));
          PetscCall(MatCoarsenCreate(PetscObjectComm((PetscObject)pc), &crs));
          PetscCall(PetscObjectGetOptionsPrefix((PetscObject)pc, &prefix));
          PetscCall(PetscObjectSetOptionsPrefix((PetscObject)crs, prefix));
          PetscCall(MatCoarsenSetFromOptions(crs)); // get strength args
          PetscCall(MatCoarsenSetType(crs, MATCOARSENHEM));
          PetscCall(MatCoarsenSetMaximumIterations(crs, pc_gamg->asm_hem_aggs));
          PetscCall(MatCoarsenSetAdjacency(crs, Gmat));
          PetscCall(MatCoarsenSetStrictAggs(crs, PETSC_TRUE));
          PetscCall(MatCoarsenApply(crs));
          PetscCall(MatCoarsenViewFromOptions(crs, NULL, "-agg_hem_mat_coarsen_view"));
          PetscCall(MatCoarsenGetData(crs, &agg_lists)); /* output */
          PetscCall(MatCoarsenDestroy(&crs));
          // create aggregates
          PetscCall(MatGetBlockSizes(Aarr[level], &bs, NULL)); // row block size
          PetscCall(PetscCDGetASMBlocks(agg_lists, bs, &nASMBlocksArr[level], &ASMLocalIDsArr[level]));
        }
        PetscCall(PCGetOptionsPrefix(pc, &prefix));
        PetscCall(MatSetOptionsPrefix(Prol11, prefix));
        PetscCall(PetscSNPrintf(addp, sizeof(addp), "pc_gamg_prolongator_%d_", (int)level));
        PetscCall(MatAppendOptionsPrefix(Prol11, addp));
        /* Always generate the transpose with CUDA
           Such behaviour can be adapted with -pc_gamg_prolongator_ prefixed options */
        PetscCall(MatSetOption(Prol11, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
        PetscCall(MatSetFromOptions(Prol11));
        Parr[level1] = Prol11;
      } else Parr[level1] = NULL; /* failed to coarsen */
      PetscCall(PetscCDGetMat(agg_lists, &mat));
      if (mat == Gmat) PetscCall(PetscCDClearMat(agg_lists)); // take the Mat away from the list (yuck)
      PetscCall(MatDestroy(&Gmat));
      PetscCall(PetscCDDestroy(agg_lists));
    } /* construct prolongator scope */
    if (level == 0) Aarr[0] = Pmat; /* use Pmat for finest level setup */
    if (!Parr[level1]) {            /* failed to coarsen */
      PetscCall(PetscInfo(pc, "%s: Stop gridding, level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
#if defined(GAMG_STAGES)
      PetscCall(PetscLogStagePop());
#endif
      break;
    }
    PetscCall(MatGetSize(Parr[level1], &M, &N)); /* N is the next M, a loop test variable */
    PetscCheck(!is_last, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Is last ?");
    if (N <= pc_gamg->coarse_eq_limit) is_last = PETSC_TRUE;
    if (level1 == pc_gamg->Nlevels - 1) is_last = PETSC_TRUE;
    if (level == PETSC_MG_MAXLEVELS - 2) is_last = PETSC_TRUE;
    PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_LEVEL], 0, 0, 0, 0));
    PetscCall(pc_gamg->ops->createlevel(pc, Aarr[level], cr_bs, &Parr[level1], &Aarr[level1], &nactivepe, NULL, is_last));
    PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_LEVEL], 0, 0, 0, 0));
    PetscCall(MatGetSize(Aarr[level1], &M, &N)); /* M is the loop test variable */

#if defined(PETSC_USE_INFO)
    PetscCall(MatGetInfo(Aarr[level1], MAT_GLOBAL_SUM, &info));
    nnztot += info.nz_used;
#endif
    PetscCall(PetscInfo(pc, "%s: %d) N=%" PetscInt_FMT ", n data cols=%" PetscInt_FMT ", nnz/row (ave)=%" PetscInt_FMT ", %d active pes\n", ((PetscObject)pc)->prefix, (int)level1, M, pc_gamg->data_cell_cols, (PetscInt)(info.nz_used / (PetscReal)M), nactivepe));

#if defined(GAMG_STAGES)
    PetscCall(PetscLogStagePop());
#endif
    /* stop if one node or one proc -- could pull back for singular problems */
    if ((pc_gamg->data_cell_cols && M / pc_gamg->data_cell_cols < 2) || (!pc_gamg->data_cell_cols && M / bs < 2)) {
      PetscCall(PetscInfo(pc, "%s: HARD stop of coarsening on level %" PetscInt_FMT ".  Grid too small: %" PetscInt_FMT " block nodes\n", ((PetscObject)pc)->prefix, level, M / bs));
      level++;
      break;
    } else if (level == PETSC_MG_MAXLEVELS - 2) { /* stop if we are limited by PC_MG_MAXLEVELS */
      PetscCall(PetscInfo(pc, "%s: HARD stop of coarsening on level %" PetscInt_FMT ".  PC_MG_MAXLEVELS reached\n", ((PetscObject)pc)->prefix, level));
      level++;
      break;
    }
  } /* levels */
  PetscCall(PetscFree(pc_gamg->data));

  PetscCall(PetscInfo(pc, "%s: %" PetscInt_FMT " levels, operator complexity = %g\n", ((PetscObject)pc)->prefix, level + 1, nnztot / nnz0));
  pc_gamg->Nlevels = level + 1;
  fine_level       = level;
  PetscCall(PCMGSetLevels(pc, pc_gamg->Nlevels, NULL));

  if (pc_gamg->Nlevels > 1) { /* don't setup MG if one level */

    /* set default smoothers & set operators */
    for (lidx = 1, level = pc_gamg->Nlevels - 2; lidx <= fine_level; lidx++, level--) {
      KSP smoother;
      PC  subpc;

      PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
      PetscCall(KSPGetPC(smoother, &subpc));

      PetscCall(KSPSetNormType(smoother, KSP_NORM_NONE));
      /* set ops */
      PetscCall(KSPSetOperators(smoother, Aarr[level], Aarr[level]));
      PetscCall(PCMGSetInterpolation(pc, lidx, Parr[level + 1]));

      /* set defaults */
      PetscCall(KSPSetType(smoother, KSPCHEBYSHEV));

      /* set blocks for ASM smoother that uses the 'aggregates' */
      if (pc_gamg->use_aggs_in_asm || pc_gamg->asm_hem_aggs) {
        PetscInt sz;
        IS      *iss;

        sz  = nASMBlocksArr[level];
        iss = ASMLocalIDsArr[level];
        PetscCall(PCSetType(subpc, PCASM));
        PetscCall(PCASMSetOverlap(subpc, 0));
        PetscCall(PCASMSetType(subpc, PC_ASM_BASIC));
        if (!sz) {
          IS is;
          PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
          PetscCall(PCASMSetLocalSubdomains(subpc, 1, NULL, &is));
          PetscCall(ISDestroy(&is));
        } else {
          PetscInt kk;
          PetscCall(PCASMSetLocalSubdomains(subpc, sz, iss, NULL));
          for (kk = 0; kk < sz; kk++) PetscCall(ISDestroy(&iss[kk]));
          PetscCall(PetscFree(iss));
        }
        ASMLocalIDsArr[level] = NULL;
        nASMBlocksArr[level]  = 0;
      } else {
        PetscCall(PCSetType(subpc, PCJACOBI));
      }
    }
    {
      /* coarse grid */
      KSP      smoother, *k2;
      PC       subpc, pc2;
      PetscInt ii, first;
      Mat      Lmat = Aarr[(level = pc_gamg->Nlevels - 1)];
      lidx          = 0;

      PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
      PetscCall(KSPSetOperators(smoother, Lmat, Lmat));
      if (!pc_gamg->use_parallel_coarse_grid_solver) {
        PetscCall(KSPSetNormType(smoother, KSP_NORM_NONE));
        PetscCall(KSPGetPC(smoother, &subpc));
        PetscCall(PCSetType(subpc, PCBJACOBI));
        PetscCall(PCSetUp(subpc));
        PetscCall(PCBJacobiGetSubKSP(subpc, &ii, &first, &k2));
        PetscCheck(ii == 1, PETSC_COMM_SELF, PETSC_ERR_PLIB, "ii %" PetscInt_FMT " is not one", ii);
        PetscCall(KSPGetPC(k2[0], &pc2));
        PetscCall(PCSetType(pc2, PCLU));
        PetscCall(PCFactorSetShiftType(pc2, MAT_SHIFT_INBLOCKS));
        PetscCall(KSPSetTolerances(k2[0], PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 1));
        PetscCall(KSPSetType(k2[0], KSPPREONLY));
      }
    }

    /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
    PetscObjectOptionsBegin((PetscObject)pc);
    PetscCall(PCSetFromOptions_MG(pc, PetscOptionsObject));
    PetscOptionsEnd();
    PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_EXTERNAL));

    /* set cheby eigen estimates from SA to use in the solver */
    if (pc_gamg->use_sa_esteig) {
      for (lidx = 1, level = pc_gamg->Nlevels - 2; level >= 0; lidx++, level--) {
        KSP       smoother;
        PetscBool ischeb;

        PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
        PetscCall(PetscObjectTypeCompare((PetscObject)smoother, KSPCHEBYSHEV, &ischeb));
        if (ischeb) {
          KSP_Chebyshev *cheb = (KSP_Chebyshev *)smoother->data;

          // The command line will override these settings because KSPSetFromOptions is called in PCSetUp_MG
          if (mg->max_eigen_DinvA[level] > 0) {
            // SA uses Jacobi for P; we use SA estimates if the smoother is also Jacobi or if the user explicitly requested it.
            // TODO: This should test whether it's the same Jacobi variant (DIAG, ROWSUM, etc.)
            PetscReal emax, emin;

            emin = mg->min_eigen_DinvA[level];
            emax = mg->max_eigen_DinvA[level];
            PetscCall(PetscInfo(pc, "%s: PCSetUp_GAMG: call KSPChebyshevSetEigenvalues on level %" PetscInt_FMT " (N=%" PetscInt_FMT ") with emax = %g emin = %g\n", ((PetscObject)pc)->prefix, level, Aarr[level]->rmap->N, (double)emax, (double)emin));
            cheb->emin_provided = emin;
            cheb->emax_provided = emax;
          }
        }
      }
    }

    PetscCall(PCSetUp_MG(pc));

    /* clean up */
    for (level = 1; level < pc_gamg->Nlevels; level++) {
      PetscCall(MatDestroy(&Parr[level]));
      PetscCall(MatDestroy(&Aarr[level]));
    }
  } else {
    KSP smoother;

    PetscCall(PetscInfo(pc, "%s: One level solver used (system is seen as DD). Using default solver.\n", ((PetscObject)pc)->prefix));
    PetscCall(PCMGGetSmoother(pc, 0, &smoother));
    PetscCall(KSPSetOperators(smoother, Aarr[0], Aarr[0]));
    PetscCall(KSPSetType(smoother, KSPPREONLY));
    PetscCall(PCSetUp_MG(pc));
  }
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
 PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
   that was created with PCCreate_GAMG().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCall(PCReset_GAMG(pc));
  if (pc_gamg->ops->destroy) PetscCall((*pc_gamg->ops->destroy)(pc));
  PetscCall(PetscFree(pc_gamg->ops));
  PetscCall(PetscFree(pc_gamg->gamg_type_name));
  PetscCall(PetscFree(pc_gamg));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetProcEqLim_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseEqLim_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRepartition_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetEigenvalues_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetUseSAEstEig_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRecomputeEstEig_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetReuseInterpolation_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetUseAggs_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetParallelCoarseGridSolve_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCpuPinCoarseGrids_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseGridLayoutType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThreshold_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRankReductionFactors_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThresholdScale_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGGetType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetNlevels_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetHEM_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndices_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndex_C", NULL));
  PetscCall(PCDestroy_MG(pc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetProcEqLim - Set number of equations to aim for per process on the coarse grids via processor reduction in `PCGAMG`

  Logically Collective

  Input Parameters:
+ pc - the preconditioner context
- n  - the number of equations

  Options Database Key:
. -pc_gamg_process_eq_limit <limit> - set the limit

  Level: intermediate

  Note:
  `PCGAMG` will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit> equations on each process
  that has degrees of freedom

  Developer Note:
  Should be named `PCGAMGSetProcessEquationLimit()`.

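  Example Usage:
  An illustrative sketch (the limit of 2500 is arbitrary); equivalent to `-pc_gamg_process_eq_limit 2500`
.vb
  PetscCall(PCGAMGSetProcEqLim(pc, 2500));
.ve
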
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseEqLim()`, `PCGAMGSetRankReductionFactors()`, `PCGAMGSetRepartition()`
@*/
PetscErrorCode PCGAMGSetProcEqLim(PC pc, PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetProcEqLim_C", (PC, PetscInt), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetProcEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  if (n > 0) pc_gamg->min_eq_proc = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetCoarseEqLim - Set maximum number of equations on the coarsest grid of `PCGAMG`

  Collective

  Input Parameters:
+ pc - the preconditioner context
- n  - maximum number of equations to aim for

  Options Database Key:
. -pc_gamg_coarse_eq_limit <limit> - set the limit

  Level: intermediate

  Note:
  For example, `-pc_gamg_coarse_eq_limit 1000` will stop coarsening once a coarse grid
  has fewer than 1000 unknowns.

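  Example Usage:
  An illustrative sketch (the limit of 1000 is arbitrary, matching the note above); equivalent to `-pc_gamg_coarse_eq_limit 1000`
.vb
  PetscCall(PCGAMGSetCoarseEqLim(pc, 1000));
.ve
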
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetRankReductionFactors()`, `PCGAMGSetRepartition()`,
          `PCGAMGSetParallelCoarseGridSolve()`
@*/
PetscErrorCode PCGAMGSetCoarseEqLim(PC pc, PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetCoarseEqLim_C", (PC, PetscInt), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetCoarseEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  if (n > 0) pc_gamg->coarse_eq_limit = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetRepartition - Repartition the degrees of freedom across the processors on the coarser grids when reducing the number of MPI ranks to use

  Collective

  Input Parameters:
+ pc - the preconditioner context
- n  - `PETSC_TRUE` or `PETSC_FALSE`

  Options Database Key:
. -pc_gamg_repartition <true,false> - turn on the repartitioning

  Level: intermediate

  Note:
  This will generally improve the load balancing of the work on each level so the solves will be faster, but it adds to the
  preconditioner setup time.

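  Example Usage:
  A minimal sketch; equivalent to `-pc_gamg_repartition true`
.vb
  PetscCall(PCGAMGSetRepartition(pc, PETSC_TRUE));
.ve
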
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetRankReductionFactors()`
@*/
PetscErrorCode PCGAMGSetRepartition(PC pc, PetscBool n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetRepartition_C", (PC, PetscBool), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetRepartition_GAMG(PC pc, PetscBool n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->repart = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetUseSAEstEig - Use the eigen estimate from smoothed aggregation for the Chebyshev smoother during the solution process

  Collective

  Input Parameters:
+ pc - the preconditioner context
- b  - flag

  Options Database Key:
. -pc_gamg_use_sa_esteig <true,false> - use the eigen estimate

  Level: advanced

  Notes:
  Smoothed aggregation constructs the smoothed prolongator $P = (I - \omega D^{-1} A) T$ where $T$ is the tentative prolongator and $D$ is the diagonal of $A$.
  Eigenvalue estimates (based on a few `KSPCG` or `KSPGMRES` iterations) are computed to choose $\omega$ so that this is a stable smoothing operation.
  If `KSPCHEBYSHEV` with `PCJACOBI` (diagonal) preconditioning is used for smoothing on the finest level, then the eigenvalue estimates
  can be reused during the solution process.
  This option is only used when the smoother uses `PCJACOBI`, and should be turned off when a different `PCJacobiType` is used.
  It became the default in PETSc 3.17.

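  Example Usage:
  A minimal sketch that turns the reuse off, for example when the smoother does not use `PCJACOBI`
.vb
  PetscCall(PCGAMGSetUseSAEstEig(pc, PETSC_FALSE));
.ve
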
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `KSPChebyshevSetEigenvalues()`, `KSPChebyshevEstEigSet()`, `PCGAMGSetRecomputeEstEig()`
@*/
PetscErrorCode PCGAMGSetUseSAEstEig(PC pc, PetscBool b)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetUseSAEstEig_C", (PC, PetscBool), (pc, b));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetUseSAEstEig_GAMG(PC pc, PetscBool b)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_sa_esteig = b;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetRecomputeEstEig - Set flag for Chebyshev smoothers to recompute the eigen estimates when a new matrix is used

  Collective

  Input Parameters:
+ pc - the preconditioner context
- b  - flag, default is `PETSC_TRUE`

  Options Database Key:
. -pc_gamg_recompute_esteig <true> - recompute the eigen estimates when a new matrix is used
1139 
1140   Level: advanced
1141 
1142   Note:
1143   If the matrix changes only slightly in a new solve using ``PETSC_FALSE`` will save time in the setting up of the preconditioner
1144   and may not affect the solution time much.
1145 
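  Example Usage:
  A minimal sketch (assumes `pc` is a `PCGAMG` preconditioner context and the matrix changes little between solves):
.vb
  PetscCall(PCGAMGSetRecomputeEstEig(pc, PETSC_FALSE)); /* reuse the previous eigen estimates */
.ve
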
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `KSPChebyshevSetEigenvalues()`, `KSPChebyshevEstEigSet()`
@*/
PetscErrorCode PCGAMGSetRecomputeEstEig(PC pc, PetscBool b)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetRecomputeEstEig_C", (PC, PetscBool), (pc, b));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetRecomputeEstEig_GAMG(PC pc, PetscBool b)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->recompute_esteig = b;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetEigenvalues - Set user-provided estimates of the extreme eigenvalues, used by smoothed aggregation in place of the internally computed estimates

  Collective

  Input Parameters:
+ pc   - the preconditioner context
. emax - max eigenvalue
- emin - min eigenvalue

  Options Database Key:
. -pc_gamg_eigenvalues <emin,emax> - estimates of the eigenvalues

  Level: intermediate

  Note:
  The two estimates must be of the same sign, and `emax` must be larger than `emin`.

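  Example Usage:
  A minimal sketch (assumes `pc` is a `PCGAMG` preconditioner context; the values are illustrative):
.vb
  PetscCall(PCGAMGSetEigenvalues(pc, 2.0, 0.1)); /* emax = 2.0, emin = 0.1, same sign, emax > emin */
.ve
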
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetUseSAEstEig()`
@*/
PetscErrorCode PCGAMGSetEigenvalues(PC pc, PetscReal emax, PetscReal emin)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetEigenvalues_C", (PC, PetscReal, PetscReal), (pc, emax, emin));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetEigenvalues_GAMG(PC pc, PetscReal emax, PetscReal emin)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCheck(emax > emin, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Maximum eigenvalue must be larger than minimum: max %g min %g", (double)emax, (double)emin);
  PetscCheck(emax * emin > 0.0, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Both eigenvalues must be of the same sign: max %g min %g", (double)emax, (double)emin);
  pc_gamg->emax = emax;
  pc_gamg->emin = emin;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetReuseInterpolation - Reuse prolongation when rebuilding a `PCGAMG` algebraic multigrid preconditioner

  Collective

  Input Parameters:
+ pc - the preconditioner context
- n  - `PETSC_TRUE` or `PETSC_FALSE`

  Options Database Key:
. -pc_gamg_reuse_interpolation <true,false> - reuse the previous interpolation

  Level: intermediate

  Note:
  May negatively affect the convergence rate of the method on new matrices if the matrix entries change a great deal, but allows
  rebuilding the preconditioner more quickly.

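  Example Usage:
  A minimal sketch (assumes `pc` is a `PCGAMG` preconditioner context that will be rebuilt with slightly changed matrix entries):
.vb
  PetscCall(PCGAMGSetReuseInterpolation(pc, PETSC_TRUE)); /* keep the prolongators across rebuilds */
.ve
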
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`
@*/
PetscErrorCode PCGAMGSetReuseInterpolation(PC pc, PetscBool n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetReuseInterpolation_C", (PC, PetscBool), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetReuseInterpolation_GAMG(PC pc, PetscBool n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->reuse_prol = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGASMSetUseAggs - Have the `PCGAMG` smoother on each level use `PCASM` where the aggregates defined by the coarsening process are
  the subdomains for the additive Schwarz preconditioner used as the smoother

  Collective

  Input Parameters:
+ pc  - the preconditioner context
- flg - `PETSC_TRUE` to use aggregates, `PETSC_FALSE` to not

  Options Database Key:
. -pc_gamg_asm_use_agg <true,false> - use aggregates to define the additive Schwarz subdomains

  Level: intermediate

  Note:
  This option automatically sets the preconditioner on the levels to be `PCASM`.

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCASM`, `PCSetType()`
@*/
PetscErrorCode PCGAMGASMSetUseAggs(PC pc, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGASMSetUseAggs_C", (PC, PetscBool), (pc, flg));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGASMSetUseAggs_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_aggs_in_asm = flg;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetParallelCoarseGridSolve - allow a parallel coarse grid solver

  Collective

  Input Parameters:
+ pc  - the preconditioner context
- flg - `PETSC_TRUE` to allow the coarse grid solve to remain parallel instead of forcing the coarse grid onto one process

  Options Database Key:
. -pc_gamg_parallel_coarse_grid_solver - use a parallel coarse grid solver

  Level: intermediate

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseGridLayoutType()`, `PCGAMGSetCpuPinCoarseGrids()`, `PCGAMGSetRankReductionFactors()`
@*/
PetscErrorCode PCGAMGSetParallelCoarseGridSolve(PC pc, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetParallelCoarseGridSolve_C", (PC, PetscBool), (pc, flg));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetParallelCoarseGridSolve_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_parallel_coarse_grid_solver = flg;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetCpuPinCoarseGrids - pin the coarse grids created in `PCGAMG` to run only on the CPU since the problems may be too small to run efficiently on the GPUs

  Collective

  Input Parameters:
+ pc  - the preconditioner context
- flg - `PETSC_TRUE` to pin coarse grids to the CPU

  Options Database Key:
. -pc_gamg_cpu_pin_coarse_grids - pin the coarse grids to the CPU

  Level: intermediate

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseGridLayoutType()`, `PCGAMGSetParallelCoarseGridSolve()`
@*/
PetscErrorCode PCGAMGSetCpuPinCoarseGrids(PC pc, PetscBool flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetCpuPinCoarseGrids_C", (PC, PetscBool), (pc, flg));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetCpuPinCoarseGrids_GAMG(PC pc, PetscBool flg)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->cpu_pin_coarse_grids = flg;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetCoarseGridLayoutType - set the layout of the reduced coarse grids: on the processes in natural order (compact) or spread out across the machine

  Collective

  Input Parameters:
+ pc  - the preconditioner context
- flg - `PCGAMGLayoutType` type, either `PCGAMG_LAYOUT_COMPACT` or `PCGAMG_LAYOUT_SPREAD`

  Options Database Key:
. -pc_gamg_coarse_grid_layout_type <compact,spread> - choose the layout of the reduced coarse grids

  Level: intermediate

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetParallelCoarseGridSolve()`, `PCGAMGSetCpuPinCoarseGrids()`, `PCGAMGLayoutType`, `PCGAMG_LAYOUT_COMPACT`, `PCGAMG_LAYOUT_SPREAD`
@*/
PetscErrorCode PCGAMGSetCoarseGridLayoutType(PC pc, PCGAMGLayoutType flg)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetCoarseGridLayoutType_C", (PC, PCGAMGLayoutType), (pc, flg));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetCoarseGridLayoutType_GAMG(PC pc, PCGAMGLayoutType flg)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->layout_type = flg;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetNlevels - Sets the maximum number of levels `PCGAMG` will use

  Collective

  Input Parameters:
+ pc - the preconditioner
- n  - the maximum number of levels to use

  Options Database Key:
. -pc_mg_levels <n> - set the maximum number of levels to allow

  Level: intermediate

  Developer Notes:
  Should be called `PCGAMGSetMaximumNumberlevels()` and possibly be shared with `PCMG`

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`
@*/
PetscErrorCode PCGAMGSetNlevels(PC pc, PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetNlevels_C", (PC, PetscInt), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetNlevels_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->Nlevels = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGASMSetHEM - Sets the number of heavy edge matching (HEM) passes used to construct the `PCASM` subdomains

  Collective

  Input Parameters:
+ pc - the preconditioner
- n  - number of HEM matching passes used to construct the ASM subdomains

  Options Database Key:
. -pc_gamg_asm_hem <n> - set the number of HEM matching passes

  Level: intermediate

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`
@*/
PetscErrorCode PCGAMGASMSetHEM(PC pc, PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGASMSetHEM_C", (PC, PetscInt), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGASMSetHEM_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->asm_hem_aggs = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetThreshold - Relative threshold to use for dropping edges in aggregation graph

  Not Collective

  Input Parameters:
+ pc - the preconditioner context
. v  - array of threshold values for finest n levels; 0.0 means keep all nonzero entries in the graph; negative means keep even zero entries in the graph
- n  - number of threshold values provided in array

  Options Database Key:
. -pc_gamg_threshold <threshold> - the threshold to drop edges

  Level: intermediate

  Notes:
  Increasing the threshold decreases the rate of coarsening. Conversely, reducing the threshold increases the rate of coarsening (aggressive coarsening) and thereby reduces the complexity of the coarse grids, but generally results in slower solver convergence rates. Reducing the coarse grid complexity also considerably reduces the cost of the Galerkin coarse grid construction.
  Before coarsening or aggregating the graph, `PCGAMG` removes small values from the graph with this threshold, thus reducing the coupling in the graph and producing a different (perhaps better) coarse set of points.

  If `n` is less than the total number of coarsenings (see `PCGAMGSetNlevels()`), then threshold scaling (see `PCGAMGSetThresholdScale()`) is used for each successive coarsening.
  In this case, `PCGAMGSetThresholdScale()` must be called before `PCGAMGSetThreshold()`.
  If `n` is greater than the total number of levels, the excess entries in threshold will not be used.

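  Example Usage:
  A minimal sketch (assumes `pc` is a `PCGAMG` preconditioner context; the values are illustrative):
.vb
  PetscReal th[2] = {0.01, 0.005}; /* thresholds for the two finest levels */

  PetscCall(PCGAMGSetThreshold(pc, th, 2)); /* coarser levels continue with the threshold scaling */
.ve
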
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetAggressiveLevels()`, `PCGAMGMISkSetAggressive()`, `PCGAMGSetMinDegreeOrderingMISk()`, `PCGAMGSetThresholdScale()`
@*/
PetscErrorCode PCGAMGSetThreshold(PC pc, PetscReal v[], PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  if (n) PetscAssertPointer(v, 2);
  PetscTryMethod(pc, "PCGAMGSetThreshold_C", (PC, PetscReal[], PetscInt), (pc, v, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetThreshold_GAMG(PC pc, PetscReal v[], PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
  PetscInt i;

  PetscFunctionBegin;
  for (i = 0; i < PetscMin(n, PETSC_MG_MAXLEVELS); i++) pc_gamg->threshold[i] = v[i];
  for (; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->threshold[i] = pc_gamg->threshold[i - 1] * pc_gamg->threshold_scale;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetRankReductionFactors - Set a manual schedule for MPI rank reduction on coarse grids

  Collective

  Input Parameters:
+ pc - the preconditioner context
. v  - array of reduction factors; a 0 for the first value forces a reduction to one process/device on the first level when running on CUDA
- n  - number of values provided in array

  Options Database Key:
. -pc_gamg_rank_reduction_factors <factors> - provide the schedule

  Level: intermediate

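  Example Usage:
  A minimal sketch (assumes `pc` is a `PCGAMG` preconditioner context; the factors are illustrative):
.vb
  PetscInt f[2] = {4, 8}; /* reduce the number of active ranks 4x at the first coarsening, then 8x */

  PetscCall(PCGAMGSetRankReductionFactors(pc, f, 2)); /* unscheduled levels use the internal heuristics */
.ve
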
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetCoarseEqLim()`, `PCGAMGSetParallelCoarseGridSolve()`
@*/
PetscErrorCode PCGAMGSetRankReductionFactors(PC pc, PetscInt v[], PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  if (n) PetscAssertPointer(v, 2);
  PetscTryMethod(pc, "PCGAMGSetRankReductionFactors_C", (PC, PetscInt[], PetscInt), (pc, v, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetRankReductionFactors_GAMG(PC pc, PetscInt v[], PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
  PetscInt i;

  PetscFunctionBegin;
  for (i = 0; i < PetscMin(n, PETSC_MG_MAXLEVELS); i++) pc_gamg->level_reduction_factors[i] = v[i];
  for (; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->level_reduction_factors[i] = -1; /* unscheduled levels use the internal heuristics */
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetThresholdScale - Relative threshold reduction at each level

  Not Collective

  Input Parameters:
+ pc - the preconditioner context
- v  - the threshold value reduction, usually < 1.0

  Options Database Key:
. -pc_gamg_threshold_scale <v> - set the relative threshold reduction on each level

  Level: advanced

  Note:
  The initial threshold (for an arbitrary number of levels starting from the finest) can be set with `PCGAMGSetThreshold()`.
  This scaling is applied at each subsequent coarsening; for it to take effect this routine must be called before `PCGAMGSetThreshold()`.

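  Example Usage:
  A minimal sketch (assumes `pc` is a `PCGAMG` preconditioner context; the values are illustrative). Note the required call order:
.vb
  PetscReal th[1] = {0.02};

  PetscCall(PCGAMGSetThresholdScale(pc, 0.5)); /* must precede PCGAMGSetThreshold() */
  PetscCall(PCGAMGSetThreshold(pc, th, 1));    /* unspecified coarser levels get 0.01, 0.005, ... */
.ve
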
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetThreshold()`
@*/
PetscErrorCode PCGAMGSetThresholdScale(PC pc, PetscReal v)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetThresholdScale_C", (PC, PetscReal), (pc, v));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetThresholdScale_GAMG(PC pc, PetscReal v)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->threshold_scale = v;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetType - Set the type of algorithm `PCGAMG` should use

  Collective

  Input Parameters:
+ pc   - the preconditioner context
- type - `PCGAMGAGG`, `PCGAMGGEO`, or `PCGAMGCLASSICAL`

  Options Database Key:
. -pc_gamg_type <agg,geo,classical> - type of algebraic multigrid to apply - only agg is supported

  Level: intermediate

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMGGetType()`, `PCGAMG`, `PCGAMGType`
@*/
PetscErrorCode PCGAMGSetType(PC pc, PCGAMGType type)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetType_C", (PC, PCGAMGType), (pc, type));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGGetType - Get the type of algorithm `PCGAMG` will use

  Collective

  Input Parameter:
. pc - the preconditioner context

  Output Parameter:
. type - the type of algorithm used

  Level: intermediate

.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCGAMG`, `PCGAMGSetType()`, `PCGAMGType`
@*/
PetscErrorCode PCGAMGGetType(PC pc, PCGAMGType *type)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscUseMethod(pc, "PCGAMGGetType_C", (PC, PCGAMGType *), (pc, type));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGGetType_GAMG(PC pc, PCGAMGType *type)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  *type = pc_gamg->type;
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetType_GAMG(PC pc, PCGAMGType type)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
  PetscErrorCode (*r)(PC);

  PetscFunctionBegin;
  pc_gamg->type = type;
  PetscCall(PetscFunctionListFind(GAMGList, type, &r));
  PetscCheck(r, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown GAMG type %s given", type);
  if (pc_gamg->ops->destroy) {
    PetscCall((*pc_gamg->ops->destroy)(pc));
    PetscCall(PetscMemzero(pc_gamg->ops, sizeof(struct _PCGAMGOps)));
    pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
    /* cleaning up common data in pc_gamg - this should disappear someday */
    pc_gamg->data_cell_cols      = 0;
    pc_gamg->data_cell_rows      = 0;
    pc_gamg->orig_data_cell_cols = 0;
    pc_gamg->orig_data_cell_rows = 0;
    PetscCall(PetscFree(pc_gamg->data));
    pc_gamg->data_sz = 0;
  }
  PetscCall(PetscFree(pc_gamg->gamg_type_name));
  PetscCall(PetscStrallocpy(type, &pc_gamg->gamg_type_name));
  PetscCall((*r)(pc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCView_GAMG(PC pc, PetscViewer viewer)
{
  PC_MG    *mg      = (PC_MG *)pc->data;
  PC_GAMG  *pc_gamg = (PC_GAMG *)mg->innerctx;
  PetscReal gc = 0, oc = 0;

  PetscFunctionBegin;
  PetscCall(PetscViewerASCIIPrintf(viewer, "    GAMG specific options\n"));
  PetscCall(PetscViewerASCIIPrintf(viewer, "      Threshold for dropping small values in graph on each level ="));
  for (PetscInt i = 0; i < mg->nlevels; i++) PetscCall(PetscViewerASCIIPrintf(viewer, " %g", (double)pc_gamg->threshold[i]));
  PetscCall(PetscViewerASCIIPrintf(viewer, "\n"));
  PetscCall(PetscViewerASCIIPrintf(viewer, "      Threshold scaling factor for each level not specified = %g\n", (double)pc_gamg->threshold_scale));
  if (pc_gamg->use_aggs_in_asm) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using aggregates from coarsening process to define subdomains for PCASM\n")); // this takes precedence
  else if (pc_gamg->asm_hem_aggs) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using aggregates made with %d applications of heavy edge matching (HEM) to define subdomains for PCASM\n", (int)pc_gamg->asm_hem_aggs));
  if (pc_gamg->use_parallel_coarse_grid_solver) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using parallel coarse grid solver (all coarse grid equations not put on one process)\n"));
  if (pc_gamg->injection_index_size) {
    PetscCall(PetscViewerASCIIPrintf(viewer, "      Using injection restriction/prolongation on first level, dofs:"));
    for (int i = 0; i < pc_gamg->injection_index_size; i++) PetscCall(PetscViewerASCIIPrintf(viewer, " %d", (int)pc_gamg->injection_index[i]));
    PetscCall(PetscViewerASCIIPrintf(viewer, "\n"));
  }
  if (pc_gamg->ops->view) PetscCall((*pc_gamg->ops->view)(pc, viewer));
  PetscCall(PCMGGetGridComplexity(pc, &gc, &oc));
  PetscCall(PetscViewerASCIIPrintf(viewer, "      Complexity:    grid = %g    operator = %g\n", (double)gc, (double)oc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetInjectionIndex - Set the subset of the variables per vertex that are injected into the coarse grid space

  Logically Collective

  Input Parameters:
+ pc  - the preconditioner context
. n   - number of indices
- idx - array of indices

  Options Database Key:
. -pc_gamg_injection_index - array of indices of the subset of variables per vertex to use for the injection coarse grid space

  Level: intermediate

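  Example Usage:
  A minimal sketch (assumes `pc` is a `PCGAMG` preconditioner context for a problem with several dofs per vertex; the indices are illustrative):
.vb
  PetscInt idx[2] = {0, 1}; /* inject only the first two dofs of each vertex */

  PetscCall(PCGAMGSetInjectionIndex(pc, 2, idx));
.ve
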
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), `PCGAMG`
@*/
PetscErrorCode PCGAMGSetInjectionIndex(PC pc, PetscInt n, PetscInt idx[])
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscValidLogicalCollectiveInt(pc, n, 2);
  PetscTryMethod(pc, "PCGAMGSetInjectionIndex_C", (PC, PetscInt, PetscInt[]), (pc, n, idx));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetInjectionIndex_GAMG(PC pc, PetscInt n, PetscInt idx[])
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->injection_index_size = n;
  PetscCheck(n < MAT_COARSEN_STRENGTH_INDEX_SIZE, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "array size %d larger than max %d", (int)n, MAT_COARSEN_STRENGTH_INDEX_SIZE);
  for (PetscInt i = 0; i < n; i++) pc_gamg->injection_index[i] = idx[i];
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCSetFromOptions_GAMG(PC pc, PetscOptionItems *PetscOptionsObject)
{
  PC_MG             *mg      = (PC_MG *)pc->data;
  PC_GAMG           *pc_gamg = (PC_GAMG *)mg->innerctx;
  PetscBool          flag;
  MPI_Comm           comm;
  char               prefix[256], tname[32];
  PetscInt           i, n;
  const char        *pcpre;
  static const char *LayoutTypes[] = {"compact", "spread", "PCGAMGLayoutType", "PC_GAMG_LAYOUT", NULL};

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)pc, &comm));
  PetscOptionsHeadBegin(PetscOptionsObject, "GAMG options");
  PetscCall(PetscOptionsFList("-pc_gamg_type", "Type of AMG method (only 'agg' supported and useful)", "PCGAMGSetType", GAMGList, pc_gamg->gamg_type_name, tname, sizeof(tname), &flag));
  if (flag) PetscCall(PCGAMGSetType(pc, tname));
  PetscCall(PetscOptionsBool("-pc_gamg_repartition", "Repartition coarse grids", "PCGAMGSetRepartition", pc_gamg->repart, &pc_gamg->repart, NULL));
  PetscCall(PetscOptionsBool("-pc_gamg_use_sa_esteig", "Use eigen estimate from smoothed aggregation for smoother", "PCGAMGSetUseSAEstEig", pc_gamg->use_sa_esteig, &pc_gamg->use_sa_esteig, NULL));
  PetscCall(PetscOptionsBool("-pc_gamg_recompute_esteig", "Set flag to recompute eigen estimates for Chebyshev when matrix changes", "PCGAMGSetRecomputeEstEig", pc_gamg->recompute_esteig, &pc_gamg->recompute_esteig, NULL));
  PetscCall(PetscOptionsBool("-pc_gamg_reuse_interpolation", "Reuse prolongation operator", "PCGAMGSetReuseInterpolation", pc_gamg->reuse_prol, &pc_gamg->reuse_prol, NULL));
  PetscCall(PetscOptionsBool("-pc_gamg_asm_use_agg", "Use aggregation aggregates for ASM smoother", "PCGAMGASMSetUseAggs", pc_gamg->use_aggs_in_asm, &pc_gamg->use_aggs_in_asm, NULL));
  PetscCall(PetscOptionsBool("-pc_gamg_parallel_coarse_grid_solver", "Use parallel coarse grid solver (otherwise put last grid on one process)", "PCGAMGSetParallelCoarseGridSolve", pc_gamg->use_parallel_coarse_grid_solver, &pc_gamg->use_parallel_coarse_grid_solver, NULL));
  PetscCall(PetscOptionsBool("-pc_gamg_cpu_pin_coarse_grids", "Pin coarse grids to the CPU", "PCGAMGSetCpuPinCoarseGrids", pc_gamg->cpu_pin_coarse_grids, &pc_gamg->cpu_pin_coarse_grids, NULL));
  PetscCall(PetscOptionsEnum("-pc_gamg_coarse_grid_layout_type", "compact: place reduced grids on processes in natural order; spread: distribute to whole machine for more memory bandwidth", "PCGAMGSetCoarseGridLayoutType", LayoutTypes,
                             (PetscEnum)pc_gamg->layout_type, (PetscEnum *)&pc_gamg->layout_type, NULL));
  PetscCall(PetscOptionsInt("-pc_gamg_process_eq_limit", "Limit (goal) on number of equations per process on coarse grids", "PCGAMGSetProcEqLim", pc_gamg->min_eq_proc, &pc_gamg->min_eq_proc, NULL));
  PetscCall(PetscOptionsInt("-pc_gamg_coarse_eq_limit", "Limit on number of equations for the coarse grid", "PCGAMGSetCoarseEqLim", pc_gamg->coarse_eq_limit, &pc_gamg->coarse_eq_limit, NULL));
  PetscCall(PetscOptionsInt("-pc_gamg_asm_hem_aggs", "Number of HEM matching passes in aggregates for ASM smoother", "PCGAMGASMSetHEM", pc_gamg->asm_hem_aggs, &pc_gamg->asm_hem_aggs, NULL));
  PetscCall(PetscOptionsReal("-pc_gamg_threshold_scale", "Scaling of threshold for each level not specified", "PCGAMGSetThresholdScale", pc_gamg->threshold_scale, &pc_gamg->threshold_scale, NULL));
  n = PETSC_MG_MAXLEVELS;
  PetscCall(PetscOptionsRealArray("-pc_gamg_threshold", "Relative threshold to use for dropping edges in aggregation graph", "PCGAMGSetThreshold", pc_gamg->threshold, &n, &flag));
  if (!flag || n < PETSC_MG_MAXLEVELS) {
    if (!flag || n == 0) n = 1; /* guard against an empty option value */
    i = n;
    do {
      pc_gamg->threshold[i] = pc_gamg->threshold[i - 1] * pc_gamg->threshold_scale;
    } while (++i < PETSC_MG_MAXLEVELS);
  }
  PetscCall(PetscOptionsInt("-pc_mg_levels", "Set number of MG levels (should get from base class)", "PCGAMGSetNlevels", pc_gamg->Nlevels, &pc_gamg->Nlevels, NULL));
  PetscCheck(pc_gamg->Nlevels <= PETSC_MG_MAXLEVELS, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "-pc_mg_levels (%d) >= PETSC_MG_MAXLEVELS (%d)", (int)pc_gamg->Nlevels, (int)PETSC_MG_MAXLEVELS);
  n = PETSC_MG_MAXLEVELS;
  PetscCall(PetscOptionsIntArray("-pc_gamg_rank_reduction_factors", "Manual schedule of coarse grid reduction factors that overrides internal heuristics (0 for first reduction puts one process/device)", "PCGAMGSetRankReductionFactors", pc_gamg->level_reduction_factors, &n, &flag));
  if (!flag) i = 0;
  else i = n;
  while (i < PETSC_MG_MAXLEVELS) pc_gamg->level_reduction_factors[i++] = -1; /* unspecified levels use the internal heuristics; the while loop (rather than do-while) avoids writing past the array when all factors are given */
  {
    PetscReal eminmax[2] = {0., 0.};
    n                    = 2;
    PetscCall(PetscOptionsRealArray("-pc_gamg_eigenvalues", "extreme eigenvalues for smoothed aggregation", "PCGAMGSetEigenvalues", eminmax, &n, &flag));
    if (flag) {
      PetscCheck(n == 2, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "-pc_gamg_eigenvalues: must specify 2 parameters, min and max eigenvalues");
      PetscCall(PCGAMGSetEigenvalues(pc, eminmax[1], eminmax[0]));
    }
  }
  pc_gamg->injection_index_size = MAT_COARSEN_STRENGTH_INDEX_SIZE;
  PetscCall(PetscOptionsIntArray("-pc_gamg_injection_index", "Array of indices to use for the injection coarse grid space", "PCGAMGSetInjectionIndex", pc_gamg->injection_index, &pc_gamg->injection_index_size, NULL));
  /* set options for subtype */
  PetscCall((*pc_gamg->ops->setfromoptions)(pc, PetscOptionsObject));

  PetscCall(PCGetOptionsPrefix(pc, &pcpre));
  PetscCall(PetscSNPrintf(prefix, sizeof(prefix), "%spc_gamg_", pcpre ? pcpre : ""));
  PetscOptionsHeadEnd();
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
  PCGAMG - Geometric algebraic multigrid (AMG) preconditioner

  Options Database Keys:
+ -pc_gamg_type <type,default=agg> - one of agg, geo, or classical (only smoothed aggregation, agg, supported)
. -pc_gamg_repartition <bool,default=false> - repartition the degrees of freedom across the coarse grids as they are determined
. -pc_gamg_asm_use_agg <bool,default=false> - use the aggregates from the coarsening process to define the subdomains on each level for the `PCASM` smoother
. -pc_gamg_process_eq_limit <limit, default=50> - `PCGAMG` will reduce the number of MPI ranks used directly on the coarse grids so that there are around <limit>
                                        equations on each process that has degrees of freedom
. -pc_gamg_coarse_eq_limit <limit, default=50> - Set maximum number of equations on coarsest grid to aim for.
. -pc_gamg_reuse_interpolation <bool,default=true> - when rebuilding the algebraic multigrid preconditioner reuse the previously computed interpolations (should always be true)
. -pc_gamg_threshold[] <thresh,default=[-1,...]> - Before aggregating the graph `PCGAMG` will remove small values from the graph on each level (< 0 does no filtering)
- -pc_gamg_threshold_scale <scale,default=1> - Scaling of threshold on each coarser grid if not specified

  Options Database Keys for Aggregation:
+ -pc_gamg_agg_nsmooths <nsmooth, default=1> - number of smoothing steps to use with smoothed aggregation to construct prolongation
. -pc_gamg_aggressive_coarsening <n,default=1> - number of aggressive coarsening (MIS-2) levels from finest.
. -pc_gamg_aggressive_square_graph <bool,default=false> - Use square graph (A'A) or MIS-k (k=2) for aggressive coarsening
. -pc_gamg_mis_k_minimum_degree_ordering <bool,default=true> - Use minimum degree ordering in greedy MIS algorithm
. -pc_gamg_asm_hem_aggs <n,default=0> - Number of HEM aggregation steps for ASM smoother
- -pc_gamg_aggressive_mis_k <n,default=2> - Number (k) distance in MIS coarsening (>2 is 'aggressive')

  Options Database Keys for Multigrid:
+ -pc_mg_cycle_type <v> - v or w, see `PCMGSetCycleType()`
. -pc_mg_distinct_smoothup - configure the up and down (pre and post) smoothers separately, see `PCMGSetDistinctSmoothUp()`
. -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
- -pc_mg_levels <levels> - Number of levels of multigrid to use. GAMG has a heuristic so pc_mg_levels is not usually used with GAMG

  Level: intermediate

  Notes:
  To obtain good performance for `PCGAMG` for vector valued problems you must
  call `MatSetBlockSize()` to indicate the number of degrees of freedom per grid point, and
  call `MatSetNearNullSpace()` (or `PCSetCoordinates()` if solving the equations of elasticity) to indicate the near null space of the operator

  The many options for `PCMG` also work directly for `PCGAMG`, such as controlling the smoothers on each level.

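  Example Usage:
  A representative command line (the executable name and option values are illustrative):
.vb
  ./app -ksp_type cg -pc_type gamg -pc_gamg_type agg -pc_gamg_agg_nsmooths 1 -pc_gamg_threshold 0.01 -pc_gamg_coarse_eq_limit 100
.ve
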
.seealso: [the Users Manual section on PCGAMG](sec_amg), [the Users Manual section on PCMG](sec_mg), [](ch_ksp), `PCCreate()`, `PCSetType()`,
          `MatSetBlockSize()`,
          `PCMGType`, `PCSetCoordinates()`, `MatSetNearNullSpace()`, `PCGAMGSetType()`, `PCGAMGAGG`, `PCGAMGGEO`, `PCGAMGCLASSICAL`, `PCGAMGSetProcEqLim()`,
          `PCGAMGSetCoarseEqLim()`, `PCGAMGSetRepartition()`, `PCGAMGRegister()`, `PCGAMGSetReuseInterpolation()`, `PCGAMGASMSetUseAggs()`,
          `PCGAMGSetParallelCoarseGridSolve()`, `PCGAMGSetNlevels()`, `PCGAMGSetThreshold()`, `PCGAMGGetType()`, `PCGAMGSetUseSAEstEig()`
M*/
PETSC_EXTERN PetscErrorCode PCCreate_GAMG(PC pc)
{
  PC_GAMG *pc_gamg;
  PC_MG   *mg;

  PetscFunctionBegin;
  /* register AMG type */
  PetscCall(PCGAMGInitializePackage());

  /* PCGAMG is derived from PCMG. Initialize pc as PCMG */
  PetscCall(PCSetType(pc, PCMG));
  PetscCall(PetscObjectChangeTypeName((PetscObject)pc, PCGAMG));

  /* create a supporting struct and attach it to pc */
  PetscCall(PetscNew(&pc_gamg));
  PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_EXTERNAL));
  mg           = (PC_MG *)pc->data;
  mg->innerctx = pc_gamg;

  PetscCall(PetscNew(&pc_gamg->ops));

  /* these should be in subctx but repartitioning needs simple arrays */
  pc_gamg->data_sz = 0;
  pc_gamg->data    = NULL;

  /* overwrite the pointers of PCMG with the functions of the derived class PCGAMG */
  pc->ops->setfromoptions = PCSetFromOptions_GAMG;
  pc->ops->setup          = PCSetUp_GAMG;
  pc->ops->reset          = PCReset_GAMG;
  pc->ops->destroy        = PCDestroy_GAMG;
  mg->view                = PCView_GAMG;

  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetProcEqLim_C", PCGAMGSetProcEqLim_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseEqLim_C", PCGAMGSetCoarseEqLim_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRepartition_C", PCGAMGSetRepartition_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetEigenvalues_C", PCGAMGSetEigenvalues_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetUseSAEstEig_C", PCGAMGSetUseSAEstEig_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRecomputeEstEig_C", PCGAMGSetRecomputeEstEig_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetReuseInterpolation_C", PCGAMGSetReuseInterpolation_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetUseAggs_C", PCGAMGASMSetUseAggs_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetParallelCoarseGridSolve_C", PCGAMGSetParallelCoarseGridSolve_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCpuPinCoarseGrids_C", PCGAMGSetCpuPinCoarseGrids_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseGridLayoutType_C", PCGAMGSetCoarseGridLayoutType_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThreshold_C", PCGAMGSetThreshold_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRankReductionFactors_C", PCGAMGSetRankReductionFactors_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThresholdScale_C", PCGAMGSetThresholdScale_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetType_C", PCGAMGSetType_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGGetType_C", PCGAMGGetType_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetNlevels_C", PCGAMGSetNlevels_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetHEM_C", PCGAMGASMSetHEM_GAMG));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndex_C", PCGAMGSetInjectionIndex_GAMG));
  pc_gamg->repart                          = PETSC_FALSE;
  pc_gamg->reuse_prol                      = PETSC_TRUE;
  pc_gamg->use_aggs_in_asm                 = PETSC_FALSE;
  pc_gamg->use_parallel_coarse_grid_solver = PETSC_FALSE;
  pc_gamg->cpu_pin_coarse_grids            = PETSC_FALSE;
  pc_gamg->layout_type                     = PCGAMG_LAYOUT_SPREAD;
  pc_gamg->min_eq_proc                     = 50;
  pc_gamg->asm_hem_aggs                    = 0;
  pc_gamg->coarse_eq_limit                 = 50;
  for (int i = 0; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->threshold[i] = -1;
  pc_gamg->threshold_scale  = 1.;
  pc_gamg->Nlevels          = PETSC_MG_MAXLEVELS;
  pc_gamg->current_level    = 0; /* don't need to init really */
  pc_gamg->use_sa_esteig    = PETSC_TRUE;
  pc_gamg->recompute_esteig = PETSC_TRUE;
  pc_gamg->emin             = 0;
  pc_gamg->emax             = 0;

  pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;

  /* PCSetUp_GAMG assumes that the type has been set, so set it to the default now */
  PetscCall(PCGAMGSetType(pc, PCGAMGAGG));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PCGAMGInitializePackage - This function initializes everything in the `PCGAMG` package. It is called
  from `PCInitializePackage()`.

  Level: developer

.seealso: [](ch_ksp), `PetscInitialize()`
@*/
PetscErrorCode PCGAMGInitializePackage(void)
{
  PetscInt l;

  PetscFunctionBegin;
  if (PCGAMGPackageInitialized) PetscFunctionReturn(PETSC_SUCCESS);
  PCGAMGPackageInitialized = PETSC_TRUE;
  PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGGEO, PCCreateGAMG_GEO));
  PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGAGG, PCCreateGAMG_AGG));
  PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGCLASSICAL, PCCreateGAMG_Classical));
  PetscCall(PetscRegisterFinalize(PCGAMGFinalizePackage));

  /* general events */
  PetscCall(PetscLogEventRegister("PCSetUp_GAMG+", PC_CLASSID, &petsc_gamg_setup_events[GAMG_SETUP]));
  PetscCall(PetscLogEventRegister(" PCGAMGCreateG", PC_CLASSID, &petsc_gamg_setup_events[GAMG_GRAPH]));
  PetscCall(PetscLogEventRegister(" GAMG Coarsen", PC_CLASSID, &petsc_gamg_setup_events[GAMG_COARSEN]));
  PetscCall(PetscLogEventRegister("  GAMG MIS/Agg", PC_CLASSID, &petsc_gamg_setup_events[GAMG_MIS]));
  PetscCall(PetscLogEventRegister(" PCGAMGProl", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROL]));
  PetscCall(PetscLogEventRegister("  GAMG Prol-col", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROLA]));
  PetscCall(PetscLogEventRegister("  GAMG Prol-lift", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROLB]));
  PetscCall(PetscLogEventRegister(" PCGAMGOptProl", PC_CLASSID, &petsc_gamg_setup_events[GAMG_OPT]));
  PetscCall(PetscLogEventRegister("  GAMG smooth", PC_CLASSID, &petsc_gamg_setup_events[GAMG_OPTSM]));
  PetscCall(PetscLogEventRegister(" PCGAMGCreateL", PC_CLASSID, &petsc_gamg_setup_events[GAMG_LEVEL]));
  PetscCall(PetscLogEventRegister("  GAMG PtAP", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PTAP]));
  PetscCall(PetscLogEventRegister("  GAMG Reduce", PC_CLASSID, &petsc_gamg_setup_events[GAMG_REDUCE]));
  PetscCall(PetscLogEventRegister("   GAMG Repart", PC_CLASSID, &petsc_gamg_setup_events[GAMG_REPART]));
  /* PetscCall(PetscLogEventRegister("   GAMG Inv-Srt", PC_CLASSID, &petsc_gamg_setup_events[SET13])); */
  /* PetscCall(PetscLogEventRegister("   GAMG Move A", PC_CLASSID, &petsc_gamg_setup_events[SET14])); */
  /* PetscCall(PetscLogEventRegister("   GAMG Move P", PC_CLASSID, &petsc_gamg_setup_events[SET15])); */
  for (l = 0; l < PETSC_MG_MAXLEVELS; l++) {
    char ename[32];

    PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Squ l%02" PetscInt_FMT, l));
    PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][0]));
    PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Gal l%02" PetscInt_FMT, l));
    PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][1]));
    PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Opt l%02" PetscInt_FMT, l));
    PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][2]));
  }
#if defined(GAMG_STAGES)
  { /* create timer stages */
    char str[32];
    PetscCall(PetscSNPrintf(str, PETSC_STATIC_ARRAY_LENGTH(str), "GAMG Level %d", 0));
    PetscCall(PetscLogStageRegister(str, &gamg_stages[0]));
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PCGAMGFinalizePackage - This function frees everything from the `PCGAMG` package. It is
  called from `PetscFinalize()` automatically.

  Level: developer

.seealso: [](ch_ksp), `PetscFinalize()`
@*/
PetscErrorCode PCGAMGFinalizePackage(void)
{
  PetscFunctionBegin;
  PCGAMGPackageInitialized = PETSC_FALSE;
  PetscCall(PetscFunctionListDestroy(&GAMGList));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@C
  PCGAMGRegister - Register a `PCGAMG` implementation.

  Input Parameters:
+ type   - string that will be used as the name of the `PCGAMG` type.
- create - function for creating the GAMG context.

  Level: developer

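  Example Usage:
  A sketch of registering a hypothetical user-defined implementation; `PCCreateGAMG_My` is a user-provided creation routine, not part of PETSc:
.vb
  extern PetscErrorCode PCCreateGAMG_My(PC);

  PetscCall(PCGAMGRegister("my_gamg", PCCreateGAMG_My));
  PetscCall(PCGAMGSetType(pc, "my_gamg")); /* select it on an existing PCGAMG preconditioner */
.ve
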
.seealso: [](ch_ksp), `PCGAMGType`, `PCGAMG`, `PCGAMGSetType()`
@*/
PetscErrorCode PCGAMGRegister(PCGAMGType type, PetscErrorCode (*create)(PC))
{
  PetscFunctionBegin;
  PetscCall(PCGAMGInitializePackage());
  PetscCall(PetscFunctionListAdd(&GAMGList, type, create));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGCreateGraph - Creates a graph that is used by the `PCGAMGType` in the coarsening process

  Input Parameters:
+ pc - the `PCGAMG`
- A  - the matrix, for any level

  Output Parameter:
. G - the graph

  Level: advanced

.seealso: [](ch_ksp), `PCGAMGType`, `PCGAMG`, `PCGAMGSetType()`
@*/
PetscErrorCode PCGAMGCreateGraph(PC pc, Mat A, Mat *G)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCall(pc_gamg->ops->creategraph(pc, A, G));
  PetscFunctionReturn(PETSC_SUCCESS);
}
2028