/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
 */
#include <../src/ksp/pc/impls/gamg/gamg.h>            /*I "petscpc.h" I*/
#include <../src/ksp/ksp/impls/cheby/chebyshevimpl.h> /*I "petscksp.h" I*/

#if defined(PETSC_HAVE_CUDA)
  #include <cuda_runtime.h>
#endif

#if defined(PETSC_HAVE_HIP)
  #include <hip/hip_runtime.h>
#endif

PetscLogEvent petsc_gamg_setup_events[GAMG_NUM_SET];
PetscLogEvent petsc_gamg_setup_matmat_events[PETSC_MG_MAXLEVELS][3];

// #define GAMG_STAGES
#if defined(GAMG_STAGES)
static PetscLogStage gamg_stages[PETSC_MG_MAXLEVELS];
#endif

static PetscFunctionList GAMGList = NULL;
static PetscBool         PCGAMGPackageInitialized;

static PetscErrorCode PCReset_GAMG(PC pc)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCall(PetscFree(pc_gamg->data));
  pc_gamg->data_sz = 0;
  PetscCall(PetscFree(pc_gamg->orig_data));
  for (PetscInt level = 0; level < PETSC_MG_MAXLEVELS; level++) {
    mg->min_eigen_DinvA[level] = 0;
    mg->max_eigen_DinvA[level] = 0;
  }
  pc_gamg->emin = 0;
  pc_gamg->emax = 0;
  PetscCall(PCReset_MG(pc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   PCGAMGCreateLevel_GAMG: create the coarse operator with RAP, then repartition and/or reduce the
     number of active processes

   Input Parameters:
+  pc - the preconditioner context (side effect: coarse data in 'pc_gamg->data' and
        'pc_gamg->data_sz' are changed via repartitioning/reduction)
.  Amat_fine - matrix on this fine (k) level
-  cr_bs - coarse block size

   Input/Output Parameters:
+  a_P_inout - prolongation operator to the next level (k --> k-1)
-  a_nactive_proc - number of active processes

   Output Parameter:
.  a_Amat_crs - coarse matrix that is created (k-1)
*/
static PetscErrorCode PCGAMGCreateLevel_GAMG(PC pc, Mat Amat_fine, PetscInt cr_bs, Mat *a_P_inout, Mat *a_Amat_crs, PetscMPIInt *a_nactive_proc, IS *Pcolumnperm, PetscBool is_last)
{
  PC_MG      *mg      = (PC_MG *)pc->data;
  PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;
  Mat         Cmat, Pold = *a_P_inout;
  MPI_Comm    comm;
  PetscMPIInt rank, size, new_size, nactive = *a_nactive_proc;
  PetscInt    ncrs_eq, ncrs, f_bs;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)Amat_fine, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(MatGetBlockSize(Amat_fine, &f_bs));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_PTAP], 0, 0, 0, 0));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[pc_gamg->current_level][1], 0, 0, 0, 0));
  PetscCall(MatPtAP(Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat));
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[pc_gamg->current_level][1], 0, 0, 0, 0));
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_PTAP], 0, 0, 0, 0));

  if (Pcolumnperm) *Pcolumnperm = NULL;

  /* set 'ncrs' (nodes), 'ncrs_eq' (equations) */
  PetscCall(MatGetLocalSize(Cmat, &ncrs_eq, NULL));
  if (pc_gamg->data_cell_rows > 0) {
    ncrs = pc_gamg->data_sz / pc_gamg->data_cell_cols / pc_gamg->data_cell_rows;
  } else {
    PetscInt bs;
    PetscCall(MatGetBlockSize(Cmat, &bs));
    ncrs = ncrs_eq / bs;
  }
  /* get the number of PEs to make active, 'new_size'; the reduction can be to any integer in [1, P] */
  if (pc_gamg->level_reduction_factors[pc_gamg->current_level] == 0 && PetscDefined(HAVE_CUDA) && pc_gamg->current_level == 0) { /* a factor of 0 turns on reduction to one process per device; TODO: do the same for HIP, etc. */
#if defined(PETSC_HAVE_CUDA)
    PetscShmComm pshmcomm;
    PetscMPIInt  locrank;
    MPI_Comm     loccomm;
    PetscInt     s_nnodes, r_nnodes, new_new_size;
    cudaError_t  cerr;
    int          devCount;
    PetscCall(PetscShmCommGet(comm, &pshmcomm));
    PetscCall(PetscShmCommGetMpiShmComm(pshmcomm, &loccomm));
    PetscCallMPI(MPI_Comm_rank(loccomm, &locrank));
    s_nnodes = !locrank;
    PetscCall(MPIU_Allreduce(&s_nnodes, &r_nnodes, 1, MPIU_INT, MPI_SUM, comm));
    PetscCheck((size % r_nnodes) == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "odd number of nodes np=%d nnodes=%" PetscInt_FMT, size, r_nnodes);
    devCount = 0;
    cerr     = cudaGetDeviceCount(&devCount);
    cudaGetLastError();                         /* Reset the last error */
    if (cerr == cudaSuccess && devCount >= 1) { /* There are devices, else go to heuristic */
      new_new_size = r_nnodes * devCount;
      new_size     = new_new_size;
      PetscCall(PetscInfo(pc, "%s: Fine grid with Cuda. %" PetscInt_FMT " nodes. Change new active set size %d --> %d (devCount=%d #nodes=%" PetscInt_FMT ")\n", ((PetscObject)pc)->prefix, r_nnodes, nactive, new_size, devCount, r_nnodes));
    } else {
      PetscCall(PetscInfo(pc, "%s: With Cuda but no device. Use heuristics.\n", ((PetscObject)pc)->prefix));
      goto HEURISTIC;
    }
#else
    SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "should not be here");
#endif
  } else if (pc_gamg->level_reduction_factors[pc_gamg->current_level] > 0) {
    if (nactive < pc_gamg->level_reduction_factors[pc_gamg->current_level]) {
      new_size = 1;
      PetscCall(PetscInfo(pc, "%s: reduction factor too small for %d active processes: reduce to one process\n", ((PetscObject)pc)->prefix, nactive));
    } else {
      PetscCheck(nactive % pc_gamg->level_reduction_factors[pc_gamg->current_level] == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "odd number of active process %d wrt reduction factor %" PetscInt_FMT, nactive, pc_gamg->level_reduction_factors[pc_gamg->current_level]);
      new_size = nactive / pc_gamg->level_reduction_factors[pc_gamg->current_level];
      PetscCall(PetscInfo(pc, "%s: Manually setting reduction to %d active processes (%d/%" PetscInt_FMT ")\n", ((PetscObject)pc)->prefix, new_size, nactive, pc_gamg->level_reduction_factors[pc_gamg->current_level]));
    }
  } else if (is_last && !pc_gamg->use_parallel_coarse_grid_solver) {
    new_size = 1;
    PetscCall(PetscInfo(pc, "%s: Force coarsest grid reduction to %d active processes\n", ((PetscObject)pc)->prefix, new_size));
  } else {
    PetscInt ncrs_eq_glob;
#if defined(PETSC_HAVE_CUDA)
  HEURISTIC:
#endif
    PetscCall(MatGetSize(Cmat, &ncrs_eq_glob, NULL));
    new_size = (PetscMPIInt)((float)ncrs_eq_glob / (float)pc_gamg->min_eq_proc + 0.5); /* hardwire min. number of eq/proc */
    if (!new_size) new_size = 1;                                                       /* not likely, possible? */
    else if (new_size >= nactive) new_size = nactive;                                  /* no change, rare */
    PetscCall(PetscInfo(pc, "%s: Coarse grid reduction from %d to %d active processes\n", ((PetscObject)pc)->prefix, nactive, new_size));
  }
  if (new_size == nactive) {
    *a_Amat_crs = Cmat; /* output - no repartitioning or reduction - could bail here */
    if (new_size < size) {
      /* odd case where multiple coarse grids are on one processor or no coarsening ... */
      PetscCall(PetscInfo(pc, "%s: reduced grid using same number of processors (%d) as last grid (use larger coarse grid)\n", ((PetscObject)pc)->prefix, nactive));
      if (pc_gamg->cpu_pin_coarse_grids) {
        PetscCall(MatBindToCPU(*a_Amat_crs, PETSC_TRUE));
        PetscCall(MatBindToCPU(*a_P_inout, PETSC_TRUE));
      }
    }
    /* we know that the grid structure can be reused in MatPtAP */
  } else { /* reduce active processors - we know that the grid structure can NOT be reused in MatPtAP */
    PetscInt *counts, *newproc_idx, ii, jj, kk, strideNew, *tidx, ncrs_new, ncrs_eq_new, nloc_old, expand_factor = 1, rfactor = 1;
    IS        is_eq_newproc, is_eq_num, is_eq_num_prim, new_eq_indices;
    PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
    nloc_old = ncrs_eq / cr_bs;
    PetscCheck(ncrs_eq % cr_bs == 0, PETSC_COMM_SELF, PETSC_ERR_PLIB, "ncrs_eq %" PetscInt_FMT " not divisible by cr_bs %" PetscInt_FMT, ncrs_eq, cr_bs);
    /* get new_size and rfactor */
    if (pc_gamg->layout_type == PCGAMG_LAYOUT_SPREAD || !pc_gamg->repart) {
      /* find factor */
      if (new_size == 1) rfactor = size; /* don't modify */
      else {
        PetscReal best_fact = 0.;
        jj                  = -1;
        for (kk = 1; kk <= size; kk++) {
          if (!(size % kk)) { /* a candidate */
            PetscReal nactpe = (PetscReal)size / (PetscReal)kk, fact = nactpe / (PetscReal)new_size;
            if (fact > 1.0) fact = 1. / fact; /* keep fact < 1 */
            if (fact > best_fact) {
              best_fact = fact;
              jj        = kk;
            }
          }
        }
        if (jj != -1) rfactor = jj;
        else rfactor = 1; /* a prime */
        if (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) expand_factor = 1;
        else expand_factor = rfactor;
      }
      new_size = size / rfactor; /* make new_size a divisor of the total number of processes */
      if (new_size == nactive) { /* no repartitioning or reduction, bail out because nested here (rare) */
        *a_Amat_crs = Cmat;
        PetscCall(PetscInfo(pc, "%s: Finding factorable processor set stopped reduction: new_size=%d, neq(loc)=%" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, new_size, ncrs_eq));
        PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
        PetscFunctionReturn(PETSC_SUCCESS);
      }
    }
    /* make 'is_eq_newproc' */
    PetscCall(PetscMalloc1(size, &counts));
    if (pc_gamg->repart) { /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates of primal part accordingly */
      Mat adj;
      PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_REPART], 0, 0, 0, 0));
      PetscCall(PetscInfo(pc, "%s: Repartition: size (active): %d --> %d, %" PetscInt_FMT " local equations, using %s process layout\n", ((PetscObject)pc)->prefix, *a_nactive_proc, new_size, ncrs_eq, (pc_gamg->layout_type == PCGAMG_LAYOUT_COMPACT) ? "compact" : "spread"));
      /* get 'adj' */
      if (cr_bs == 1) {
        PetscCall(MatConvert(Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj));
      } else {
        /* make a scalar matrix to partition (no Stokes here) */
        Mat                tMat;
        PetscInt           Istart_crs, Iend_crs, ncols, jj, Ii;
        const PetscScalar *vals;
        const PetscInt    *idx;
        PetscInt          *d_nnz, *o_nnz, M, N;
        static PetscInt    llev = 0; /* ugly but just used for debugging */
        MatType            mtype;

        PetscCall(PetscMalloc2(ncrs, &d_nnz, ncrs, &o_nnz));
        PetscCall(MatGetOwnershipRange(Cmat, &Istart_crs, &Iend_crs));
        PetscCall(MatGetSize(Cmat, &M, &N));
        for (Ii = Istart_crs, jj = 0; Ii < Iend_crs; Ii += cr_bs, jj++) {
          PetscCall(MatGetRow(Cmat, Ii, &ncols, NULL, NULL));
          d_nnz[jj] = ncols / cr_bs;
          o_nnz[jj] = ncols / cr_bs;
          PetscCall(MatRestoreRow(Cmat, Ii, &ncols, NULL, NULL));
          if (d_nnz[jj] > ncrs) d_nnz[jj] = ncrs;
          if (o_nnz[jj] > (M / cr_bs - ncrs)) o_nnz[jj] = M / cr_bs - ncrs;
        }

        PetscCall(MatGetType(Amat_fine, &mtype));
        PetscCall(MatCreate(comm, &tMat));
        PetscCall(MatSetSizes(tMat, ncrs, ncrs, PETSC_DETERMINE, PETSC_DETERMINE));
        PetscCall(MatSetType(tMat, mtype));
        PetscCall(MatSeqAIJSetPreallocation(tMat, 0, d_nnz));
        PetscCall(MatMPIAIJSetPreallocation(tMat, 0, d_nnz, 0, o_nnz));
        PetscCall(PetscFree2(d_nnz, o_nnz));

        for (ii = Istart_crs; ii < Iend_crs; ii++) {
          PetscInt dest_row = ii / cr_bs;
          PetscCall(MatGetRow(Cmat, ii, &ncols, &idx, &vals));
          for (jj = 0; jj < ncols; jj++) {
            PetscInt    dest_col = idx[jj] / cr_bs;
            PetscScalar v        = 1.0;
            PetscCall(MatSetValues(tMat, 1, &dest_row, 1, &dest_col, &v, ADD_VALUES));
          }
          PetscCall(MatRestoreRow(Cmat, ii, &ncols, &idx, &vals));
        }
        PetscCall(MatAssemblyBegin(tMat, MAT_FINAL_ASSEMBLY));
        PetscCall(MatAssemblyEnd(tMat, MAT_FINAL_ASSEMBLY));

        if (llev++ == -1) {
          PetscViewer viewer;
          char        fname[32];
          PetscCall(PetscSNPrintf(fname, sizeof(fname), "part_mat_%" PetscInt_FMT ".mat", llev));
          PetscCall(PetscViewerBinaryOpen(comm, fname, FILE_MODE_WRITE, &viewer));
          PetscCall(MatView(tMat, viewer));
          PetscCall(PetscViewerDestroy(&viewer));
        }
        PetscCall(MatConvert(tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj));
        PetscCall(MatDestroy(&tMat));
      } /* create 'adj' */

      { /* partition: get newproc_idx */
        char            prefix[256];
        const char     *pcpre;
        const PetscInt *is_idx;
        MatPartitioning mpart;
        IS              proc_is;

        PetscCall(MatPartitioningCreate(comm, &mpart));
        PetscCall(MatPartitioningSetAdjacency(mpart, adj));
        PetscCall(PCGetOptionsPrefix(pc, &pcpre));
        PetscCall(PetscSNPrintf(prefix, sizeof(prefix), "%spc_gamg_", pcpre ? pcpre : ""));
        PetscCall(PetscObjectSetOptionsPrefix((PetscObject)mpart, prefix));
        PetscCall(MatPartitioningSetFromOptions(mpart));
        PetscCall(MatPartitioningSetNParts(mpart, new_size));
        PetscCall(MatPartitioningApply(mpart, &proc_is));
        PetscCall(MatPartitioningDestroy(&mpart));

        /* collect IS info */
        PetscCall(PetscMalloc1(ncrs_eq, &newproc_idx));
        PetscCall(ISGetIndices(proc_is, &is_idx));
        for (kk = jj = 0; kk < nloc_old; kk++) {
          for (ii = 0; ii < cr_bs; ii++, jj++) { newproc_idx[jj] = is_idx[kk] * expand_factor; /* distribution */ }
        }
        PetscCall(ISRestoreIndices(proc_is, &is_idx));
        PetscCall(ISDestroy(&proc_is));
      }
      PetscCall(MatDestroy(&adj));

      PetscCall(ISCreateGeneral(comm, ncrs_eq, newproc_idx, PETSC_COPY_VALUES, &is_eq_newproc));
      PetscCall(PetscFree(newproc_idx));
      PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REPART], 0, 0, 0, 0));
    } else { /* simple aggregation of parts -- 'is_eq_newproc' */
      PetscInt targetPE;
      PetscCheck(new_size != nactive, PETSC_COMM_SELF, PETSC_ERR_PLIB, "new_size==nactive. Should not happen");
      PetscCall(PetscInfo(pc, "%s: Number of equations (loc) %" PetscInt_FMT " with simple aggregation\n", ((PetscObject)pc)->prefix, ncrs_eq));
      targetPE = (rank / rfactor) * expand_factor;
      PetscCall(ISCreateStride(comm, ncrs_eq, targetPE, 0, &is_eq_newproc));
    } /* end simple 'is_eq_newproc' */

    /*
      Create an index set from the is_eq_newproc index set to indicate the mapping TO
    */
    PetscCall(ISPartitioningToNumbering(is_eq_newproc, &is_eq_num));
    is_eq_num_prim = is_eq_num;
    /*
      Determine how many equations/vertices are assigned to each processor
    */
    PetscCall(ISPartitioningCount(is_eq_newproc, size, counts));
    ncrs_eq_new = counts[rank];
    PetscCall(ISDestroy(&is_eq_newproc));
    ncrs_new = ncrs_eq_new / cr_bs;

    PetscCall(PetscFree(counts));
    /* data movement scope -- this could be moved to subclasses so that we don't try to cram all auxiliary data into some complex abstracted thing */
    {
      Vec             src_crd, dest_crd;
      const PetscInt *idx, ndata_rows = pc_gamg->data_cell_rows, ndata_cols = pc_gamg->data_cell_cols, node_data_sz = ndata_rows * ndata_cols;
      VecScatter      vecscat;
      PetscScalar    *array;
      IS              isscat;
      /* move data (for primal equations only) */
      /* Create a vector to contain the newly ordered element information */
      PetscCall(VecCreate(comm, &dest_crd));
      PetscCall(VecSetSizes(dest_crd, node_data_sz * ncrs_new, PETSC_DECIDE));
      PetscCall(VecSetType(dest_crd, VECSTANDARD)); /* this is needed! */
      /*
        There are 'ndata_rows*ndata_cols' data items per node; one can think of the vectors as having
        a block size of that length. Note, ISs are expanded into equation space by 'cr_bs'.
      */
      PetscCall(PetscMalloc1(ncrs * node_data_sz, &tidx));
      PetscCall(ISGetIndices(is_eq_num_prim, &idx));
      for (ii = 0, jj = 0; ii < ncrs; ii++) {
        PetscInt id = idx[ii * cr_bs] / cr_bs; /* get node back */
        for (kk = 0; kk < node_data_sz; kk++, jj++) tidx[jj] = id * node_data_sz + kk;
      }
      PetscCall(ISRestoreIndices(is_eq_num_prim, &idx));
      PetscCall(ISCreateGeneral(comm, node_data_sz * ncrs, tidx, PETSC_COPY_VALUES, &isscat));
      PetscCall(PetscFree(tidx));
      /*
        Create a vector to contain the original vertex information for each element
      */
      PetscCall(VecCreateSeq(PETSC_COMM_SELF, node_data_sz * ncrs, &src_crd));
      for (jj = 0; jj < ndata_cols; jj++) {
        const PetscInt stride0 = ncrs * pc_gamg->data_cell_rows;
        for (ii = 0; ii < ncrs; ii++) {
          for (kk = 0; kk < ndata_rows; kk++) {
            PetscInt    ix = ii * ndata_rows + kk + jj * stride0, jx = ii * node_data_sz + kk * ndata_cols + jj;
            PetscScalar tt = (PetscScalar)pc_gamg->data[ix];
            PetscCall(VecSetValues(src_crd, 1, &jx, &tt, INSERT_VALUES));
          }
        }
      }
      PetscCall(VecAssemblyBegin(src_crd));
      PetscCall(VecAssemblyEnd(src_crd));
      /*
        Scatter the element vertex information (still in the original vertex ordering)
        to the correct processor
      */
      PetscCall(VecScatterCreate(src_crd, NULL, dest_crd, isscat, &vecscat));
      PetscCall(ISDestroy(&isscat));
      PetscCall(VecScatterBegin(vecscat, src_crd, dest_crd, INSERT_VALUES, SCATTER_FORWARD));
      PetscCall(VecScatterEnd(vecscat, src_crd, dest_crd, INSERT_VALUES, SCATTER_FORWARD));
      PetscCall(VecScatterDestroy(&vecscat));
      PetscCall(VecDestroy(&src_crd));
      /*
        Put the element vertex data into a new allocation of the gdata->ele
      */
      PetscCall(PetscFree(pc_gamg->data));
      PetscCall(PetscMalloc1(node_data_sz * ncrs_new, &pc_gamg->data));

      pc_gamg->data_sz = node_data_sz * ncrs_new;
      strideNew        = ncrs_new * ndata_rows;

      PetscCall(VecGetArray(dest_crd, &array));
      for (jj = 0; jj < ndata_cols; jj++) {
        for (ii = 0; ii < ncrs_new; ii++) {
          for (kk = 0; kk < ndata_rows; kk++) {
            PetscInt ix = ii * ndata_rows + kk + jj * strideNew, jx = ii * node_data_sz + kk * ndata_cols + jj;
            pc_gamg->data[ix] = PetscRealPart(array[jx]);
          }
        }
      }
      PetscCall(VecRestoreArray(dest_crd, &array));
      PetscCall(VecDestroy(&dest_crd));
    }
    /* move A and P (columns) with new layout */
    /*
      Invert for MatCreateSubMatrix
    */
    PetscCall(ISInvertPermutation(is_eq_num, ncrs_eq_new, &new_eq_indices));
    PetscCall(ISSort(new_eq_indices)); /* is this needed? */
    PetscCall(ISSetBlockSize(new_eq_indices, cr_bs));
    if (is_eq_num != is_eq_num_prim) { PetscCall(ISDestroy(&is_eq_num_prim)); /* could be same as 'is_eq_num' */ }
    if (Pcolumnperm) {
      PetscCall(PetscObjectReference((PetscObject)new_eq_indices));
      *Pcolumnperm = new_eq_indices;
    }
    PetscCall(ISDestroy(&is_eq_num));

    /* 'a_Amat_crs' output */
    {
      Mat       mat;
      PetscBool isset, isspd, isher;
#if !defined(PETSC_USE_COMPLEX)
      PetscBool issym;
#endif

      PetscCall(MatCreateSubMatrix(Cmat, new_eq_indices, new_eq_indices, MAT_INITIAL_MATRIX, &mat));
      PetscCall(MatIsSPDKnown(Cmat, &isset, &isspd)); // like MatPropagateSymmetryOptions, but should set MAT_STRUCTURALLY_SYMMETRIC ?
      if (isset) PetscCall(MatSetOption(mat, MAT_SPD, isspd));
      else {
        PetscCall(MatIsHermitianKnown(Cmat, &isset, &isher));
        if (isset) PetscCall(MatSetOption(mat, MAT_HERMITIAN, isher));
        else {
#if !defined(PETSC_USE_COMPLEX)
          PetscCall(MatIsSymmetricKnown(Cmat, &isset, &issym));
          if (isset) PetscCall(MatSetOption(mat, MAT_SYMMETRIC, issym));
#endif
        }
      }
      *a_Amat_crs = mat;
    }
    PetscCall(MatDestroy(&Cmat));

    /* prolongator */
    {
      IS       findices;
      PetscInt Istart, Iend;
      Mat      Pnew;

      PetscCall(MatGetOwnershipRange(Pold, &Istart, &Iend));
      PetscCall(ISCreateStride(comm, Iend - Istart, Istart, 1, &findices));
      PetscCall(ISSetBlockSize(findices, f_bs));
      PetscCall(MatCreateSubMatrix(Pold, findices, new_eq_indices, MAT_INITIAL_MATRIX, &Pnew));
      PetscCall(ISDestroy(&findices));
      PetscCall(MatSetOption(Pnew, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));

      PetscCall(MatDestroy(a_P_inout));

      /* output - repartitioned */
      *a_P_inout = Pnew;
    }
    PetscCall(ISDestroy(&new_eq_indices));

    *a_nactive_proc = new_size; /* output */

    /* pinning on reduced grids, not a bad heuristic and optimization gets folded into process reduction optimization */
    if (pc_gamg->cpu_pin_coarse_grids) {
#if defined(PETSC_HAVE_VIENNACL) || defined(PETSC_HAVE_CUDA)
      static PetscInt llev = 2;
      PetscCall(PetscInfo(pc, "%s: Pinning level %" PetscInt_FMT " to the CPU\n", ((PetscObject)pc)->prefix, llev++));
#endif
      PetscCall(MatBindToCPU(*a_Amat_crs, PETSC_TRUE));
      PetscCall(MatBindToCPU(*a_P_inout, PETSC_TRUE));
      if (1) { /* HACK: move this to MatBindCPU_MPIAIJXXX; lvec is created, need to pin it, this is done in MatSetUpMultiply_MPIAIJ. Hack */
        Mat         A = *a_Amat_crs, P = *a_P_inout;
        PetscMPIInt size;
        PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)A), &size));
        if (size > 1) {
          Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data, *p = (Mat_MPIAIJ *)P->data;
          PetscCall(VecBindToCPU(a->lvec, PETSC_TRUE));
          PetscCall(VecBindToCPU(p->lvec, PETSC_TRUE));
        }
      }
    }
    PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_REDUCE], 0, 0, 0, 0));
  } // processor reduce
  PetscFunctionReturn(PETSC_SUCCESS);
}

// used in GEO
PetscErrorCode PCGAMGSquareGraph_GAMG(PC a_pc, Mat Gmat1, Mat *Gmat2)
{
  const char *prefix;
  char        addp[32];
  PC_MG      *mg      = (PC_MG *)a_pc->data;
  PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCall(PCGetOptionsPrefix(a_pc, &prefix));
  PetscCall(PetscInfo(a_pc, "%s: Square Graph on level %" PetscInt_FMT "\n", ((PetscObject)a_pc)->prefix, pc_gamg->current_level + 1));
  PetscCall(MatProductCreate(Gmat1, Gmat1, NULL, Gmat2));
  PetscCall(MatSetOptionsPrefix(*Gmat2, prefix));
  PetscCall(PetscSNPrintf(addp, sizeof(addp), "pc_gamg_square_%" PetscInt_FMT "_", pc_gamg->current_level));
  PetscCall(MatAppendOptionsPrefix(*Gmat2, addp));
  if ((*Gmat2)->structurally_symmetric == PETSC_BOOL3_TRUE) {
    PetscCall(MatProductSetType(*Gmat2, MATPRODUCT_AB));
  } else {
    PetscCall(MatSetOption(Gmat1, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
    PetscCall(MatProductSetType(*Gmat2, MATPRODUCT_AtB));
  }
  PetscCall(MatProductSetFromOptions(*Gmat2));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[pc_gamg->current_level][0], 0, 0, 0, 0));
  PetscCall(MatProductSymbolic(*Gmat2));
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[pc_gamg->current_level][0], 0, 0, 0, 0));
  PetscCall(MatProductClear(*Gmat2));
  /* we only need the sparsity, cheat and tell PETSc the matrix has been assembled */
  (*Gmat2)->assembled = PETSC_TRUE;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                    by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

*/
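/* A typical user-level configuration sketch that drives this setup path (the option names are
   standard GAMG/MG options; the values are illustrative, not recommendations):
     -pc_type gamg -pc_gamg_type agg -pc_gamg_threshold 0.01
     -pc_gamg_process_eq_limit 200 -pc_gamg_coarse_eq_limit 1000
     -mg_levels_ksp_type chebyshev -mg_levels_pc_type jacobi */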
static PetscErrorCode PCSetUp_GAMG(PC pc)
{
  PC_MG      *mg      = (PC_MG *)pc->data;
  PC_GAMG    *pc_gamg = (PC_GAMG *)mg->innerctx;
  Mat         Pmat    = pc->pmat;
  PetscInt    fine_level, level, level1, bs, M, N, qq, lidx, nASMBlocksArr[PETSC_MG_MAXLEVELS], cr_bs;
  MPI_Comm    comm;
  PetscMPIInt rank, size, nactivepe;
  Mat         Aarr[PETSC_MG_MAXLEVELS], Parr[PETSC_MG_MAXLEVELS];
  IS         *ASMLocalIDsArr[PETSC_MG_MAXLEVELS];
  PetscBool   is_last = PETSC_FALSE;
#if defined(PETSC_USE_INFO)
  PetscLogDouble nnz0 = 0., nnztot = 0.;
  MatInfo        info;
#endif

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)pc, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
  if (pc->setupcalled) {
    if (!pc_gamg->reuse_prol || pc->flag == DIFFERENT_NONZERO_PATTERN) {
      /* reset everything */
      PetscCall(PCReset_MG(pc));
      pc->setupcalled = 0;
    } else {
      PC_MG_Levels **mglevels = mg->levels;
      /* just do Galerkin grids */
      Mat B, dA, dB;
      if (pc_gamg->Nlevels > 1) {
        PetscInt gl;
        /* currently only handle case where mat and pmat are the same on coarser levels */
        PetscCall(KSPGetOperators(mglevels[pc_gamg->Nlevels - 1]->smoothd, &dA, &dB));
        /* (re)set to get dirty flag */
        PetscCall(KSPSetOperators(mglevels[pc_gamg->Nlevels - 1]->smoothd, dA, dB));

        for (level = pc_gamg->Nlevels - 2, gl = 0; level >= 0; level--, gl++) {
          MatReuse reuse = MAT_INITIAL_MATRIX;
#if defined(GAMG_STAGES)
          PetscCall(PetscLogStagePush(gamg_stages[gl]));
#endif
          /* matrix structure can change from repartitioning or process reduction but don't know if we have process reduction here. Should fix */
          PetscCall(KSPGetOperators(mglevels[level]->smoothd, NULL, &B));
          if (B->product) {
            if (B->product->A == dB && B->product->B == mglevels[level + 1]->interpolate) reuse = MAT_REUSE_MATRIX;
          }
          if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDestroy(&mglevels[level]->A));
          if (reuse == MAT_REUSE_MATRIX) {
            PetscCall(PetscInfo(pc, "%s: RAP after initial setup, reuse matrix level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
          } else {
            PetscCall(PetscInfo(pc, "%s: RAP after initial setup, with repartitioning (new matrix) level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
          }
          PetscCall(PetscLogEventBegin(petsc_gamg_setup_matmat_events[gl][1], 0, 0, 0, 0));
          PetscCall(MatPtAP(dB, mglevels[level + 1]->interpolate, reuse, PETSC_DEFAULT, &B));
          PetscCall(PetscLogEventEnd(petsc_gamg_setup_matmat_events[gl][1], 0, 0, 0, 0));
          if (reuse == MAT_INITIAL_MATRIX) mglevels[level]->A = B;
          PetscCall(KSPSetOperators(mglevels[level]->smoothd, B, B));
          // check for redoing eigen estimates
          if (pc_gamg->recompute_esteig) {
            PetscBool ischeb;
            KSP       smoother;
            PetscCall(PCMGGetSmoother(pc, level + 1, &smoother));
            PetscCall(PetscObjectTypeCompare((PetscObject)smoother, KSPCHEBYSHEV, &ischeb));
            if (ischeb) {
              KSP_Chebyshev *cheb = (KSP_Chebyshev *)smoother->data;
              cheb->emin_provided = 0;
              cheb->emax_provided = 0;
            }
            /* we could call PetscCall(KSPChebyshevSetEigenvalues(smoother, 0, 0)); but the logic does not work properly */
          }
          // inc
          dB = B;
#if defined(GAMG_STAGES)
          PetscCall(PetscLogStagePop());
#endif
        }
      }

      PetscCall(PCSetUp_MG(pc));
      PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
      PetscFunctionReturn(PETSC_SUCCESS);
    }
  }

  if (!pc_gamg->data) {
    if (pc_gamg->orig_data) {
      PetscCall(MatGetBlockSize(Pmat, &bs));
      PetscCall(MatGetLocalSize(Pmat, &qq, NULL));

      pc_gamg->data_sz        = (qq / bs) * pc_gamg->orig_data_cell_rows * pc_gamg->orig_data_cell_cols;
      pc_gamg->data_cell_rows = pc_gamg->orig_data_cell_rows;
      pc_gamg->data_cell_cols = pc_gamg->orig_data_cell_cols;

      PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data));
      for (qq = 0; qq < pc_gamg->data_sz; qq++) pc_gamg->data[qq] = pc_gamg->orig_data[qq];
    } else {
      PetscCheck(pc_gamg->ops->createdefaultdata, comm, PETSC_ERR_PLIB, "'createdefaultdata' not set(?) need to support NULL data");
      PetscCall(pc_gamg->ops->createdefaultdata(pc, Pmat));
    }
  }

  /* cache original data for reuse */
  if (!pc_gamg->orig_data && (PetscBool)(!pc_gamg->reuse_prol)) {
    PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->orig_data));
    for (qq = 0; qq < pc_gamg->data_sz; qq++) pc_gamg->orig_data[qq] = pc_gamg->data[qq];
    pc_gamg->orig_data_cell_rows = pc_gamg->data_cell_rows;
    pc_gamg->orig_data_cell_cols = pc_gamg->data_cell_cols;
  }

  /* get basic dims */
  PetscCall(MatGetBlockSize(Pmat, &bs));
  PetscCall(MatGetSize(Pmat, &M, NULL));

#if defined(PETSC_USE_INFO)
  PetscCall(MatGetInfo(Pmat, MAT_GLOBAL_SUM, &info)); /* global reduction */
  nnz0   = info.nz_used;
  nnztot = info.nz_used;
#endif
  PetscCall(PetscInfo(pc, "%s: level %d) N=%" PetscInt_FMT ", n data rows=%" PetscInt_FMT ", n data cols=%" PetscInt_FMT ", nnz/row (ave)=%" PetscInt_FMT ", block size %d, np=%d\n", ((PetscObject)pc)->prefix, 0, M, pc_gamg->data_cell_rows, pc_gamg->data_cell_cols, (PetscInt)(nnz0 / (PetscReal)M + 0.5), (int)bs, size));

  /* Get A_i and R_i */
  for (level = 0, Aarr[0] = Pmat, nactivepe = size; level < (pc_gamg->Nlevels - 1) && (level == 0 || M > pc_gamg->coarse_eq_limit); level++) {
    pc_gamg->current_level = level;
    level1                 = level + 1;
#if defined(GAMG_STAGES)
    if (!gamg_stages[level]) {
      char str[32];
      PetscCall(PetscSNPrintf(str, PETSC_STATIC_ARRAY_LENGTH(str), "GAMG Level %d", (int)level));
      PetscCall(PetscLogStageRegister(str, &gamg_stages[level]));
    }
    PetscCall(PetscLogStagePush(gamg_stages[level]));
#endif
    /* construct prolongator - Parr[level1] */
    if (level == 0 && pc_gamg->injection_index_size > 0) {
      Mat      Prol;
      MatType  mtype;
      PetscInt prol_m, prol_n, Prol_N = (M / bs) * pc_gamg->injection_index_size, Istart, Iend, nn, row;
      PetscCall(PetscInfo(pc, "Create fine grid injection space prolongation %" PetscInt_FMT " x %" PetscInt_FMT ". %s\n", M, Prol_N, pc_gamg->data ? "delete null space data" : ""));
      PetscCall(MatGetOwnershipRange(Pmat, &Istart, &Iend));
      PetscCall(MatGetLocalSize(Pmat, &prol_m, NULL)); // rows m x n
      prol_n = (prol_m / bs) * pc_gamg->injection_index_size;
      PetscCheck(pc_gamg->injection_index_size < bs, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Injection size %d must be less than block size %d", (int)pc_gamg->injection_index_size, (int)bs);
      PetscCall(MatGetType(Pmat, &mtype));
      PetscCall(MatCreate(PetscObjectComm((PetscObject)pc), &Prol));
      PetscCall(MatSetBlockSizes(Prol, bs, pc_gamg->injection_index_size));
      PetscCall(MatSetSizes(Prol, prol_m, prol_n, M, Prol_N));
      PetscCall(MatSetType(Prol, mtype));
#if PetscDefined(HAVE_DEVICE)
      PetscBool flg;
      PetscCall(MatBoundToCPU(Pmat, &flg));
      PetscCall(MatBindToCPU(Prol, flg));
      if (flg) PetscCall(MatSetBindingPropagates(Prol, PETSC_TRUE));
#endif
      PetscCall(MatSeqAIJSetPreallocation(Prol, 1, NULL));
      PetscCall(MatMPIAIJSetPreallocation(Prol, 1, NULL, 0, NULL));
      // set I \kron [1, 1, ... ]^T
      for (PetscInt ii = Istart, col = (Istart / bs) * pc_gamg->injection_index_size; ii < Iend; ii += bs) {
        const PetscScalar one = 1;
        for (PetscInt jj = 0; jj < pc_gamg->injection_index_size; jj++, col++) {
          PetscInt row = ii + pc_gamg->injection_index[jj];
          PetscCall(MatSetValues(Prol, 1, &row, 1, &col, &one, INSERT_VALUES));
        }
      }
      PetscCall(MatAssemblyBegin(Prol, MAT_FINAL_ASSEMBLY));
      PetscCall(MatAssemblyEnd(Prol, MAT_FINAL_ASSEMBLY));
      PetscCall(MatViewFromOptions(Prol, NULL, "-mat_view_injection"));
      PetscCall(MatGetBlockSizes(Prol, NULL, &cr_bs)); // column size
      Parr[level1] = Prol;
      // cannot deal with a null space -- with an array of 'injection cols' we could take 'injection rows' and 'injection cols' to 'data'
      if (pc_gamg->data) {
        pc_gamg->data_cell_cols      = pc_gamg->injection_index_size;
        pc_gamg->data_cell_rows      = pc_gamg->injection_index_size;
        pc_gamg->orig_data_cell_cols = 0;
        pc_gamg->orig_data_cell_rows = 0;
        PetscCall(PetscFree(pc_gamg->data));
        pc_gamg->data_sz = pc_gamg->injection_index_size * prol_n;
        PetscCall(PetscMalloc1(pc_gamg->data_sz, &pc_gamg->data));
        for (row = nn = 0; row < prol_n; row += pc_gamg->injection_index_size) {
          for (int jj = 0; jj < pc_gamg->injection_index_size; jj++) {
            int idx = row * pc_gamg->injection_index_size + jj * pc_gamg->injection_index_size;
            for (int kk = 0; kk < pc_gamg->injection_index_size; kk++, nn++) { pc_gamg->data[idx + kk] = (jj == kk) ? 1 : 0; }
          }
        }
        PetscCheck(nn == pc_gamg->data_sz, PETSC_COMM_SELF, PETSC_ERR_PLIB, "nn != pc_gamg->data_sz %" PetscInt_FMT " %" PetscInt_FMT, pc_gamg->data_sz, nn);
      }
    } else {
      Mat               Gmat, mat;
      PetscCoarsenData *agg_lists;
      Mat               Prol11;

      PetscCall(PCGAMGCreateGraph(pc, Aarr[level], &Gmat));
      PetscCall(pc_gamg->ops->coarsen(pc, &Gmat, &agg_lists)); // Gmat may have ghosts for QR aggregates not in matrix
      PetscCall(PetscCDGetMat(agg_lists, &mat));
      if (!mat) PetscCall(PetscCDSetMat(agg_lists, Gmat));
      PetscCall(pc_gamg->ops->prolongator(pc, Aarr[level], agg_lists, &Prol11));
      /* could have failed to create new level */
      if (Prol11) {
        const char *prefix;
        char        addp[32];

        /* get new block size of coarse matrices */
        PetscCall(MatGetBlockSizes(Prol11, NULL, &cr_bs)); // column size

        if (pc_gamg->ops->optprolongator) {
          /* smooth */
          PetscCall(pc_gamg->ops->optprolongator(pc, Aarr[level], &Prol11));
        }

        if (pc_gamg->use_aggs_in_asm) {
          PetscInt bs;
          PetscCall(MatGetBlockSizes(Prol11, &bs, NULL)); // row block size
          PetscCall(PetscCDGetASMBlocks(agg_lists, bs, &nASMBlocksArr[level], &ASMLocalIDsArr[level]));
          PetscCall(PetscInfo(pc, "%d: %" PetscInt_FMT " ASM local domains,  bs = %d\n", (int)level, nASMBlocksArr[level], (int)bs));
        } else if (pc_gamg->asm_hem_aggs) {
          MatCoarsen  crs;
          const char *prefix;
          PetscInt    bs;
          PetscCall(PetscCDGetMat(agg_lists, &mat));
          if (mat == Gmat) PetscCall(PetscCDClearMat(agg_lists)); // take the Mat away from the list (yuck)
          PetscCall(PetscCDDestroy(agg_lists));
          PetscCall(PetscInfo(pc, "HEM ASM passes = %d\n", (int)pc_gamg->asm_hem_aggs));
          PetscCall(MatCoarsenCreate(PetscObjectComm((PetscObject)pc), &crs));
          PetscCall(PetscObjectGetOptionsPrefix((PetscObject)pc, &prefix));
          PetscCall(PetscObjectSetOptionsPrefix((PetscObject)crs, prefix));
          PetscCall(MatCoarsenSetFromOptions(crs)); // get strength args
          PetscCall(MatCoarsenSetType(crs, MATCOARSENHEM));
          PetscCall(MatCoarsenSetMaximumIterations(crs, pc_gamg->asm_hem_aggs));
          PetscCall(MatCoarsenSetAdjacency(crs, Gmat));
          PetscCall(MatCoarsenSetStrictAggs(crs, PETSC_TRUE));
          PetscCall(MatCoarsenApply(crs));
          PetscCall(MatCoarsenViewFromOptions(crs, NULL, "-agg_hem_mat_coarsen_view"));
          PetscCall(MatCoarsenGetData(crs, &agg_lists)); /* output */
          PetscCall(MatCoarsenDestroy(&crs));
          // create aggregates
          PetscCall(MatGetBlockSizes(Aarr[level], &bs, NULL)); // row block size
          PetscCall(PetscCDGetASMBlocks(agg_lists, bs, &nASMBlocksArr[level], &ASMLocalIDsArr[level]));
        }
        PetscCall(PCGetOptionsPrefix(pc, &prefix));
        PetscCall(MatSetOptionsPrefix(Prol11, prefix));
        PetscCall(PetscSNPrintf(addp, sizeof(addp), "pc_gamg_prolongator_%d_", (int)level));
        PetscCall(MatAppendOptionsPrefix(Prol11, addp));
        /* Always generate the transpose with CUDA
           Such behaviour can be adapted with -pc_gamg_prolongator_ prefixed options */
        PetscCall(MatSetOption(Prol11, MAT_FORM_EXPLICIT_TRANSPOSE, PETSC_TRUE));
        PetscCall(MatSetFromOptions(Prol11));
        Parr[level1] = Prol11;
      } else Parr[level1] = NULL; /* failed to coarsen */
      PetscCall(PetscCDGetMat(agg_lists, &mat));
      if (mat == Gmat) PetscCall(PetscCDClearMat(agg_lists)); // take the Mat away from the list (yuck)
      PetscCall(MatDestroy(&Gmat));
      PetscCall(PetscCDDestroy(agg_lists));
    } /* construct prolongator scope */
    if (level == 0) Aarr[0] = Pmat; /* use Pmat for finest level setup */
    if (!Parr[level1]) {            /* failed to coarsen */
      PetscCall(PetscInfo(pc, "%s: Stop gridding, level %" PetscInt_FMT "\n", ((PetscObject)pc)->prefix, level));
#if defined(GAMG_STAGES)
      PetscCall(PetscLogStagePop());
#endif
      break;
    }
    PetscCall(MatGetSize(Parr[level1], &M, &N)); /* N is the next M, a loop-test variable */
    PetscCheck(!is_last, PETSC_COMM_SELF, PETSC_ERR_PLIB, "Is last ?");
    if (N <= pc_gamg->coarse_eq_limit) is_last = PETSC_TRUE;
    if (level1 == pc_gamg->Nlevels - 1) is_last = PETSC_TRUE;
    if (level == PETSC_MG_MAXLEVELS - 2) is_last = PETSC_TRUE;
    PetscCall(PetscLogEventBegin(petsc_gamg_setup_events[GAMG_LEVEL], 0, 0, 0, 0));
    PetscCall(pc_gamg->ops->createlevel(pc, Aarr[level], cr_bs, &Parr[level1], &Aarr[level1], &nactivepe, NULL, is_last));
    PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_LEVEL], 0, 0, 0, 0));

    PetscCall(MatGetSize(Aarr[level1], &M, &N)); /* M is a loop-test variable */
#if defined(PETSC_USE_INFO)
    PetscCall(MatGetInfo(Aarr[level1], MAT_GLOBAL_SUM, &info));
    nnztot += info.nz_used;
#endif
    PetscCall(PetscInfo(pc, "%s: %d) N=%" PetscInt_FMT ", n data cols=%" PetscInt_FMT ", nnz/row (ave)=%" PetscInt_FMT ", %d active pes\n", ((PetscObject)pc)->prefix, (int)level1, M, pc_gamg->data_cell_cols, (PetscInt)(info.nz_used / (PetscReal)M), nactivepe));

#if defined(GAMG_STAGES)
    PetscCall(PetscLogStagePop());
#endif
    /* stop if one node or one proc -- could pull back for singular problems */
    if ((pc_gamg->data_cell_cols && M / pc_gamg->data_cell_cols < 2) || (!pc_gamg->data_cell_cols && M / bs < 2)) {
      PetscCall(PetscInfo(pc, "%s: HARD stop of coarsening on level %" PetscInt_FMT ".  Grid too small: %" PetscInt_FMT " block nodes\n", ((PetscObject)pc)->prefix, level, M / bs));
      level++;
      break;
    } else if (level == PETSC_MG_MAXLEVELS - 2) { /* stop if we are limited by PC_MG_MAXLEVELS */
      PetscCall(PetscInfo(pc, "%s: HARD stop of coarsening on level %" PetscInt_FMT ".  PC_MG_MAXLEVELS reached\n", ((PetscObject)pc)->prefix, level));
      level++;
      break;
    }
  } /* levels */
  PetscCall(PetscFree(pc_gamg->data));

  PetscCall(PetscInfo(pc, "%s: %" PetscInt_FMT " levels, operator complexity = %g\n", ((PetscObject)pc)->prefix, level + 1, nnztot / nnz0));
  pc_gamg->Nlevels = level + 1;
  fine_level       = level;
  PetscCall(PCMGSetLevels(pc, pc_gamg->Nlevels, NULL));

  if (pc_gamg->Nlevels > 1) { /* don't setup MG if one level */

    /* set default smoothers & set operators */
    for (lidx = 1, level = pc_gamg->Nlevels - 2; lidx <= fine_level; lidx++, level--) {
      KSP smoother;
      PC  subpc;

      PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
      PetscCall(KSPGetPC(smoother, &subpc));

      PetscCall(KSPSetNormType(smoother, KSP_NORM_NONE));
      /* set ops */
      PetscCall(KSPSetOperators(smoother, Aarr[level], Aarr[level]));
      PetscCall(PCMGSetInterpolation(pc, lidx, Parr[level + 1]));

      /* set defaults */
      PetscCall(KSPSetType(smoother, KSPCHEBYSHEV));

      /* set blocks for ASM smoother that uses the 'aggregates' */
      if (pc_gamg->use_aggs_in_asm || pc_gamg->asm_hem_aggs) {
        PetscInt sz;
        IS      *iss;

        sz  = nASMBlocksArr[level];
        iss = ASMLocalIDsArr[level];
        PetscCall(PCSetType(subpc, PCASM));
        PetscCall(PCASMSetOverlap(subpc, 0));
        PetscCall(PCASMSetType(subpc, PC_ASM_BASIC));
        if (!sz) {
          IS is;
          PetscCall(ISCreateGeneral(PETSC_COMM_SELF, 0, NULL, PETSC_COPY_VALUES, &is));
          PetscCall(PCASMSetLocalSubdomains(subpc, 1, NULL, &is));
          PetscCall(ISDestroy(&is));
        } else {
          PetscInt kk;
          PetscCall(PCASMSetLocalSubdomains(subpc, sz, iss, NULL));
          for (kk = 0; kk < sz; kk++) PetscCall(ISDestroy(&iss[kk]));
          PetscCall(PetscFree(iss));
        }
        ASMLocalIDsArr[level] = NULL;
        nASMBlocksArr[level]  = 0;
      } else {
        PetscCall(PCSetType(subpc, PCJACOBI));
      }
    }
    {
      /* coarse grid */
      KSP      smoother, *k2;
      PC       subpc, pc2;
      PetscInt ii, first;
      Mat      Lmat = Aarr[(level = pc_gamg->Nlevels - 1)];
      lidx          = 0;

      PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
      PetscCall(KSPSetOperators(smoother, Lmat, Lmat));
      if (!pc_gamg->use_parallel_coarse_grid_solver) {
        PetscCall(KSPSetNormType(smoother, KSP_NORM_NONE));
        PetscCall(KSPGetPC(smoother, &subpc));
        PetscCall(PCSetType(subpc, PCBJACOBI));
        PetscCall(PCSetUp(subpc));
        PetscCall(PCBJacobiGetSubKSP(subpc, &ii, &first, &k2));
        PetscCheck(ii == 1, PETSC_COMM_SELF, PETSC_ERR_PLIB, "ii %" PetscInt_FMT " is not one", ii);
        PetscCall(KSPGetPC(k2[0], &pc2));
        PetscCall(PCSetType(pc2, PCLU));
        PetscCall(PCFactorSetShiftType(pc2, MAT_SHIFT_INBLOCKS));
        PetscCall(KSPSetTolerances(k2[0], PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 1));
        PetscCall(KSPSetType(k2[0], KSPPREONLY));
      }
    }

    /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
    PetscObjectOptionsBegin((PetscObject)pc);
    PetscCall(PCSetFromOptions_MG(pc, PetscOptionsObject));
    PetscOptionsEnd();
    PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_EXTERNAL));

    /* set cheby eigen estimates from SA to use in the solver */
    if (pc_gamg->use_sa_esteig) {
      for (lidx = 1, level = pc_gamg->Nlevels - 2; level >= 0; lidx++, level--) {
        KSP       smoother;
        PetscBool ischeb;

        PetscCall(PCMGGetSmoother(pc, lidx, &smoother));
        PetscCall(PetscObjectTypeCompare((PetscObject)smoother, KSPCHEBYSHEV, &ischeb));
        if (ischeb) {
          KSP_Chebyshev *cheb = (KSP_Chebyshev *)smoother->data;

          // The command line will override these settings because KSPSetFromOptions is called in PCSetUp_MG
          if (mg->max_eigen_DinvA[level] > 0) {
            // SA uses Jacobi for P; we use SA estimates if the smoother is also Jacobi or if the user explicitly requested it.
            // TODO: This should test whether it's the same Jacobi variant (DIAG, ROWSUM, etc.)
            PetscReal emax, emin;

            emin = mg->min_eigen_DinvA[level];
            emax = mg->max_eigen_DinvA[level];
            PetscCall(PetscInfo(pc, "%s: PCSetUp_GAMG: call KSPChebyshevSetEigenvalues on level %" PetscInt_FMT " (N=%" PetscInt_FMT ") with emax = %g emin = %g\n", ((PetscObject)pc)->prefix, level, Aarr[level]->rmap->N, (double)emax, (double)emin));
            cheb->emin_provided = emin;
            cheb->emax_provided = emax;
          }
        }
      }
    }

    PetscCall(PCSetUp_MG(pc));

    /* clean up */
    for (level = 1; level < pc_gamg->Nlevels; level++) {
      PetscCall(MatDestroy(&Parr[level]));
      PetscCall(MatDestroy(&Aarr[level]));
    }
  } else {
    KSP smoother;

    PetscCall(PetscInfo(pc, "%s: One level solver used (system is seen as DD). Using default solver.\n", ((PetscObject)pc)->prefix));
    PetscCall(PCMGGetSmoother(pc, 0, &smoother));
    PetscCall(KSPSetOperators(smoother, Aarr[0], Aarr[0]));
    PetscCall(KSPSetType(smoother, KSPPREONLY));
    PetscCall(PCSetUp_MG(pc));
  }
  PetscCall(PetscLogEventEnd(petsc_gamg_setup_events[GAMG_SETUP], 0, 0, 0, 0));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*
 PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
   that was created with PCCreate_GAMG().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  PetscCall(PCReset_GAMG(pc));
  if (pc_gamg->ops->destroy) PetscCall((*pc_gamg->ops->destroy)(pc));
  PetscCall(PetscFree(pc_gamg->ops));
  PetscCall(PetscFree(pc_gamg->gamg_type_name));
  PetscCall(PetscFree(pc_gamg));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetProcEqLim_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseEqLim_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRepartition_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetEigenvalues_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetUseSAEstEig_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRecomputeEstEig_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetReuseInterpolation_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetUseAggs_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetParallelCoarseGridSolve_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCpuPinCoarseGrids_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseGridLayoutType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThreshold_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRankReductionFactors_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThresholdScale_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGGetType_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetNlevels_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetHEM_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndices_C", NULL));
  PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndex_C", NULL));
  PetscCall(PCDestroy_MG(pc));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetProcEqLim - Set the number of equations to aim for per process on the coarse grids via processor reduction in `PCGAMG`

  Logically Collective

  Input Parameters:
+ pc - the preconditioner context
- n  - the number of equations

  Options Database Key:
. -pc_gamg_process_eq_limit <limit> - set the limit

  Level: intermediate

  Note:
  `PCGAMG` will reduce the number of MPI processes used directly on the coarse grids so that there are around <limit> equations on each process
  that has degrees of freedom.
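
  Example Usage:
  A minimal sketch (assumes `pc` was obtained with `KSPGetPC()`; the value 1000 is illustrative):
.vb
  PetscCall(PCSetType(pc, PCGAMG));
  PetscCall(PCGAMGSetProcEqLim(pc, 1000)); /* aim for ~1000 equations per active process on coarse grids */
.ve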

.seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseEqLim()`, `PCGAMGSetRankReductionFactors()`, `PCGAMGSetRepartition()`
@*/
PetscErrorCode PCGAMGSetProcEqLim(PC pc, PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetProcEqLim_C", (PC, PetscInt), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetProcEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  if (n > 0) pc_gamg->min_eq_proc = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetCoarseEqLim - Set the maximum number of equations on the coarsest grid of `PCGAMG`

  Collective

  Input Parameters:
+ pc - the preconditioner context
- n  - maximum number of equations to aim for

  Options Database Key:
. -pc_gamg_coarse_eq_limit <limit> - set the limit

  Level: intermediate

  Note:
  For example, -pc_gamg_coarse_eq_limit 1000 will stop coarsening once the coarse grid
  has fewer than 1000 unknowns.
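
  Example Usage:
  A minimal sketch (the value is illustrative):
.vb
  PetscCall(PCGAMGSetCoarseEqLim(pc, 1000)); /* stop coarsening once fewer than ~1000 unknowns remain */
.ve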

.seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetRankReductionFactors()`, `PCGAMGSetRepartition()`
@*/
PetscErrorCode PCGAMGSetCoarseEqLim(PC pc, PetscInt n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetCoarseEqLim_C", (PC, PetscInt), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetCoarseEqLim_GAMG(PC pc, PetscInt n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  if (n > 0) pc_gamg->coarse_eq_limit = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetRepartition - Repartition the degrees of freedom across the processors on the coarser grids when reducing the number of MPI ranks to use

  Collective

  Input Parameters:
+ pc - the preconditioner context
- n  - `PETSC_TRUE` or `PETSC_FALSE`

  Options Database Key:
. -pc_gamg_repartition <true,false> - turn on the repartitioning

  Level: intermediate

  Note:
  This will generally improve the load balancing of the work on each level
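
  Example Usage:
  A minimal sketch (typically combined with a parallel partitioner such as ParMETIS, selected through the options database):
.vb
  PetscCall(PCGAMGSetRepartition(pc, PETSC_TRUE)); /* rebalance dofs when the active process count is reduced */
.ve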

.seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetRankReductionFactors()`
@*/
PetscErrorCode PCGAMGSetRepartition(PC pc, PetscBool n)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetRepartition_C", (PC, PetscBool), (pc, n));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetRepartition_GAMG(PC pc, PetscBool n)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->repart = n;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetUseSAEstEig - Use the eigen estimate from smoothed aggregation for the Chebyshev smoother during the solution process

  Collective

  Input Parameters:
+ pc - the preconditioner context
- b  - flag

  Options Database Key:
. -pc_gamg_use_sa_esteig <true,false> - use the eigen estimate

  Level: advanced

  Notes:
  Smoothed aggregation constructs the smoothed prolongator $P = (I - \omega D^{-1} A) T$ where $T$ is the tentative prolongator and $D$ is the diagonal of $A$.
  Eigenvalue estimates (based on a few `PCCG` or `PCGMRES` iterations) are computed to choose $\omega$ so that this is a stable smoothing operation.
  If Chebyshev with Jacobi (diagonal) preconditioning is used for smoothing, then the eigenvalue estimates can be reused during the solution process.
  This option is only used when the smoother uses Jacobi, and should be turned off if a different `PCJacobiType` is used.
  It became the default in PETSc 3.17.
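
  Example Usage:
  A minimal sketch, turning the reuse off when the level smoother is not Jacobi-preconditioned Chebyshev:
.vb
  PetscCall(PCGAMGSetUseSAEstEig(pc, PETSC_FALSE));
.ve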

.seealso: [](ch_ksp), `PCGAMG`, `KSPChebyshevSetEigenvalues()`, `KSPChebyshevEstEigSet()`, `PCGAMGSetRecomputeEstEig()`
@*/
PetscErrorCode PCGAMGSetUseSAEstEig(PC pc, PetscBool b)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetUseSAEstEig_C", (PC, PetscBool), (pc, b));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetUseSAEstEig_GAMG(PC pc, PetscBool b)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->use_sa_esteig = b;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*@
  PCGAMGSetRecomputeEstEig - Set flag for the Chebyshev smoothers to recompute the eigenvalue estimates when the preconditioner is rebuilt

  Collective

  Input Parameters:
+ pc - the preconditioner context
- b  - flag

  Options Database Key:
. -pc_gamg_recompute_esteig <true> - recompute the eigenvalue estimates

  Level: advanced
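
  Example Usage:
  A minimal sketch for repeated solves where the interpolation is reused but the matrix entries change:
.vb
  PetscCall(PCGAMGSetReuseInterpolation(pc, PETSC_TRUE));
  PetscCall(PCGAMGSetRecomputeEstEig(pc, PETSC_TRUE)); /* refresh the Chebyshev eigen bounds at each rebuild */
.ve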

.seealso: [](ch_ksp), `PCGAMG`, `KSPChebyshevSetEigenvalues()`, `KSPChebyshevEstEigSet()`
@*/
PetscErrorCode PCGAMGSetRecomputeEstEig(PC pc, PetscBool b)
{
  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
  PetscTryMethod(pc, "PCGAMGSetRecomputeEstEig_C", (PC, PetscBool), (pc, b));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PCGAMGSetRecomputeEstEig_GAMG(PC pc, PetscBool b)
{
  PC_MG   *mg      = (PC_MG *)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;

  PetscFunctionBegin;
  pc_gamg->recompute_esteig = b;
  PetscFunctionReturn(PETSC_SUCCESS);
}

1156 /*@
1157   PCGAMGSetEigenvalues - Set the extreme eigenvalue estimates (`emin`, `emax`) that `PCGAMG` uses, for example for its Chebyshev smoothers, instead of computing them
1158 
1159   Collective
1160 
1161   Input Parameters:
1162 + pc   - the preconditioner context
1163 . emax - max eigenvalue
1164 - emin - min eigenvalue
1165 
1166   Options Database Key:
1167 . -pc_gamg_eigenvalues <emin,emax> - estimates of the eigenvalues
1168 
1169   Level: intermediate
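
  Example (a minimal sketch; the eigenvalue values are illustrative only):
.vb
  PetscCall(PCGAMGSetEigenvalues(pc, 2.0, 0.1)); /* emax = 2.0, emin = 0.1; must have the same sign and emax > emin */
.ve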
1170 
1171 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetUseSAEstEig()`
1172 @*/
1173 PetscErrorCode PCGAMGSetEigenvalues(PC pc, PetscReal emax, PetscReal emin)
1174 {
1175   PetscFunctionBegin;
1176   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1177   PetscTryMethod(pc, "PCGAMGSetEigenvalues_C", (PC, PetscReal, PetscReal), (pc, emax, emin));
1178   PetscFunctionReturn(PETSC_SUCCESS);
1179 }
1180 
1181 static PetscErrorCode PCGAMGSetEigenvalues_GAMG(PC pc, PetscReal emax, PetscReal emin)
1182 {
1183   PC_MG   *mg      = (PC_MG *)pc->data;
1184   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1185 
1186   PetscFunctionBegin;
1187   PetscCheck(emax > emin, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Maximum eigenvalue must be larger than minimum: max %g min %g", (double)emax, (double)emin);
1188   PetscCheck(emax * emin > 0.0, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "Both eigenvalues must be of the same sign: max %g min %g", (double)emax, (double)emin);
1189   pc_gamg->emax = emax;
1190   pc_gamg->emin = emin;
1191   PetscFunctionReturn(PETSC_SUCCESS);
1192 }
1193 
1194 /*@
1195   PCGAMGSetReuseInterpolation - Reuse prolongation when rebuilding a `PCGAMG` algebraic multigrid preconditioner
1196 
1197   Collective
1198 
1199   Input Parameters:
1200 + pc - the preconditioner context
1201 - n  - `PETSC_TRUE` or `PETSC_FALSE`
1202 
1203   Options Database Key:
1204 . -pc_gamg_reuse_interpolation <true,false> - reuse the previous interpolation
1205 
1206   Level: intermediate
1207 
1208   Note:
1209   May negatively affect the convergence rate of the method on new matrices if the matrix entries change a great deal, but allows
1210   rebuilding the preconditioner more quickly.
1211 
1212 .seealso: [](ch_ksp), `PCGAMG`
1213 @*/
1214 PetscErrorCode PCGAMGSetReuseInterpolation(PC pc, PetscBool n)
1215 {
1216   PetscFunctionBegin;
1217   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1218   PetscTryMethod(pc, "PCGAMGSetReuseInterpolation_C", (PC, PetscBool), (pc, n));
1219   PetscFunctionReturn(PETSC_SUCCESS);
1220 }
1221 
1222 static PetscErrorCode PCGAMGSetReuseInterpolation_GAMG(PC pc, PetscBool n)
1223 {
1224   PC_MG   *mg      = (PC_MG *)pc->data;
1225   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1226 
1227   PetscFunctionBegin;
1228   pc_gamg->reuse_prol = n;
1229   PetscFunctionReturn(PETSC_SUCCESS);
1230 }
1231 
1232 /*@
1233   PCGAMGASMSetUseAggs - Have the `PCGAMG` smoother on each level use the aggregates defined by the coarsening process as the subdomains for the additive Schwarz preconditioner
1234   used as the smoother
1235 
1236   Collective
1237 
1238   Input Parameters:
1239 + pc  - the preconditioner context
1240 - flg - `PETSC_TRUE` to use aggregates, `PETSC_FALSE` to not
1241 
1242   Options Database Key:
1243 . -pc_gamg_asm_use_agg <true,false> - use aggregates to define the additive Schwarz subdomains
1244 
1245   Level: intermediate
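
  For example (an options sketch; it assumes `PCASM` is also selected as the level smoother):
.vb
  -pc_type gamg -pc_gamg_asm_use_agg -mg_levels_pc_type asm
.ve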
1246 
1247 .seealso: [](ch_ksp), `PCGAMG`, `PCASM`, `PCSetType()`
1248 @*/
1249 PetscErrorCode PCGAMGASMSetUseAggs(PC pc, PetscBool flg)
1250 {
1251   PetscFunctionBegin;
1252   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1253   PetscTryMethod(pc, "PCGAMGASMSetUseAggs_C", (PC, PetscBool), (pc, flg));
1254   PetscFunctionReturn(PETSC_SUCCESS);
1255 }
1256 
1257 static PetscErrorCode PCGAMGASMSetUseAggs_GAMG(PC pc, PetscBool flg)
1258 {
1259   PC_MG   *mg      = (PC_MG *)pc->data;
1260   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1261 
1262   PetscFunctionBegin;
1263   pc_gamg->use_aggs_in_asm = flg;
1264   PetscFunctionReturn(PETSC_SUCCESS);
1265 }
1266 
1267 /*@
1268   PCGAMGSetParallelCoarseGridSolve - Allow a parallel coarse grid solver
1269 
1270   Collective
1271 
1272   Input Parameters:
1273 + pc  - the preconditioner context
1274 - flg - `PETSC_TRUE` to not force the coarse grid solve onto a single process
1275 
1276   Options Database Key:
1277 . -pc_gamg_parallel_coarse_grid_solver - use a parallel coarse grid direct solver
1278 
1279   Level: intermediate
1280 
1281 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseGridLayoutType()`, `PCGAMGSetCpuPinCoarseGrids()`
1282 @*/
1283 PetscErrorCode PCGAMGSetParallelCoarseGridSolve(PC pc, PetscBool flg)
1284 {
1285   PetscFunctionBegin;
1286   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1287   PetscTryMethod(pc, "PCGAMGSetParallelCoarseGridSolve_C", (PC, PetscBool), (pc, flg));
1288   PetscFunctionReturn(PETSC_SUCCESS);
1289 }
1290 
1291 static PetscErrorCode PCGAMGSetParallelCoarseGridSolve_GAMG(PC pc, PetscBool flg)
1292 {
1293   PC_MG   *mg      = (PC_MG *)pc->data;
1294   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1295 
1296   PetscFunctionBegin;
1297   pc_gamg->use_parallel_coarse_grid_solver = flg;
1298   PetscFunctionReturn(PETSC_SUCCESS);
1299 }
1300 
1301 /*@
1302   PCGAMGSetCpuPinCoarseGrids - Pin the coarse grids created in `PCGAMG` to run only on the CPU, since the problems may be too small to run efficiently on the GPUs
1303 
1304   Collective
1305 
1306   Input Parameters:
1307 + pc  - the preconditioner context
1308 - flg - `PETSC_TRUE` to pin coarse grids to the CPU
1309 
1310   Options Database Key:
1311 . -pc_gamg_cpu_pin_coarse_grids - pin the coarse grids to the CPU
1312 
1313   Level: intermediate
1314 
1315 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetCoarseGridLayoutType()`, `PCGAMGSetParallelCoarseGridSolve()`
1316 @*/
1317 PetscErrorCode PCGAMGSetCpuPinCoarseGrids(PC pc, PetscBool flg)
1318 {
1319   PetscFunctionBegin;
1320   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1321   PetscTryMethod(pc, "PCGAMGSetCpuPinCoarseGrids_C", (PC, PetscBool), (pc, flg));
1322   PetscFunctionReturn(PETSC_SUCCESS);
1323 }
1324 
1325 static PetscErrorCode PCGAMGSetCpuPinCoarseGrids_GAMG(PC pc, PetscBool flg)
1326 {
1327   PC_MG   *mg      = (PC_MG *)pc->data;
1328   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1329 
1330   PetscFunctionBegin;
1331   pc_gamg->cpu_pin_coarse_grids = flg;
1332   PetscFunctionReturn(PETSC_SUCCESS);
1333 }
1334 
1335 /*@
1336   PCGAMGSetCoarseGridLayoutType - Set how coarse grids are placed on processes: compact (processes in natural order) or spread (across the whole machine)
1337 
1338   Collective
1339 
1340   Input Parameters:
1341 + pc  - the preconditioner context
1342 - flg - `PCGAMGLayoutType` type, either `PCGAMG_LAYOUT_COMPACT` or `PCGAMG_LAYOUT_SPREAD`
1343 
1344   Options Database Key:
1345 . -pc_gamg_coarse_grid_layout_type <compact,spread> - place the coarse grids on processes in natural order (compact) or distribute them across the machine (spread)
1346 
1347   Level: intermediate
1348 
1349 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetParallelCoarseGridSolve()`, `PCGAMGSetCpuPinCoarseGrids()`, `PCGAMGLayoutType`, `PCGAMG_LAYOUT_COMPACT`, `PCGAMG_LAYOUT_SPREAD`
1350 @*/
1351 PetscErrorCode PCGAMGSetCoarseGridLayoutType(PC pc, PCGAMGLayoutType flg)
1352 {
1353   PetscFunctionBegin;
1354   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1355   PetscTryMethod(pc, "PCGAMGSetCoarseGridLayoutType_C", (PC, PCGAMGLayoutType), (pc, flg));
1356   PetscFunctionReturn(PETSC_SUCCESS);
1357 }
1358 
1359 static PetscErrorCode PCGAMGSetCoarseGridLayoutType_GAMG(PC pc, PCGAMGLayoutType flg)
1360 {
1361   PC_MG   *mg      = (PC_MG *)pc->data;
1362   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1363 
1364   PetscFunctionBegin;
1365   pc_gamg->layout_type = flg;
1366   PetscFunctionReturn(PETSC_SUCCESS);
1367 }
1368 
1369 /*@
1370   PCGAMGSetNlevels - Sets the maximum number of levels `PCGAMG` will use
1371 
1372   Collective
1373 
1374   Input Parameters:
1375 + pc - the preconditioner
1376 - n  - the maximum number of levels to use
1377 
1378   Options Database Key:
1379 . -pc_mg_levels <n> - set the maximum number of levels to allow
1380 
1381   Level: intermediate
1382 
1383   Developer Notes:
1384   Should be called `PCGAMGSetMaximumNumberlevels()` and possibly be shared with `PCMG`.
1385 
1386 .seealso: [](ch_ksp), `PCGAMG`
1387 @*/
1388 PetscErrorCode PCGAMGSetNlevels(PC pc, PetscInt n)
1389 {
1390   PetscFunctionBegin;
1391   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1392   PetscTryMethod(pc, "PCGAMGSetNlevels_C", (PC, PetscInt), (pc, n));
1393   PetscFunctionReturn(PETSC_SUCCESS);
1394 }
1395 
1396 static PetscErrorCode PCGAMGSetNlevels_GAMG(PC pc, PetscInt n)
1397 {
1398   PC_MG   *mg      = (PC_MG *)pc->data;
1399   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1400 
1401   PetscFunctionBegin;
1402   pc_gamg->Nlevels = n;
1403   PetscFunctionReturn(PETSC_SUCCESS);
1404 }
1405 
1406 /*@
1407   PCGAMGASMSetHEM - Sets the number of HEM (heavy edge matching) passes used to construct the ASM subdomains
1408 
1409   Collective
1410 
1411   Input Parameters:
1412 + pc - the preconditioner
1413 - n  - number of HEM matching passes used to construct the ASM subdomains
1414 
1415   Options Database Key:
1416 . -pc_gamg_asm_hem_aggs <n> - set the number of HEM matching passes
1417 
1418   Level: intermediate
1419 
1423 .seealso: [](ch_ksp), `PCGAMG`
1424 @*/
1425 PetscErrorCode PCGAMGASMSetHEM(PC pc, PetscInt n)
1426 {
1427   PetscFunctionBegin;
1428   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1429   PetscTryMethod(pc, "PCGAMGASMSetHEM_C", (PC, PetscInt), (pc, n));
1430   PetscFunctionReturn(PETSC_SUCCESS);
1431 }
1432 
1433 static PetscErrorCode PCGAMGASMSetHEM_GAMG(PC pc, PetscInt n)
1434 {
1435   PC_MG   *mg      = (PC_MG *)pc->data;
1436   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1437 
1438   PetscFunctionBegin;
1439   pc_gamg->asm_hem_aggs = n;
1440   PetscFunctionReturn(PETSC_SUCCESS);
1441 }
1442 
1443 /*@
1444   PCGAMGSetThreshold - Relative threshold to use for dropping edges in aggregation graph
1445 
1446   Not Collective
1447 
1448   Input Parameters:
1449 + pc - the preconditioner context
1450 . v  - array of threshold values for finest n levels; 0.0 means keep all nonzero entries in the graph; negative means keep even zero entries in the graph
1451 - n  - number of threshold values provided in array
1452 
1453   Options Database Key:
1454 . -pc_gamg_threshold <threshold> - the threshold to drop edges
1455 
1456   Level: intermediate
1457 
1458   Notes:
1459   Increasing the threshold decreases the rate of coarsening. Conversely, reducing the threshold increases the rate of coarsening (aggressive coarsening), which reduces the complexity of the coarse grids but generally results in slower solver convergence rates. Reducing coarse grid complexity also considerably reduces the cost of constructing the Galerkin coarse grid operators.
1460   Before coarsening or aggregating the graph, `PCGAMG` removes small values from the graph with this threshold, thus reducing the coupling in the graph and producing a different (perhaps better) coarse set of points.
1461 
1462   If `n` is less than the total number of coarsenings (see `PCGAMGSetNlevels()`), then threshold scaling (see `PCGAMGSetThresholdScale()`) is used for each successive coarsening.
1463   In this case, `PCGAMGSetThresholdScale()` must be called before `PCGAMGSetThreshold()`.
1464   If `n` is greater than the total number of levels, the excess entries in threshold will not be used.
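
  Example (a minimal sketch; the threshold values are illustrative only):
.vb
  PetscReal th[2] = {0.01, 0.005}; /* thresholds for the two finest levels */
  PetscCall(PCGAMGSetThreshold(pc, th, 2)); /* coarser levels are filled in using the threshold scale */
.ve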
1465 
1466 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetAggressiveLevels()`, `PCGAMGMISkSetAggressive()`, `PCGAMGSetMinDegreeOrderingMISk()`, `PCGAMGSetThresholdScale()`
1467 @*/
1468 PetscErrorCode PCGAMGSetThreshold(PC pc, PetscReal v[], PetscInt n)
1469 {
1470   PetscFunctionBegin;
1471   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1472   if (n) PetscAssertPointer(v, 2);
1473   PetscTryMethod(pc, "PCGAMGSetThreshold_C", (PC, PetscReal[], PetscInt), (pc, v, n));
1474   PetscFunctionReturn(PETSC_SUCCESS);
1475 }
1476 
1477 static PetscErrorCode PCGAMGSetThreshold_GAMG(PC pc, PetscReal v[], PetscInt n)
1478 {
1479   PC_MG   *mg      = (PC_MG *)pc->data;
1480   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1481   PetscInt i;
1482 
1483   PetscFunctionBegin;
1484   for (i = 0; i < PetscMin(n, PETSC_MG_MAXLEVELS); i++) pc_gamg->threshold[i] = v[i];
1485   for (; i > 0 && i < PETSC_MG_MAXLEVELS; i++) pc_gamg->threshold[i] = pc_gamg->threshold[i - 1] * pc_gamg->threshold_scale; /* guard i > 0 so n = 0 does not read threshold[-1] */
1486   PetscFunctionReturn(PETSC_SUCCESS);
1487 }
1488 
1489 /*@
1490   PCGAMGSetRankReductionFactors - Set a manual schedule for MPI rank reduction on coarse grids
1491 
1492   Collective
1493 
1494   Input Parameters:
1495 + pc - the preconditioner context
1496 . v  - array of reduction factors; a first value of 0 forces a reduction to one process/device on the first level (used with GPUs)
1497 - n  - number of values provided in array
1498 
1499   Options Database Key:
1500 . -pc_gamg_rank_reduction_factors <factors> - provide the schedule
1501 
1502   Level: intermediate
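
  Example (a sketch of a manual schedule; the factors shown are illustrative):
.vb
  PetscInt f[2] = {2, 4}; /* reduce the number of ranks by 2x at the first coarsening, then by 4x */
  PetscCall(PCGAMGSetRankReductionFactors(pc, f, 2)); /* remaining levels fall back to the internal heuristics */
.ve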
1503 
1504 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetProcEqLim()`, `PCGAMGSetCoarseEqLim()`
1505 @*/
1506 PetscErrorCode PCGAMGSetRankReductionFactors(PC pc, PetscInt v[], PetscInt n)
1507 {
1508   PetscFunctionBegin;
1509   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1510   if (n) PetscAssertPointer(v, 2);
1511   PetscTryMethod(pc, "PCGAMGSetRankReductionFactors_C", (PC, PetscInt[], PetscInt), (pc, v, n));
1512   PetscFunctionReturn(PETSC_SUCCESS);
1513 }
1514 
1515 static PetscErrorCode PCGAMGSetRankReductionFactors_GAMG(PC pc, PetscInt v[], PetscInt n)
1516 {
1517   PC_MG   *mg      = (PC_MG *)pc->data;
1518   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1519   PetscInt i;
1520 
1521   PetscFunctionBegin;
1522   for (i = 0; i < PetscMin(n, PETSC_MG_MAXLEVELS); i++) pc_gamg->level_reduction_factors[i] = v[i];
1523   for (; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->level_reduction_factors[i] = -1; /* -1 means not set: use the default reduction heuristics */
1524   PetscFunctionReturn(PETSC_SUCCESS);
1525 }
1526 
1527 /*@
1528   PCGAMGSetThresholdScale - Relative threshold reduction at each level
1529 
1530   Not Collective
1531 
1532   Input Parameters:
1533 + pc - the preconditioner context
1534 - v  - the threshold value reduction, usually < 1.0
1535 
1536   Options Database Key:
1537 . -pc_gamg_threshold_scale <v> - set the relative threshold reduction on each level
1538 
1539   Level: advanced
1540 
1541   Note:
1542   The initial threshold (for an arbitrary number of levels starting from the finest) can be set with `PCGAMGSetThreshold()`.
1543   This scaling is applied at each subsequent coarsening; `PCGAMGSetThresholdScale()` must be called before `PCGAMGSetThreshold()` for the scaling to take effect.
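
  Example (a minimal sketch; the values are illustrative only):
.vb
  PetscCall(PCGAMGSetThresholdScale(pc, 0.5)); /* must precede PCGAMGSetThreshold() */
  PetscReal th[1] = {0.02};                    /* threshold for the finest level */
  PetscCall(PCGAMGSetThreshold(pc, th, 1));    /* coarser levels then use 0.01, 0.005, ... */
.ve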
1544 
1545 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetThreshold()`
1546 @*/
1547 PetscErrorCode PCGAMGSetThresholdScale(PC pc, PetscReal v)
1548 {
1549   PetscFunctionBegin;
1550   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1551   PetscTryMethod(pc, "PCGAMGSetThresholdScale_C", (PC, PetscReal), (pc, v));
1552   PetscFunctionReturn(PETSC_SUCCESS);
1553 }
1554 
1555 static PetscErrorCode PCGAMGSetThresholdScale_GAMG(PC pc, PetscReal v)
1556 {
1557   PC_MG   *mg      = (PC_MG *)pc->data;
1558   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1559 
1560   PetscFunctionBegin;
1561   pc_gamg->threshold_scale = v;
1562   PetscFunctionReturn(PETSC_SUCCESS);
1563 }
1564 
1565 /*@
1566   PCGAMGSetType - Set the type of algorithm `PCGAMG` should use
1567 
1568   Collective
1569 
1570   Input Parameters:
1571 + pc   - the preconditioner context
1572 - type - `PCGAMGAGG`, `PCGAMGGEO`, or `PCGAMGCLASSICAL`
1573 
1574   Options Database Key:
1575 . -pc_gamg_type <agg,geo,classical> - type of algebraic multigrid to apply - only agg supported
1576 
1577   Level: intermediate
1578 
1579 .seealso: [](ch_ksp), `PCGAMGGetType()`, `PCGAMG`, `PCGAMGType`
1580 @*/
1581 PetscErrorCode PCGAMGSetType(PC pc, PCGAMGType type)
1582 {
1583   PetscFunctionBegin;
1584   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1585   PetscTryMethod(pc, "PCGAMGSetType_C", (PC, PCGAMGType), (pc, type));
1586   PetscFunctionReturn(PETSC_SUCCESS);
1587 }
1588 
1589 /*@
1590   PCGAMGGetType - Get the type of algorithm `PCGAMG` will use
1591 
1592   Collective
1593 
1594   Input Parameter:
1595 . pc - the preconditioner context
1596 
1597   Output Parameter:
1598 . type - the type of algorithm used
1599 
1600   Level: intermediate
1601 
1602 .seealso: [](ch_ksp), `PCGAMG`, `PCGAMGSetType()`, `PCGAMGType`
1603 @*/
1604 PetscErrorCode PCGAMGGetType(PC pc, PCGAMGType *type)
1605 {
1606   PetscFunctionBegin;
1607   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1608   PetscUseMethod(pc, "PCGAMGGetType_C", (PC, PCGAMGType *), (pc, type));
1609   PetscFunctionReturn(PETSC_SUCCESS);
1610 }
1611 
1612 static PetscErrorCode PCGAMGGetType_GAMG(PC pc, PCGAMGType *type)
1613 {
1614   PC_MG   *mg      = (PC_MG *)pc->data;
1615   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1616 
1617   PetscFunctionBegin;
1618   *type = pc_gamg->type;
1619   PetscFunctionReturn(PETSC_SUCCESS);
1620 }
1621 
1622 static PetscErrorCode PCGAMGSetType_GAMG(PC pc, PCGAMGType type)
1623 {
1624   PC_MG   *mg      = (PC_MG *)pc->data;
1625   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1626   PetscErrorCode (*r)(PC);
1627 
1628   PetscFunctionBegin;
1629   pc_gamg->type = type;
1630   PetscCall(PetscFunctionListFind(GAMGList, type, &r));
1631   PetscCheck(r, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_UNKNOWN_TYPE, "Unknown GAMG type %s given", type);
1632   if (pc_gamg->ops->destroy) {
1633     PetscCall((*pc_gamg->ops->destroy)(pc));
1634     PetscCall(PetscMemzero(pc_gamg->ops, sizeof(struct _PCGAMGOps)));
1635     pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1636     /* cleaning up common data in pc_gamg - this should disappear someday */
1637     pc_gamg->data_cell_cols      = 0;
1638     pc_gamg->data_cell_rows      = 0;
1639     pc_gamg->orig_data_cell_cols = 0;
1640     pc_gamg->orig_data_cell_rows = 0;
1641     PetscCall(PetscFree(pc_gamg->data));
1642     pc_gamg->data_sz = 0;
1643   }
1644   PetscCall(PetscFree(pc_gamg->gamg_type_name));
1645   PetscCall(PetscStrallocpy(type, &pc_gamg->gamg_type_name));
1646   PetscCall((*r)(pc));
1647   PetscFunctionReturn(PETSC_SUCCESS);
1648 }
1649 
1650 static PetscErrorCode PCView_GAMG(PC pc, PetscViewer viewer)
1651 {
1652   PC_MG    *mg      = (PC_MG *)pc->data;
1653   PC_GAMG  *pc_gamg = (PC_GAMG *)mg->innerctx;
1654   PetscReal gc = 0, oc = 0;
1655 
1656   PetscFunctionBegin;
1657   PetscCall(PetscViewerASCIIPrintf(viewer, "    GAMG specific options\n"));
1658   PetscCall(PetscViewerASCIIPrintf(viewer, "      Threshold for dropping small values in graph on each level ="));
1659   for (PetscInt i = 0; i < mg->nlevels; i++) PetscCall(PetscViewerASCIIPrintf(viewer, " %g", (double)pc_gamg->threshold[i]));
1660   PetscCall(PetscViewerASCIIPrintf(viewer, "\n"));
1661   PetscCall(PetscViewerASCIIPrintf(viewer, "      Threshold scaling factor for each level not specified = %g\n", (double)pc_gamg->threshold_scale));
1662   if (pc_gamg->use_aggs_in_asm) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using aggregates from coarsening process to define subdomains for PCASM\n")); // this takes precedence
1663   else if (pc_gamg->asm_hem_aggs) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using aggregates made with %d applications of heavy edge matching (HEM) to define subdomains for PCASM\n", (int)pc_gamg->asm_hem_aggs));
1664   if (pc_gamg->use_parallel_coarse_grid_solver) PetscCall(PetscViewerASCIIPrintf(viewer, "      Using parallel coarse grid solver (all coarse grid equations not put on one process)\n"));
1665   if (pc_gamg->injection_index_size) {
1666     PetscCall(PetscViewerASCIIPrintf(viewer, "      Using injection restriction/prolongation on first level, dofs:"));
1667     for (int i = 0; i < pc_gamg->injection_index_size; i++) PetscCall(PetscViewerASCIIPrintf(viewer, " %d", (int)pc_gamg->injection_index[i]));
1668     PetscCall(PetscViewerASCIIPrintf(viewer, "\n"));
1669   }
1670   if (pc_gamg->ops->view) PetscCall((*pc_gamg->ops->view)(pc, viewer));
1671   PetscCall(PCMGGetGridComplexity(pc, &gc, &oc));
1672   PetscCall(PetscViewerASCIIPrintf(viewer, "      Complexity:    grid = %g    operator = %g\n", (double)gc, (double)oc));
1673   PetscFunctionReturn(PETSC_SUCCESS);
1674 }
1675 
1676 /*@
1677   PCGAMGSetInjectionIndex - Set the indices of a subset of the variables per vertex that are used to define the coarse grid space by injection
1678 
1679   Logically Collective
1680 
1681   Input Parameters:
1682 + pc  - the preconditioner context
1683 . n   - number of indices
1684 - idx - array of indices
1685 
1686   Options Database Key:
1687 . -pc_gamg_injection_index - array of indices of a subset of the variables per vertex to use for the injection coarse grid space
1688 
1689   Level: intermediate
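
  Example (a minimal sketch; restricting the coarse space to dof 0 of each vertex is illustrative only):
.vb
  PetscInt idx[1] = {0};
  PetscCall(PCGAMGSetInjectionIndex(pc, 1, idx));
.ve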
1690 
1691 .seealso: [](ch_ksp), `PCGAMG`
1692 @*/
1693 PetscErrorCode PCGAMGSetInjectionIndex(PC pc, PetscInt n, PetscInt idx[])
1694 {
1695   PetscFunctionBegin;
1696   PetscValidHeaderSpecific(pc, PC_CLASSID, 1);
1697   PetscValidLogicalCollectiveInt(pc, n, 2);
1698   PetscTryMethod(pc, "PCGAMGSetInjectionIndex_C", (PC, PetscInt, PetscInt[]), (pc, n, idx));
1699   PetscFunctionReturn(PETSC_SUCCESS);
1700 }
1701 
1702 static PetscErrorCode PCGAMGSetInjectionIndex_GAMG(PC pc, PetscInt n, PetscInt idx[])
1703 {
1704   PC_MG   *mg      = (PC_MG *)pc->data;
1705   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
1706 
1707   PetscFunctionBegin;
1708   pc_gamg->injection_index_size = n;
1709   PetscCheck(n < MAT_COARSEN_STRENGTH_INDEX_SIZE, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "array size %d larger than max %d", (int)n, MAT_COARSEN_STRENGTH_INDEX_SIZE);
1710   for (PetscInt i = 0; i < n; i++) pc_gamg->injection_index[i] = idx[i];
1711   PetscFunctionReturn(PETSC_SUCCESS);
1712 }
1713 
1714 static PetscErrorCode PCSetFromOptions_GAMG(PC pc, PetscOptionItems *PetscOptionsObject)
1715 {
1716   PC_MG             *mg      = (PC_MG *)pc->data;
1717   PC_GAMG           *pc_gamg = (PC_GAMG *)mg->innerctx;
1718   PetscBool          flag;
1719   MPI_Comm           comm;
1720   char               prefix[256], tname[32];
1721   PetscInt           i, n;
1722   const char        *pcpre;
1723   static const char *LayoutTypes[] = {"compact", "spread", "PCGAMGLayoutType", "PC_GAMG_LAYOUT", NULL};
1724 
1725   PetscFunctionBegin;
1726   PetscCall(PetscObjectGetComm((PetscObject)pc, &comm));
1727   PetscOptionsHeadBegin(PetscOptionsObject, "GAMG options");
1728   PetscCall(PetscOptionsFList("-pc_gamg_type", "Type of AMG method (only 'agg' supported and useful)", "PCGAMGSetType", GAMGList, pc_gamg->gamg_type_name, tname, sizeof(tname), &flag));
1729   if (flag) PetscCall(PCGAMGSetType(pc, tname));
1730   PetscCall(PetscOptionsBool("-pc_gamg_repartition", "Repartition coarse grids", "PCGAMGSetRepartition", pc_gamg->repart, &pc_gamg->repart, NULL));
1731   PetscCall(PetscOptionsBool("-pc_gamg_use_sa_esteig", "Use eigen estimate from smoothed aggregation for smoother", "PCGAMGSetUseSAEstEig", pc_gamg->use_sa_esteig, &pc_gamg->use_sa_esteig, NULL));
1732   PetscCall(PetscOptionsBool("-pc_gamg_recompute_esteig", "Set flag to recompute eigen estimates for Chebyshev when matrix changes", "PCGAMGSetRecomputeEstEig", pc_gamg->recompute_esteig, &pc_gamg->recompute_esteig, NULL));
1733   PetscCall(PetscOptionsBool("-pc_gamg_reuse_interpolation", "Reuse prolongation operator", "PCGAMGSetReuseInterpolation", pc_gamg->reuse_prol, &pc_gamg->reuse_prol, NULL));
1734   PetscCall(PetscOptionsBool("-pc_gamg_asm_use_agg", "Use aggregation aggregates for ASM smoother", "PCGAMGASMSetUseAggs", pc_gamg->use_aggs_in_asm, &pc_gamg->use_aggs_in_asm, NULL));
1735   PetscCall(PetscOptionsBool("-pc_gamg_parallel_coarse_grid_solver", "Use parallel coarse grid solver (otherwise put last grid on one process)", "PCGAMGSetParallelCoarseGridSolve", pc_gamg->use_parallel_coarse_grid_solver, &pc_gamg->use_parallel_coarse_grid_solver, NULL));
1736   PetscCall(PetscOptionsBool("-pc_gamg_cpu_pin_coarse_grids", "Pin coarse grids to the CPU", "PCGAMGSetCpuPinCoarseGrids", pc_gamg->cpu_pin_coarse_grids, &pc_gamg->cpu_pin_coarse_grids, NULL));
1737   PetscCall(PetscOptionsEnum("-pc_gamg_coarse_grid_layout_type", "compact: place reduced grids on processes in natural order; spread: distribute to whole machine for more memory bandwidth", "PCGAMGSetCoarseGridLayoutType", LayoutTypes,
1738                              (PetscEnum)pc_gamg->layout_type, (PetscEnum *)&pc_gamg->layout_type, NULL));
1739   PetscCall(PetscOptionsInt("-pc_gamg_process_eq_limit", "Limit (goal) on number of equations per process on coarse grids", "PCGAMGSetProcEqLim", pc_gamg->min_eq_proc, &pc_gamg->min_eq_proc, NULL));
1740   PetscCall(PetscOptionsInt("-pc_gamg_coarse_eq_limit", "Limit on number of equations for the coarse grid", "PCGAMGSetCoarseEqLim", pc_gamg->coarse_eq_limit, &pc_gamg->coarse_eq_limit, NULL));
1741   PetscCall(PetscOptionsInt("-pc_gamg_asm_hem_aggs", "Number of HEM matching passes used to construct aggregates for the ASM smoother", "PCGAMGASMSetHEM", pc_gamg->asm_hem_aggs, &pc_gamg->asm_hem_aggs, NULL));
1742   PetscCall(PetscOptionsReal("-pc_gamg_threshold_scale", "Scaling of threshold for each level not specified", "PCGAMGSetThresholdScale", pc_gamg->threshold_scale, &pc_gamg->threshold_scale, NULL));
1743   n = PETSC_MG_MAXLEVELS;
1744   PetscCall(PetscOptionsRealArray("-pc_gamg_threshold", "Relative threshold to use for dropping edges in aggregation graph", "PCGAMGSetThreshold", pc_gamg->threshold, &n, &flag));
1745   if (!flag || n < PETSC_MG_MAXLEVELS) {
1746     if (!flag) n = 1;
1747     i = n;
1748     do {
1749       pc_gamg->threshold[i] = pc_gamg->threshold[i - 1] * pc_gamg->threshold_scale;
1750     } while (++i < PETSC_MG_MAXLEVELS);
1751   }
1752   PetscCall(PetscOptionsInt("-pc_mg_levels", "Set number of MG levels (should get from base class)", "PCGAMGSetNlevels", pc_gamg->Nlevels, &pc_gamg->Nlevels, NULL));
1753   PetscCheck(pc_gamg->Nlevels <= PETSC_MG_MAXLEVELS, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "-pc_mg_levels (%d) > PETSC_MG_MAXLEVELS (%d)", (int)pc_gamg->Nlevels, (int)PETSC_MG_MAXLEVELS);
1754   n = PETSC_MG_MAXLEVELS;
1755   PetscCall(PetscOptionsIntArray("-pc_gamg_rank_reduction_factors", "Manual schedule of coarse grid reduction factors that overrides internal heuristics (0 for first reduction puts one process/device)", "PCGAMGSetRankReductionFactors", pc_gamg->level_reduction_factors, &n, &flag));
1756   if (!flag) i = 0;
1757   else i = n;
1758   /* fill any unspecified entries with -1: use the default reduction heuristics */
1759   for (; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->level_reduction_factors[i] = -1;
1761   {
1762     PetscReal eminmax[2] = {0., 0.};
1763     n                    = 2;
1764     PetscCall(PetscOptionsRealArray("-pc_gamg_eigenvalues", "extreme eigenvalues for smoothed aggregation", "PCGAMGSetEigenvalues", eminmax, &n, &flag));
1765     if (flag) {
1766       PetscCheck(n == 2, PetscObjectComm((PetscObject)pc), PETSC_ERR_ARG_INCOMP, "-pc_gamg_eigenvalues: must specify 2 parameters, min and max eigenvalues");
1767       PetscCall(PCGAMGSetEigenvalues(pc, eminmax[1], eminmax[0]));
1768     }
1769   }
1770   pc_gamg->injection_index_size = MAT_COARSEN_STRENGTH_INDEX_SIZE;
1771   PetscCall(PetscOptionsIntArray("-pc_gamg_injection_index", "Array of indices of a subset of the variables per vertex to use for the injection coarse grid space", "PCGAMGSetInjectionIndex", pc_gamg->injection_index, &pc_gamg->injection_index_size, NULL));
1772   /* set options for subtype */
1773   PetscCall((*pc_gamg->ops->setfromoptions)(pc, PetscOptionsObject));
1774 
1775   PetscCall(PCGetOptionsPrefix(pc, &pcpre));
1776   PetscCall(PetscSNPrintf(prefix, sizeof(prefix), "%spc_gamg_", pcpre ? pcpre : ""));
1777   PetscOptionsHeadEnd();
1778   PetscFunctionReturn(PETSC_SUCCESS);
1779 }
1780 
1781 /*MC
1782   PCGAMG - Geometric algebraic multigrid (AMG) preconditioner
1783 
1784   Options Database Keys:
1785 + -pc_gamg_type <type,default=agg> - one of agg, geo, or classical (only smoothed aggregation, agg, supported)
1786 . -pc_gamg_repartition  <bool,default=false> - repartition the degrees of freedom across the coarse grids as they are determined
1787 . -pc_gamg_asm_use_agg <bool,default=false> - use the aggregates from the coarsening process to define the subdomains on each level for the PCASM smoother
1788 . -pc_gamg_process_eq_limit <limit, default=50> - `PCGAMG` will reduce the number of MPI ranks used directly on the coarse grids so that there are around <limit>
1789                                         equations on each process that has degrees of freedom
1790 . -pc_gamg_coarse_eq_limit <limit, default=50> - Set maximum number of equations on coarsest grid to aim for.
1791 . -pc_gamg_reuse_interpolation <bool,default=true> - when rebuilding the algebraic multigrid preconditioner reuse the previously computed interpolations (should always be true)
1792 . -pc_gamg_threshold[] <thresh,default=[-1,...]> - Before aggregating the graph `PCGAMG` will remove small values from the graph on each level (< 0 does no filtering)
1793 - -pc_gamg_threshold_scale <scale,default=1> - Scaling of threshold on each coarser grid if not specified
1794 
1795   Options Database Keys for Aggregation:
1796 + -pc_gamg_agg_nsmooths <nsmooth, default=1> - number of smoothing steps to use with smoothed aggregation
1797 . -pc_gamg_aggressive_coarsening <n,default=1> - number of aggressive coarsening (MIS-2) levels from finest.
1798 . -pc_gamg_aggressive_square_graph <bool,default=false> - Use square graph (A'A) or MIS-k (k=2) for aggressive coarsening
1799 . -pc_gamg_mis_k_minimum_degree_ordering <bool,default=true> - Use minimum degree ordering in greedy MIS algorithm
1800 . -pc_gamg_asm_hem_aggs <n,default=0> - Number of HEM aggregation steps for ASM smoother
1801 - -pc_gamg_aggressive_mis_k <n,default=2> - Number (k) distance in MIS coarsening (>2 is 'aggressive')
1802 
1803   Options Database Keys for Multigrid:
1804 + -pc_mg_cycle_type <v> - v or w, see `PCMGSetCycleType()`
1805 . -pc_mg_distinct_smoothup - configure the up and down (pre and post) smoothers separately, see `PCMGSetDistinctSmoothUp()`
1806 . -pc_mg_type <multiplicative> - (one of) additive multiplicative full kaskade
1807 - -pc_mg_levels <levels> - Number of levels of multigrid to use. GAMG has a heuristic so pc_mg_levels is not usually used with GAMG
1808 
1809   Level: intermediate
1810 
1811   Notes:
1812   To obtain good performance for `PCGAMG` for vector valued problems, you must
1813   call `MatSetBlockSize()` to indicate the number of degrees of freedom per grid point, and
1814   call `MatSetNearNullSpace()` (or `PCSetCoordinates()` if solving the equations of elasticity) to indicate the near null space of the operator.
1815 
1816   The many options for `PCMG` also work directly for `PCGAMG`, such as controlling the smoothers on each level.
1817 
1818   See [the Users Manual section on PCGAMG](sec_amg) and [the Users Manual section on PCMG](sec_mg) for more details.
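
  Example (a minimal sketch, assuming a `KSP` `ksp`, a matrix `A`, and `coords`, the coordinates of the `nloc` local vertices):
.vb
  PC pc;
  PetscCall(MatSetBlockSize(A, 3));                 /* e.g. 3 dofs per grid point for 3D elasticity */
  PetscCall(KSPGetPC(ksp, &pc));
  PetscCall(PCSetType(pc, PCGAMG));
  PetscCall(PCSetCoordinates(pc, 3, nloc, coords)); /* lets GAMG build the rigid body near null space */
.ve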
1819 
1820 .seealso: [](ch_ksp), `PCCreate()`, `PCSetType()`, `MatSetBlockSize()`, `PCMGType`, `PCSetCoordinates()`, `MatSetNearNullSpace()`, `PCGAMGSetType()`, `PCGAMGAGG`, `PCGAMGGEO`, `PCGAMGCLASSICAL`, `PCGAMGSetProcEqLim()`,
1821           `PCGAMGSetCoarseEqLim()`, `PCGAMGSetRepartition()`, `PCGAMGRegister()`, `PCGAMGSetReuseInterpolation()`, `PCGAMGASMSetUseAggs()`, `PCGAMGSetParallelCoarseGridSolve()`, `PCGAMGSetNlevels()`, `PCGAMGSetThreshold()`, `PCGAMGGetType()`, `PCGAMGSetUseSAEstEig()`
1822 M*/
1823 PETSC_EXTERN PetscErrorCode PCCreate_GAMG(PC pc)
1824 {
1825   PC_GAMG *pc_gamg;
1826   PC_MG   *mg;
1827 
1828   PetscFunctionBegin;
1829   /* register AMG type */
1830   PetscCall(PCGAMGInitializePackage());
1831 
1832   /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
1833   PetscCall(PCSetType(pc, PCMG));
1834   PetscCall(PetscObjectChangeTypeName((PetscObject)pc, PCGAMG));
1835 
1836   /* create a supporting struct and attach it to pc */
1837   PetscCall(PetscNew(&pc_gamg));
1838   PetscCall(PCMGSetGalerkin(pc, PC_MG_GALERKIN_EXTERNAL));
1839   mg           = (PC_MG *)pc->data;
1840   mg->innerctx = pc_gamg;
1841 
1842   PetscCall(PetscNew(&pc_gamg->ops));
1843 
1844   /* these should be in subctx but repartitioning needs simple arrays */
1845   pc_gamg->data_sz = 0;
1846   pc_gamg->data    = NULL;
1847 
1848   /* overwrite the pointers of PCMG by the functions of base class PCGAMG */
1849   pc->ops->setfromoptions = PCSetFromOptions_GAMG;
1850   pc->ops->setup          = PCSetUp_GAMG;
1851   pc->ops->reset          = PCReset_GAMG;
1852   pc->ops->destroy        = PCDestroy_GAMG;
1853   mg->view                = PCView_GAMG;
1854 
1855   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetProcEqLim_C", PCGAMGSetProcEqLim_GAMG));
1856   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseEqLim_C", PCGAMGSetCoarseEqLim_GAMG));
1857   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRepartition_C", PCGAMGSetRepartition_GAMG));
1858   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetEigenvalues_C", PCGAMGSetEigenvalues_GAMG));
1859   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetUseSAEstEig_C", PCGAMGSetUseSAEstEig_GAMG));
1860   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRecomputeEstEig_C", PCGAMGSetRecomputeEstEig_GAMG));
1861   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetReuseInterpolation_C", PCGAMGSetReuseInterpolation_GAMG));
1862   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetUseAggs_C", PCGAMGASMSetUseAggs_GAMG));
1863   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetParallelCoarseGridSolve_C", PCGAMGSetParallelCoarseGridSolve_GAMG));
1864   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCpuPinCoarseGrids_C", PCGAMGSetCpuPinCoarseGrids_GAMG));
1865   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetCoarseGridLayoutType_C", PCGAMGSetCoarseGridLayoutType_GAMG));
1866   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThreshold_C", PCGAMGSetThreshold_GAMG));
1867   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetRankReductionFactors_C", PCGAMGSetRankReductionFactors_GAMG));
1868   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetThresholdScale_C", PCGAMGSetThresholdScale_GAMG));
1869   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetType_C", PCGAMGSetType_GAMG));
1870   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGGetType_C", PCGAMGGetType_GAMG));
1871   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetNlevels_C", PCGAMGSetNlevels_GAMG));
1872   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGASMSetHEM_C", PCGAMGASMSetHEM_GAMG));
1873   PetscCall(PetscObjectComposeFunction((PetscObject)pc, "PCGAMGSetInjectionIndex_C", PCGAMGSetInjectionIndex_GAMG));
1874   pc_gamg->repart                          = PETSC_FALSE;
1875   pc_gamg->reuse_prol                      = PETSC_TRUE;
1876   pc_gamg->use_aggs_in_asm                 = PETSC_FALSE;
1877   pc_gamg->use_parallel_coarse_grid_solver = PETSC_FALSE;
1878   pc_gamg->cpu_pin_coarse_grids            = PETSC_FALSE;
1879   pc_gamg->layout_type                     = PCGAMG_LAYOUT_SPREAD;
1880   pc_gamg->min_eq_proc                     = 50;
1881   pc_gamg->asm_hem_aggs                    = 0;
1882   pc_gamg->coarse_eq_limit                 = 50;
1883   for (int i = 0; i < PETSC_MG_MAXLEVELS; i++) pc_gamg->threshold[i] = -1;
1884   pc_gamg->threshold_scale  = 1.;
1885   pc_gamg->Nlevels          = PETSC_MG_MAXLEVELS;
1886   pc_gamg->current_level    = 0; /* don't need to init really */
1887   pc_gamg->use_sa_esteig    = PETSC_TRUE;
1888   pc_gamg->recompute_esteig = PETSC_TRUE;
1889   pc_gamg->emin             = 0;
1890   pc_gamg->emax             = 0;
1891 
1892   pc_gamg->ops->createlevel = PCGAMGCreateLevel_GAMG;
1893 
1894   /* PCSetUp_GAMG assumes that the type has been set, so set it to the default now */
1895   PetscCall(PCGAMGSetType(pc, PCGAMGAGG));
1896   PetscFunctionReturn(PETSC_SUCCESS);
1897 }
1898 
1899 /*@C
1900   PCGAMGInitializePackage - This function initializes everything in the `PCGAMG` package. It is called
1901   from `PCInitializePackage()`.
1902 
1903   Level: developer
1904 
1905 .seealso: [](ch_ksp), `PetscInitialize()`
1906 @*/
1907 PetscErrorCode PCGAMGInitializePackage(void)
1908 {
1909   PetscInt l;
1910 
1911   PetscFunctionBegin;
1912   if (PCGAMGPackageInitialized) PetscFunctionReturn(PETSC_SUCCESS);
1913   PCGAMGPackageInitialized = PETSC_TRUE;
1914   PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGGEO, PCCreateGAMG_GEO));
1915   PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGAGG, PCCreateGAMG_AGG));
1916   PetscCall(PetscFunctionListAdd(&GAMGList, PCGAMGCLASSICAL, PCCreateGAMG_Classical));
1917   PetscCall(PetscRegisterFinalize(PCGAMGFinalizePackage));
1918 
1919   /* general events */
1920   PetscCall(PetscLogEventRegister("PCSetUp_GAMG+", PC_CLASSID, &petsc_gamg_setup_events[GAMG_SETUP]));
1921   PetscCall(PetscLogEventRegister(" PCGAMGCreateG", PC_CLASSID, &petsc_gamg_setup_events[GAMG_GRAPH]));
1922   PetscCall(PetscLogEventRegister(" GAMG Coarsen", PC_CLASSID, &petsc_gamg_setup_events[GAMG_COARSEN]));
1923   PetscCall(PetscLogEventRegister("  GAMG MIS/Agg", PC_CLASSID, &petsc_gamg_setup_events[GAMG_MIS]));
1924   PetscCall(PetscLogEventRegister(" PCGAMGProl", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROL]));
1925   PetscCall(PetscLogEventRegister("  GAMG Prol-col", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROLA]));
1926   PetscCall(PetscLogEventRegister("  GAMG Prol-lift", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PROLB]));
1927   PetscCall(PetscLogEventRegister(" PCGAMGOptProl", PC_CLASSID, &petsc_gamg_setup_events[GAMG_OPT]));
1928   PetscCall(PetscLogEventRegister("  GAMG smooth", PC_CLASSID, &petsc_gamg_setup_events[GAMG_OPTSM]));
1929   PetscCall(PetscLogEventRegister(" PCGAMGCreateL", PC_CLASSID, &petsc_gamg_setup_events[GAMG_LEVEL]));
1930   PetscCall(PetscLogEventRegister("  GAMG PtAP", PC_CLASSID, &petsc_gamg_setup_events[GAMG_PTAP]));
1931   PetscCall(PetscLogEventRegister("  GAMG Reduce", PC_CLASSID, &petsc_gamg_setup_events[GAMG_REDUCE]));
1932   PetscCall(PetscLogEventRegister("   GAMG Repart", PC_CLASSID, &petsc_gamg_setup_events[GAMG_REPART]));
1933   /* PetscCall(PetscLogEventRegister("   GAMG Inv-Srt", PC_CLASSID, &petsc_gamg_setup_events[SET13])); */
1934   /* PetscCall(PetscLogEventRegister("   GAMG Move A", PC_CLASSID, &petsc_gamg_setup_events[SET14])); */
1935   /* PetscCall(PetscLogEventRegister("   GAMG Move P", PC_CLASSID, &petsc_gamg_setup_events[SET15])); */
1936   for (l = 0; l < PETSC_MG_MAXLEVELS; l++) {
1937     char ename[32];
1938 
1939     PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Squ l%02" PetscInt_FMT, l));
1940     PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][0]));
1941     PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Gal l%02" PetscInt_FMT, l));
1942     PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][1]));
1943     PetscCall(PetscSNPrintf(ename, sizeof(ename), "PCGAMG Opt l%02" PetscInt_FMT, l));
1944     PetscCall(PetscLogEventRegister(ename, PC_CLASSID, &petsc_gamg_setup_matmat_events[l][2]));
1945   }
1946 #if defined(GAMG_STAGES)
1947   { /* create a timer stage for each level */
1948     char str[32];
1949     for (PetscInt i = 0; i < PETSC_MG_MAXLEVELS; i++) {
1950       PetscCall(PetscSNPrintf(str, PETSC_STATIC_ARRAY_LENGTH(str), "GAMG Level %d", (int)i));
1951       PetscCall(PetscLogStageRegister(str, &gamg_stages[i]));
1952     }
1953   }
1952 #endif
1953   PetscFunctionReturn(PETSC_SUCCESS);
1954 }
1955 
1956 /*@C
1957   PCGAMGFinalizePackage - This function frees everything from the `PCGAMG` package. It is
1958   called from `PetscFinalize()` automatically.
1959 
1960   Level: developer
1961 
1962 .seealso: [](ch_ksp), `PetscFinalize()`
1963 @*/
1964 PetscErrorCode PCGAMGFinalizePackage(void)
1965 {
1966   PetscFunctionBegin;
1967   PCGAMGPackageInitialized = PETSC_FALSE;
1968   PetscCall(PetscFunctionListDestroy(&GAMGList));
1969   PetscFunctionReturn(PETSC_SUCCESS);
1970 }
1971 
1972 /*@C
1973   PCGAMGRegister - Register a `PCGAMG` implementation.
1974 
1975   Input Parameters:
1976 + type   - string that will be used as the name of the `PCGAMG` type.
1977 - create - function for creating the gamg context.
1978 
1979   Level: developer
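
  Example (a sketch; `PCCreateGAMG_MyAMG` is a hypothetical user-provided creation routine):
.vb
  PetscCall(PCGAMGRegister("myamg", PCCreateGAMG_MyAMG));
  PetscCall(PCGAMGSetType(pc, "myamg"));
.ve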
1980 
1981 .seealso: [](ch_ksp), `PCGAMGType`, `PCGAMG`, `PCGAMGSetType()`
1982 @*/
1983 PetscErrorCode PCGAMGRegister(PCGAMGType type, PetscErrorCode (*create)(PC))
1984 {
1985   PetscFunctionBegin;
1986   PetscCall(PCGAMGInitializePackage());
1987   PetscCall(PetscFunctionListAdd(&GAMGList, type, create));
1988   PetscFunctionReturn(PETSC_SUCCESS);
1989 }
1990 
1991 /*@
1992   PCGAMGCreateGraph - Creates a graph that is used by the `PCGAMGType` in the coarsening process
1993 
1994   Input Parameters:
1995 + pc - the `PCGAMG`
1996 - A  - the matrix, for any level
1997 
1998   Output Parameter:
1999 . G - the graph
2000 
2001   Level: advanced
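
  Example (a minimal sketch, assuming a set-up `PCGAMG` preconditioner `pc` with operator `A`):
.vb
  Mat G;
  PetscCall(PCGAMGCreateGraph(pc, A, &G));
  /* inspect or coarsen the strength graph G, then free it */
  PetscCall(MatDestroy(&G));
.ve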
2002 
2003 .seealso: [](ch_ksp), `PCGAMGType`, `PCGAMG`, `PCGAMGSetType()`
2004 @*/
2005 PetscErrorCode PCGAMGCreateGraph(PC pc, Mat A, Mat *G)
2006 {
2007   PC_MG   *mg      = (PC_MG *)pc->data;
2008   PC_GAMG *pc_gamg = (PC_GAMG *)mg->innerctx;
2009 
2010   PetscFunctionBegin;
2011   PetscCall(pc_gamg->ops->creategraph(pc, A, G));
2012   PetscFunctionReturn(PETSC_SUCCESS);
2013 }
2014