xref: /petsc/src/ksp/pc/impls/ml/ml.c (revision 20e868fa2b43a38176a20241390167aa58f219d9)
1 
2 /*
3    Provides an interface to the ML smoothed aggregation preconditioner
4    Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
5                                     Jed Brown, see [PETSC #18321, #18449].
6 */
7 #include <petsc/private/pcimpl.h>   /*I "petscpc.h" I*/
8 #include <petsc/private/pcmgimpl.h>                    /*I "petscksp.h" I*/
9 #include <../src/mat/impls/aij/seq/aij.h>
10 #include <../src/mat/impls/aij/mpi/mpiaij.h>
11 #include <petscdm.h>            /* for DMDestroy(&pc->dm) hack */
12 
13 EXTERN_C_BEGIN
14 /* HAVE_CONFIG_H flag is required by ML include files */
15 #if !defined(HAVE_CONFIG_H)
16 #define HAVE_CONFIG_H
17 #endif
18 #include <ml_include.h>
19 #include <ml_viz_stats.h>
20 EXTERN_C_END
21 
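/* How PCML chooses the near null space used to build aggregates: AUTO selects USER when a near
   null space has been attached with MatSetNearNullSpace(), BLOCK when the block size is > 1, and
   SCALAR otherwise (see the selection logic in PCSetUp_ML()) */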
22 typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType;
23 static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0};
24 
25 /* The context (data structure) at each grid level */
26 typedef struct {
27   Vec x,b,r;                  /* global vectors */
28   Mat A,P,R;
29   KSP ksp;
30   Vec coords;                 /* projected by ML, if PCSetCoordinates is called; values packed by node */
31 } GridCtx;
32 
33 /* The context used to input PETSc matrix into ML at fine grid */
34 typedef struct {
35   Mat         A;       /* Petsc matrix in aij format */
36   Mat         Aloc;    /* local portion of A to be used by ML */
37   Vec         x,y;
38   ML_Operator *mlmat;
39   PetscScalar *pwork;  /* tmp array used by PetscML_comm() */
40 } FineGridCtx;
41 
42 /* The context associates an ML matrix with a PETSc shell matrix */
43 typedef struct {
44   Mat         A;        /* PETSc shell matrix associated with mlmat */
45   ML_Operator *mlmat;   /* ML matrix associated with A */
46   Vec         y, work;
47 } Mat_MLShell;
48 
49 /* Private context for the ML preconditioner */
50 typedef struct {
51   ML                *ml_object;
52   ML_Aggregate      *agg_object;
53   GridCtx           *gridctx;
54   FineGridCtx       *PetscMLdata;
55   PetscInt          Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization,MinPerProc,PutOnSingleProc,RepartitionType,ZoltanScheme;
56   PetscReal         Threshold,DampingFactor,EnergyMinimizationDropTol,MaxMinRatio,AuxThreshold;
57   PetscBool         SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable,Repartition,Aux;
58   PetscBool         reuse_interpolation;
59   PCMLNullSpaceType nulltype;
60   PetscMPIInt       size; /* size of communicator for pc->pmat */
61   PetscInt          dim;  /* data from PCSetCoordinates(_ML) */
62   PetscInt          nloc;
63   PetscReal         *coords; /* ML has a grid object for each level: the finest grid will point into coords */
64 } PC_ML;
65 
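/* Getrow callback registered with ML through ML_Set_Amatrix_Getrow(): copies rows of the local
   matrix Aloc into ML's (columns,values) arrays and returns 0 when allocated_space is too small */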
66 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
67 {
68   PetscErrorCode ierr;
69   PetscInt       m,i,j,k=0,row,*aj;
70   PetscScalar    *aa;
71   FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
72   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;
73 
74   ierr = MatGetSize(ml->Aloc,&m,NULL); if (ierr) return(0);
75   for (i = 0; i<N_requested_rows; i++) {
76     row            = requested_rows[i];
77     row_lengths[i] = a->ilen[row];
78     if (allocated_space < k+row_lengths[i]) return(0);
79     if ((row >= 0) && (row <= (m-1))) {
80       aj = a->j + a->i[row];
81       aa = a->a + a->i[row];
82       for (j=0; j<row_lengths[i]; j++) {
83         columns[k]  = aj[j];
84         values[k++] = aa[j];
85       }
86     }
87   }
88   return(1);
89 }
90 
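/* Communication callback for ML: scatters the owned part of p so that the ghost (off-process)
   entries, stored after the A->rmap->n owned values, are brought up to date */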
91 static PetscErrorCode PetscML_comm(double p[],void *ML_data)
92 {
93   PetscErrorCode    ierr;
94   FineGridCtx       *ml = (FineGridCtx*)ML_data;
95   Mat               A   = ml->A;
96   Mat_MPIAIJ        *a  = (Mat_MPIAIJ*)A->data;
97   PetscMPIInt       size;
98   PetscInt          i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
99   const PetscScalar *array;
100 
101   PetscFunctionBegin;
102   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
103   if (size == 1) PetscFunctionReturn(0);
104 
105   ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
106   ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
107   ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
108   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
109   ierr = VecGetArrayRead(a->lvec,&array);CHKERRQ(ierr);
110   for (i=in_length; i<out_length; i++) p[i] = array[i-in_length];
111   ierr = VecRestoreArrayRead(a->lvec,&array);CHKERRQ(ierr);
112   PetscFunctionReturn(0);
113 }
114 
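/* Matvec callback for ML: applies the local matrix Aloc to p (after updating ghost values in
   parallel via PetscML_comm()) and stores the result in ap */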
115 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
116 {
117   PetscErrorCode ierr;
118   FineGridCtx    *ml = (FineGridCtx*)ML_Get_MyMatvecData(ML_data);
119   Mat            A   = ml->A, Aloc=ml->Aloc;
120   PetscMPIInt    size;
121   PetscScalar    *pwork=ml->pwork;
122   PetscInt       i;
123 
124   PetscFunctionBegin;
125   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
126   if (size == 1) {
127     ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
128   } else {
129     for (i=0; i<in_length; i++) pwork[i] = p[i];
130     ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
131     ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
132   }
133   ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
134   ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
135   ierr = VecResetArray(ml->x);CHKERRQ(ierr);
136   ierr = VecResetArray(ml->y);CHKERRQ(ierr);
137   PetscFunctionReturn(0);
138 }
139 
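/* MATOP_MULT implementation for the shell matrices wrapping ML operators: applies the underlying
   ML_Operator with ML_Operator_Apply() */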
140 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
141 {
142   PetscErrorCode    ierr;
143   Mat_MLShell       *shell;
144   PetscScalar       *yarray;
145   const PetscScalar *xarray;
146   PetscInt          x_length,y_length;
147 
148   PetscFunctionBegin;
149   ierr     = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
150   ierr     = VecGetArrayRead(x,&xarray);CHKERRQ(ierr);
151   ierr     = VecGetArray(y,&yarray);CHKERRQ(ierr);
152   x_length = shell->mlmat->invec_leng;
153   y_length = shell->mlmat->outvec_leng;
154   PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat,x_length,(PetscScalar*)xarray,y_length,yarray));
155   ierr = VecRestoreArrayRead(x,&xarray);CHKERRQ(ierr);
156   ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
157   PetscFunctionReturn(0);
158 }
159 
160 /* Computes y = w + A * x
161    It is possible that w == y, but not x == y
162 */
163 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
164 {
165   Mat_MLShell       *shell;
166   PetscScalar       *yarray;
167   const PetscScalar *xarray;
168   PetscInt          x_length,y_length;
169   PetscErrorCode    ierr;
170 
171   PetscFunctionBegin;
172   ierr = MatShellGetContext(A, (void**) &shell);CHKERRQ(ierr);
173   if (y == w) {
174     if (!shell->work) {
175       ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr);
176     }
177     ierr     = VecGetArrayRead(x,           &xarray);CHKERRQ(ierr);
178     ierr     = VecGetArray(shell->work, &yarray);CHKERRQ(ierr);
179     x_length = shell->mlmat->invec_leng;
180     y_length = shell->mlmat->outvec_leng;
181     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar*)xarray, y_length, yarray));
182     ierr = VecRestoreArrayRead(x,           &xarray);CHKERRQ(ierr);
183     ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr);
184     ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr);
185   } else {
186     ierr     = VecGetArrayRead(x, &xarray);CHKERRQ(ierr);
187     ierr     = VecGetArray(y, &yarray);CHKERRQ(ierr);
188     x_length = shell->mlmat->invec_leng;
189     y_length = shell->mlmat->outvec_leng;
190     PetscStackCall("ML_Operator_Apply",ML_Operator_Apply(shell->mlmat, x_length, (PetscScalar *)xarray, y_length, yarray));
191     ierr = VecRestoreArrayRead(x, &xarray);CHKERRQ(ierr);
192     ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr);
193     ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr);
194   }
195   PetscFunctionReturn(0);
196 }
197 
198 /* newtype is ignored since this routine only handles one case */
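/* The result is a sequential AIJ matrix holding the local rows of A; columns of the diagonal block
   keep their indices while ghost columns are shifted to start at A->cmap->n, matching the layout
   produced by PetscML_comm() */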
199 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
200 {
201   PetscErrorCode ierr;
202   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
203   Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
204   PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
205   PetscScalar    *aa=a->a,*ba=b->a,*ca;
206   PetscInt       am =A->rmap->n,an=A->cmap->n,i,j,k;
207   PetscInt       *ci,*cj,ncols;
208 
209   PetscFunctionBegin;
210   if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);
211 
212   if (scall == MAT_INITIAL_MATRIX) {
213     ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
214     ci[0] = 0;
215     for (i=0; i<am; i++) ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
216     ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
217     ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);
218 
219     k = 0;
220     for (i=0; i<am; i++) {
221       /* diagonal portion of A */
222       ncols = ai[i+1] - ai[i];
223       for (j=0; j<ncols; j++) {
224         cj[k]   = *aj++;
225         ca[k++] = *aa++;
226       }
227       /* off-diagonal portion of A */
228       ncols = bi[i+1] - bi[i];
229       for (j=0; j<ncols; j++) {
230         cj[k]   = an + (*bj); bj++;
231         ca[k++] = *ba++;
232       }
233     }
234     if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);
235 
236     /* put together the new matrix */
237     an   = mpimat->A->cmap->n+mpimat->B->cmap->n;
238     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);
239 
240     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
241     /* Since these are PETSc arrays, change flags to free them as necessary. */
242     mat          = (Mat_SeqAIJ*)(*Aloc)->data;
243     mat->free_a  = PETSC_TRUE;
244     mat->free_ij = PETSC_TRUE;
245 
246     mat->nonew = 0;
247   } else if (scall == MAT_REUSE_MATRIX) {
248     mat=(Mat_SeqAIJ*)(*Aloc)->data;
249     ci = mat->i; cj = mat->j; ca = mat->a;
250     for (i=0; i<am; i++) {
251       /* diagonal portion of A */
252       ncols = ai[i+1] - ai[i];
253       for (j=0; j<ncols; j++) *ca++ = *aa++;
254       /* off-diagonal portion of A */
255       ncols = bi[i+1] - bi[i];
256       for (j=0; j<ncols; j++) *ca++ = *ba++;
257     }
258   } else SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
259   PetscFunctionReturn(0);
260 }
261 
262 static PetscErrorCode MatDestroy_ML(Mat A)
263 {
264   PetscErrorCode ierr;
265   Mat_MLShell    *shell;
266 
267   PetscFunctionBegin;
268   ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
269   ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
270   if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
271   ierr = PetscFree(shell);CHKERRQ(ierr);
272   PetscFunctionReturn(0);
273 }
274 
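/* Converts an ML_Operator to SeqAIJ: rectangular operators (P and R) alias ML's CSR arrays
   directly, while square operators (A) are copied row by row through ML_Operator_Getrow() */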
275 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
276 {
277   struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
278   PetscErrorCode        ierr;
279   PetscInt              m       =mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = NULL,nz_max;
280   PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i;
281   PetscScalar           *ml_vals=matdata->values,*aa;
282 
283   PetscFunctionBegin;
284   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
285   if (m != n) { /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */
286     if (reuse) {
287       Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data;
288       aij->i = ml_rowptr;
289       aij->j = ml_cols;
290       aij->a = ml_vals;
291     } else {
292       /* sort ml_cols and ml_vals */
293       ierr = PetscMalloc1(m+1,&nnz);CHKERRQ(ierr);
294       for (i=0; i<m; i++) nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
295       aj = ml_cols; aa = ml_vals;
296       for (i=0; i<m; i++) {
297         ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
298         aj  += nnz[i]; aa += nnz[i];
299       }
300       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
301       ierr = PetscFree(nnz);CHKERRQ(ierr);
302     }
303     PetscFunctionReturn(0);
304   }
305 
306   nz_max = PetscMax(1,mlmat->max_nz_per_row);
307   ierr   = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
308   if (!reuse) {
309     ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
310     ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
311     ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
312     /* keep track of block size for A matrices */
313     ierr = MatSetBlockSize (*newmat, mlmat->num_PDEs);CHKERRQ(ierr);
314 
315     ierr = PetscMalloc1(m,&nnz);CHKERRQ(ierr);
316     for (i=0; i<m; i++) {
317       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
318     }
319     ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
320   }
321   for (i=0; i<m; i++) {
322     PetscInt ncols;
323 
324     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
325     ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
326   }
327   ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
328   ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
329 
330   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
331   ierr = PetscFree(nnz);CHKERRQ(ierr);
332   PetscFunctionReturn(0);
333 }
334 
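/* Wraps an ML_Operator in a MATSHELL whose MatMult()/MatMultAdd() call ML_Operator_Apply();
   used for P and R in parallel runs */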
335 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
336 {
337   PetscErrorCode ierr;
338   PetscInt       m,n;
339   ML_Comm        *MLcomm;
340   Mat_MLShell    *shellctx;
341 
342   PetscFunctionBegin;
343   m = mlmat->outvec_leng;
344   n = mlmat->invec_leng;
345 
346   if (reuse) {
347     ierr            = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
348     shellctx->mlmat = mlmat;
349     PetscFunctionReturn(0);
350   }
351 
352   MLcomm = mlmat->comm;
353 
354   ierr = PetscNew(&shellctx);CHKERRQ(ierr);
355   ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
356   ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
357   ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
358   ierr = MatShellSetOperation(*newmat,MATOP_DESTROY,(void(*)(void))MatDestroy_ML);CHKERRQ(ierr);
359 
360   shellctx->A         = *newmat;
361   shellctx->mlmat     = mlmat;
362   shellctx->work      = NULL;
363 
364   ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
365   ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
366   ierr = VecSetType(shellctx->y,VECSTANDARD);CHKERRQ(ierr);
367   PetscFunctionReturn(0);
368 }
369 
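/* Copies a square ML_Operator into a parallel MPIAIJ matrix, translating ML's local column
   indices to global indices obtained from ML_build_global_numbering() */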
370 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
371 {
372   PetscInt       *aj;
373   PetscScalar    *aa;
374   PetscErrorCode ierr;
375   PetscInt       i,j,*gordering;
376   PetscInt       m=mlmat->outvec_leng,n,nz_max,row;
377   Mat            A;
378 
379   PetscFunctionBegin;
380   if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
381   n = mlmat->invec_leng;
382   if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n);
383 
384   /* create global row numbering for a ML_Operator */
385   PetscStackCall("ML_build_global_numbering",ML_build_global_numbering(mlmat,&gordering,"rows"));
386 
387   nz_max = PetscMax(1,mlmat->max_nz_per_row) + 1;
388   ierr = PetscMalloc2(nz_max,&aa,nz_max,&aj);CHKERRQ(ierr);
389   if (reuse) {
390     A = *newmat;
391   } else {
392     PetscInt *nnzA,*nnzB,*nnz;
393     PetscInt rstart;
394     ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
395     ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
396     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
397     /* keep track of block size for A matrices */
398     ierr = MatSetBlockSize (A,mlmat->num_PDEs);CHKERRQ(ierr);
399     ierr = PetscMalloc3(m,&nnzA,m,&nnzB,m,&nnz);CHKERRQ(ierr);
400     ierr = MPI_Scan(&m,&rstart,1,MPIU_INT,MPI_SUM,mlmat->comm->USR_comm);CHKERRQ(ierr);
401     rstart -= m;
402 
403     for (i=0; i<m; i++) {
404       row = gordering[i] - rstart;
405       PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&nnz[i]));
406       nnzA[row] = 0;
407       for (j=0; j<nnz[i]; j++) {
408         if (aj[j] < m) nnzA[row]++;
409       }
410       nnzB[row] = nnz[i] - nnzA[row];
411     }
412     ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
413     ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
414   }
415   for (i=0; i<m; i++) {
416     PetscInt ncols;
417     row = gordering[i];
418 
419     PetscStackCall("ML_Operator_Getrow",ML_Operator_Getrow(mlmat,1,&i,nz_max,aj,aa,&ncols));
420     for (j = 0; j < ncols; j++) aj[j] = gordering[aj[j]];
421     ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
422   }
423   PetscStackCall("ML_free",ML_free(gordering));
424   ierr    = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
425   ierr    = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
426   *newmat = A;
427 
428   ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
429   PetscFunctionReturn(0);
430 }
431 
432 /* -------------------------------------------------------------------------- */
433 /*
434    PCSetCoordinates_ML
435 
436    Input Parameter:
437    .  pc - the preconditioner context
438 */
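/*
   Example (a minimal sketch, not taken from this file): for a 2D problem whose node coordinates
   are stored interlaced as x0,y0,x1,y1,..., one would call

     ierr = PCSetCoordinates(pc,2,nloc,coords);CHKERRQ(ierr);

   where nloc is the number of local nodes (local rows divided by the block size). PCSetCoordinates()
   reaches this routine through the composed function "PCSetCoordinates_C" registered in PCCreate_ML().
*/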
439 static PetscErrorCode PCSetCoordinates_ML(PC pc, PetscInt ndm, PetscInt a_nloc, PetscReal *coords)
440 {
441   PC_MG          *mg    = (PC_MG*)pc->data;
442   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
443   PetscErrorCode ierr;
444   PetscInt       arrsz,oldarrsz,bs,my0,kk,ii,nloc,Iend,aloc;
445   Mat            Amat = pc->pmat;
446 
447   /* this function was copied and modified from PCSetCoordinates_GEO -TGI */
448   PetscFunctionBegin;
449   PetscValidHeaderSpecific(Amat, MAT_CLASSID, 1);
450   ierr = MatGetBlockSize(Amat, &bs);CHKERRQ(ierr);
451 
452   ierr = MatGetOwnershipRange(Amat, &my0, &Iend);CHKERRQ(ierr);
453   aloc = (Iend-my0);
454   nloc = (Iend-my0)/bs;
455 
456   if (nloc!=a_nloc && aloc != a_nloc) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Number of local blocks %D must be %D or %D.",a_nloc,nloc,aloc);
457 
458   oldarrsz    = pc_ml->dim * pc_ml->nloc;
459   pc_ml->dim  = ndm;
460   pc_ml->nloc = nloc;
461   arrsz       = ndm * nloc;
462 
463   /* create data - syntactic sugar that should be refactored at some point */
464   if (pc_ml->coords==0 || (oldarrsz != arrsz)) {
465     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
466     ierr = PetscMalloc1(arrsz, &pc_ml->coords);CHKERRQ(ierr);
467   }
468   for (kk=0; kk<arrsz; kk++) pc_ml->coords[kk] = -999.;
469   /* copy data in - column oriented */
470   if (nloc == a_nloc) {
471     for (kk = 0; kk < nloc; kk++) {
472       for (ii = 0; ii < ndm; ii++) {
473         pc_ml->coords[ii*nloc + kk] =  coords[kk*ndm + ii];
474       }
475     }
476   } else { /* assumes the coordinates are blocked */
477     for (kk = 0; kk < nloc; kk++) {
478       for (ii = 0; ii < ndm; ii++) {
479         pc_ml->coords[ii*nloc + kk] =  coords[bs*kk*ndm + ii];
480       }
481     }
482   }
483   PetscFunctionReturn(0);
484 }
485 
486 /* -----------------------------------------------------------------------------*/
487 extern PetscErrorCode PCReset_MG(PC);
488 PetscErrorCode PCReset_ML(PC pc)
489 {
490   PetscErrorCode ierr;
491   PC_MG          *mg    = (PC_MG*)pc->data;
492   PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
493   PetscInt       level,fine_level=pc_ml->Nlevels-1,dim=pc_ml->dim;
494 
495   PetscFunctionBegin;
496   if (dim) {
497     ML_Aggregate_Viz_Stats * grid_info = (ML_Aggregate_Viz_Stats*) pc_ml->ml_object->Grid[0].Grid;
498 
499     for (level=0; level<=fine_level; level++) {
500       ierr = VecDestroy(&pc_ml->gridctx[level].coords);CHKERRQ(ierr);
501     }
502 
503     grid_info->x = 0; /* do this so ML doesn't try to free coordinates */
504     grid_info->y = 0;
505     grid_info->z = 0;
506 
507     PetscStackCall("ML_Aggregate_VizAndStats_Clean",ML_Aggregate_VizAndStats_Clean(pc_ml->ml_object));
508   }
509   PetscStackCall("ML_Aggregate_Destroy",ML_Aggregate_Destroy(&pc_ml->agg_object));
510   PetscStackCall("ML_Destroy",ML_Destroy(&pc_ml->ml_object));
511 
512   if (pc_ml->PetscMLdata) {
513     ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
514     ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
515     ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
516     ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
517   }
518   ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);
519 
520   if (pc_ml->gridctx) {
521     for (level=0; level<fine_level; level++) {
522       if (pc_ml->gridctx[level].A) {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
523       if (pc_ml->gridctx[level].P) {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
524       if (pc_ml->gridctx[level].R) {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
525       if (pc_ml->gridctx[level].x) {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
526       if (pc_ml->gridctx[level].b) {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
527       if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
528     }
529   }
530   ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
531   ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
532 
533   pc_ml->dim  = 0;
534   pc_ml->nloc = 0;
535   ierr = PCReset_MG(pc);CHKERRQ(ierr);
536   PetscFunctionReturn(0);
537 }
538 /* -------------------------------------------------------------------------- */
539 /*
540    PCSetUp_ML - Prepares for the use of the ML preconditioner
541                     by setting data structures and options.
542 
543    Input Parameter:
544 .  pc - the preconditioner context
545 
546    Application Interface Routine: PCSetUp()
547 
548    Notes:
549    The interface routine PCSetUp() is not usually called directly by
550    the user, but instead is called by PCApply() if necessary.
551 */
552 extern PetscErrorCode PCSetFromOptions_MG(PetscOptionItems *PetscOptionsObject,PC);
553 extern PetscErrorCode PCReset_MG(PC);
554 
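/*
   Overall flow: convert pc->pmat to a purely local AIJ matrix (Aloc), register the getrow/comm/matvec
   callbacks with ML, let ML build the aggregation hierarchy, then wrap the resulting ML operators as
   PETSc AIJ or shell matrices and hand them to the PCMG infrastructure.
*/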
555 PetscErrorCode PCSetUp_ML(PC pc)
556 {
557   PetscErrorCode   ierr;
558   PetscMPIInt      size;
559   FineGridCtx      *PetscMLdata;
560   ML               *ml_object;
561   ML_Aggregate     *agg_object;
562   ML_Operator      *mlmat;
563   PetscInt         nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
564   Mat              A,Aloc;
565   GridCtx          *gridctx;
566   PC_MG            *mg    = (PC_MG*)pc->data;
567   PC_ML            *pc_ml = (PC_ML*)mg->innerctx;
568   PetscBool        isSeq, isMPI;
569   KSP              smoother;
570   PC               subpc;
571   PetscInt         mesh_level, old_mesh_level;
572   MatInfo          info;
573   static PetscBool cite = PETSC_FALSE;
574 
575   PetscFunctionBegin;
576   ierr = PetscCitationsRegister("@TechReport{ml_users_guide,\n  author = {M. Sala and J.J. Hu and R.S. Tuminaro},\n  title = {{ML}3.1 {S}moothed {A}ggregation {U}ser's {G}uide},\n  institution =  {Sandia National Laboratories},\n  number = {SAND2004-4821},\n  year = 2004\n}\n",&cite);CHKERRQ(ierr);
577   A    = pc->pmat;
578   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
579 
580   if (pc->setupcalled) {
581     if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
582       /*
583        Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
584        multiple solves in which the matrix is not changing too quickly.
585        */
586       ml_object             = pc_ml->ml_object;
587       gridctx               = pc_ml->gridctx;
588       Nlevels               = pc_ml->Nlevels;
589       fine_level            = Nlevels - 1;
590       gridctx[fine_level].A = A;
591 
592       ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
593       ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
594       if (isMPI) {
595         ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
596       } else if (isSeq) {
597         Aloc = A;
598         ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
599       } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
600 
601       ierr              = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
602       PetscMLdata       = pc_ml->PetscMLdata;
603       ierr              = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
604       PetscMLdata->A    = A;
605       PetscMLdata->Aloc = Aloc;
606       PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
607       PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
608 
609       mesh_level = ml_object->ML_finest_level;
610       while (ml_object->SingleLevel[mesh_level].Rmat->to) {
611         old_mesh_level = mesh_level;
612         mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;
613 
614         /* clean and regenerate A */
615         mlmat = &(ml_object->Amat[mesh_level]);
616         PetscStackCall("ML_Operator_Clean",ML_Operator_Clean(mlmat));
617         PetscStackCall("ML_Operator_Init",ML_Operator_Init(mlmat,ml_object->comm));
618         PetscStackCall("ML_Gen_AmatrixRAP",ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level));
619       }
620 
621       level = fine_level - 1;
622       if (size == 1) { /* convert ML P, R and A into seqaij format */
623         for (mllevel=1; mllevel<Nlevels; mllevel++) {
624           mlmat = &(ml_object->Amat[mllevel]);
625           ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
626           level--;
627         }
628       } else { /* convert ML P and R into shell format, ML A into mpiaij format */
629         for (mllevel=1; mllevel<Nlevels; mllevel++) {
630           mlmat  = &(ml_object->Amat[mllevel]);
631           ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
632           level--;
633         }
634       }
635 
636       for (level=0; level<fine_level; level++) {
637         if (level > 0) {
638           ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
639         }
640         ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
641       }
642       ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
643       ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
644 
645       ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
646       PetscFunctionReturn(0);
647     } else {
648       /* since ML can change the size of vectors/matrices at any level we must destroy everything */
649       ierr = PCReset_ML(pc);CHKERRQ(ierr);
650     }
651   }
652 
653   /* setup special features of PCML */
654   /*--------------------------------*/
655   /* convert A to Aloc to be used by ML at fine grid */
656   pc_ml->size = size;
657   ierr        = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
658   ierr        = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
659   if (isMPI) {
660     ierr = MatConvert_MPIAIJ_ML(A,NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
661   } else if (isSeq) {
662     Aloc = A;
663     ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
664   } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);
665 
666   /* create and initialize struct 'PetscMLdata' */
667   ierr               = PetscNewLog(pc,&PetscMLdata);CHKERRQ(ierr);
668   pc_ml->PetscMLdata = PetscMLdata;
669   ierr               = PetscMalloc1(Aloc->cmap->n+1,&PetscMLdata->pwork);CHKERRQ(ierr);
670 
671   ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
672   ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
673   ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);
674 
675   ierr              = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
676   ierr              = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
677   ierr              = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
678   PetscMLdata->A    = A;
679   PetscMLdata->Aloc = Aloc;
680   if (pc_ml->dim) { /* create vecs around the coordinate data given */
681     PetscInt  i,j,dim=pc_ml->dim;
682     PetscInt  nloc = pc_ml->nloc,nlocghost;
683     PetscReal *ghostedcoords;
684 
685     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
686     nlocghost = Aloc->cmap->n / bs;
687     ierr      = PetscMalloc1(dim*nlocghost,&ghostedcoords);CHKERRQ(ierr);
688     for (i = 0; i < dim; i++) {
689       /* copy coordinate values into first component of pwork */
690       for (j = 0; j < nloc; j++) {
691         PetscMLdata->pwork[bs * j] = pc_ml->coords[nloc * i + j];
692       }
693       /* get the ghost values */
694       ierr = PetscML_comm(PetscMLdata->pwork,PetscMLdata);CHKERRQ(ierr);
695       /* write into the vector */
696       for (j = 0; j < nlocghost; j++) {
697         ghostedcoords[i * nlocghost + j] = PetscMLdata->pwork[bs * j];
698       }
699     }
700     /* replace the original coords with the ghosted coords, because these are
701      * what ML needs */
702     ierr = PetscFree(pc_ml->coords);CHKERRQ(ierr);
703     pc_ml->coords = ghostedcoords;
704   }
705 
706   /* create ML discretization matrix at fine grid */
707   /* ML requires the fine-grid matrix as input; it determines the number of levels */
708   ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
709   ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
710   PetscStackCall("ML_Create",ML_Create(&ml_object,pc_ml->MaxNlevels));
711   PetscStackCall("ML_Comm_Set_UsrComm",ML_Comm_Set_UsrComm(ml_object->comm,PetscObjectComm((PetscObject)A)));
712   pc_ml->ml_object = ml_object;
713   PetscStackCall("ML_Init_Amatrix",ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata));
714   PetscStackCall("ML_Set_Amatrix_Getrow",ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols));
715   PetscStackCall("ML_Set_Amatrix_Matvec",ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec));
716 
717   PetscStackCall("ML_Set_Symmetrize",ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO));
718 
719   /* aggregation */
720   PetscStackCall("ML_Aggregate_Create",ML_Aggregate_Create(&agg_object));
721   pc_ml->agg_object = agg_object;
722 
723   {
724     MatNullSpace mnull;
725     ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
726     if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
727       if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
728       else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
729       else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
730     }
731     switch (pc_ml->nulltype) {
732     case PCML_NULLSPACE_USER: {
733       PetscScalar       *nullvec;
734       const PetscScalar *v;
735       PetscBool         has_const;
736       PetscInt          i,j,mlocal,nvec,M;
737       const Vec         *vecs;
738 
739       if (!mnull) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
740       ierr = MatGetSize(A,&M,NULL);CHKERRQ(ierr);
741       ierr = MatGetLocalSize(Aloc,&mlocal,NULL);CHKERRQ(ierr);
742       ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
743       ierr = PetscMalloc1((nvec+!!has_const)*mlocal,&nullvec);CHKERRQ(ierr);
744       if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
745       for (i=0; i<nvec; i++) {
746         ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
747         for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
748         ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
749       }
750       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr));
751       ierr = PetscFree(nullvec);CHKERRQ(ierr);
752     } break;
753     case PCML_NULLSPACE_BLOCK:
754       PetscStackCall("ML_Aggregate_Set_NullSpace",ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr));
755       break;
756     case PCML_NULLSPACE_SCALAR:
757       break;
758     default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP,"Unknown null space type");
759     }
760   }
761   PetscStackCall("ML_Aggregate_Set_MaxCoarseSize",ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize));
762   /* set options */
763   switch (pc_ml->CoarsenScheme) {
764   case 1:
765     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_Coupled",ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object));break;
766   case 2:
767     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_MIS",ML_Aggregate_Set_CoarsenScheme_MIS(agg_object));break;
768   case 3:
769     PetscStackCall("ML_Aggregate_Set_CoarsenScheme_METIS",ML_Aggregate_Set_CoarsenScheme_METIS(agg_object));break;
770   }
771   PetscStackCall("ML_Aggregate_Set_Threshold",ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold));
772   PetscStackCall("ML_Aggregate_Set_DampingFactor",ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor));
773   if (pc_ml->SpectralNormScheme_Anorm) {
774     PetscStackCall("ML_Set_SpectralNormScheme_Anorm",ML_Set_SpectralNormScheme_Anorm(ml_object));
775   }
776   agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
777   agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
778   agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
779   agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
780   agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
781   agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;
782 
783   if (pc_ml->Aux) {
784     if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"Auxiliary matrix requires coordinates");
785     ml_object->Amat[0].aux_data->threshold = pc_ml->AuxThreshold;
786     ml_object->Amat[0].aux_data->enable    = 1;
787     ml_object->Amat[0].aux_data->max_level = 10;
788     ml_object->Amat[0].num_PDEs            = bs;
789   }
790 
791   ierr = MatGetInfo(A,MAT_LOCAL,&info);CHKERRQ(ierr);
792   ml_object->Amat[0].N_nonzeros = (int) info.nz_used;
793 
794   if (pc_ml->dim) {
795     PetscInt               i,dim = pc_ml->dim;
796     ML_Aggregate_Viz_Stats *grid_info;
797     PetscInt               nlocghost;
798 
799     ierr      = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
800     nlocghost = Aloc->cmap->n / bs;
801 
802     PetscStackCall("ML_Aggregate_VizAndStats_Setup",ML_Aggregate_VizAndStats_Setup(ml_object)); /* create ml info for coords */
803     grid_info = (ML_Aggregate_Viz_Stats*) ml_object->Grid[0].Grid;
804     for (i = 0; i < dim; i++) {
805       /* set the finest level coordinates to point to the column-order array
806        * in pc_ml */
807       /* NOTE: these pointers must be reset before VizAndStats_Clean so ML doesn't free pc_ml->coords (see PCReset_ML) */
808       switch (i) {
809       case 0: grid_info->x = pc_ml->coords + nlocghost * i; break;
810       case 1: grid_info->y = pc_ml->coords + nlocghost * i; break;
811       case 2: grid_info->z = pc_ml->coords + nlocghost * i; break;
812       default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
813       }
814     }
815     grid_info->Ndim = dim;
816   }
817 
818   /* repartitioning */
819   if (pc_ml->Repartition) {
820     PetscStackCall("ML_Repartition_Activate",ML_Repartition_Activate(ml_object));
821     PetscStackCall("ML_Repartition_Set_LargestMinMaxRatio",ML_Repartition_Set_LargestMinMaxRatio(ml_object,pc_ml->MaxMinRatio));
822     PetscStackCall("ML_Repartition_Set_MinPerProc",ML_Repartition_Set_MinPerProc(ml_object,pc_ml->MinPerProc));
823     PetscStackCall("ML_Repartition_Set_PutOnSingleProc",ML_Repartition_Set_PutOnSingleProc(ml_object,pc_ml->PutOnSingleProc));
824 #if 0                           /* Function not yet defined in ml-6.2 */
825     /* I'm not sure what compatibility issues might crop up if we partitioned
826      * on the finest level, so to be safe repartition starts on the next
827      * finest level (reflecting the default behavior in
828      * ml_MultiLevelPreconditioner) */
829     PetscStackCall("ML_Repartition_Set_StartLevel",ML_Repartition_Set_StartLevel(ml_object,1));
830 #endif
831 
832     if (!pc_ml->RepartitionType) {
833       PetscInt i;
834 
835       if (!pc_ml->dim) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_USER,"ML Zoltan repartitioning requires coordinates");
836       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEZOLTAN));
837       PetscStackCall("ML_Aggregate_Set_Dimensions",ML_Aggregate_Set_Dimensions(agg_object, pc_ml->dim));
838 
839       for (i = 0; i < ml_object->ML_num_levels; i++) {
840         ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Grid[i].Grid;
841         grid_info->zoltan_type = pc_ml->ZoltanScheme + 1; /* ml numbers options 1, 2, 3 */
842         /* defaults from ml_agg_info.c */
843         grid_info->zoltan_estimated_its = 40; /* only relevant to hypergraph / fast hypergraph */
844         grid_info->zoltan_timers        = 0;
845         grid_info->smoothing_steps      = 4;  /* only relevant to hypergraph / fast hypergraph */
846       }
847     } else {
848       PetscStackCall("ML_Repartition_Set_Partitioner",ML_Repartition_Set_Partitioner(ml_object,ML_USEPARMETIS));
849     }
850   }
851 
852   if (pc_ml->OldHierarchy) {
853     PetscStackCall("ML_Gen_MGHierarchy_UsingAggregation",Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
854   } else {
855     PetscStackCall("ML_Gen_MultiLevelHierarchy_UsingAggregation",Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object));
856   }
857   if (Nlevels<=0) SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
858   pc_ml->Nlevels = Nlevels;
859   fine_level     = Nlevels - 1;
860 
861   ierr = PCMGSetLevels(pc,Nlevels,NULL);CHKERRQ(ierr);
862   /* set default smoothers */
863   for (level=1; level<=fine_level; level++) {
864     ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
865     ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
866     ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
867     ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
868   }
869   ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
870   ierr = PCSetFromOptions_MG(PetscOptionsObject,pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
871   ierr = PetscOptionsEnd();CHKERRQ(ierr);
872 
873   ierr = PetscMalloc1(Nlevels,&gridctx);CHKERRQ(ierr);
874 
875   pc_ml->gridctx = gridctx;
876 
877   /* wrap ML matrices by PETSc shell matrices at coarsened grids.
878      Level 0 is the finest grid for ML, but coarsest for PETSc! */
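  /* i.e. ML level mllevel corresponds to PETSc level fine_level - mllevel */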
879   gridctx[fine_level].A = A;
880 
881   level = fine_level - 1;
882   if (size == 1) { /* convert ML P, R and A into seqaij format */
883     for (mllevel=1; mllevel<Nlevels; mllevel++) {
884       mlmat = &(ml_object->Pmat[mllevel]);
885       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
886       mlmat = &(ml_object->Rmat[mllevel-1]);
887       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
888 
889       mlmat = &(ml_object->Amat[mllevel]);
890       ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
891       level--;
892     }
893   } else { /* convert ML P and R into shell format, ML A into mpiaij format */
894     for (mllevel=1; mllevel<Nlevels; mllevel++) {
895       mlmat  = &(ml_object->Pmat[mllevel]);
896       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
897       mlmat  = &(ml_object->Rmat[mllevel-1]);
898       ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
899 
900       mlmat  = &(ml_object->Amat[mllevel]);
901       ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
902       level--;
903     }
904   }
905 
906   /* create vectors and ksp at all levels */
907   for (level=0; level<fine_level; level++) {
908     level1 = level + 1;
909     ierr   = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
910     ierr   = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
911     ierr   = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
912     ierr   = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);
913 
914     ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
915     ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
916     ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
917     ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
918 
919     ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
920     ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
921     ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
922     ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);
923 
924     if (level == 0) {
925       ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
926     } else {
927       ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
928     }
929   }
930   ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
931 
932   /* create coarse level and the interpolation between the levels */
933   for (level=0; level<fine_level; level++) {
934     level1 = level + 1;
935     ierr   = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
936     ierr   = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
937     if (level > 0) {
938       ierr = PCMGSetResidual(pc,level,PCMGResidualDefault,gridctx[level].A);CHKERRQ(ierr);
939     }
940     ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A);CHKERRQ(ierr);
941   }
942   ierr = PCMGSetResidual(pc,fine_level,PCMGResidualDefault,gridctx[fine_level].A);CHKERRQ(ierr);
943   ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A);CHKERRQ(ierr);
944 
945   /* put coordinate info in levels */
946   if (pc_ml->dim) {
947     PetscInt  i,j,dim = pc_ml->dim;
948     PetscInt  bs, nloc;
949     PC        subpc;
950     PetscReal *array;
951 
952     level = fine_level;
953     for (mllevel = 0; mllevel < Nlevels; mllevel++) {
954       ML_Aggregate_Viz_Stats *grid_info = (ML_Aggregate_Viz_Stats*)ml_object->Amat[mllevel].to->Grid->Grid;
955       MPI_Comm               comm       = ((PetscObject)gridctx[level].A)->comm;
956 
957       ierr  = MatGetBlockSize (gridctx[level].A, &bs);CHKERRQ(ierr);
958       ierr  = MatGetLocalSize (gridctx[level].A, NULL, &nloc);CHKERRQ(ierr);
959       nloc /= bs; /* number of local nodes */
960 
961       ierr = VecCreate(comm,&gridctx[level].coords);CHKERRQ(ierr);
962       ierr = VecSetSizes(gridctx[level].coords,dim * nloc,PETSC_DECIDE);CHKERRQ(ierr);
963       ierr = VecSetType(gridctx[level].coords,VECMPI);CHKERRQ(ierr);
964       ierr = VecGetArray(gridctx[level].coords,&array);CHKERRQ(ierr);
965       for (j = 0; j < nloc; j++) {
966         for (i = 0; i < dim; i++) {
967           switch (i) {
968           case 0: array[dim * j + i] = grid_info->x[j]; break;
969           case 1: array[dim * j + i] = grid_info->y[j]; break;
970           case 2: array[dim * j + i] = grid_info->z[j]; break;
971           default: SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_SIZ,"PCML coordinate dimension must be <= 3");
972           }
973         }
974       }
975 
976       /* passing coordinates to smoothers/coarse solver, should they need them */
977       ierr = KSPGetPC(gridctx[level].ksp,&subpc);CHKERRQ(ierr);
978       ierr = PCSetCoordinates(subpc,dim,nloc,array);CHKERRQ(ierr);
979       ierr = VecRestoreArray(gridctx[level].coords,&array);CHKERRQ(ierr);
980       level--;
981     }
982   }
983 
984   /* setupcalled is set to 0 so that MG is set up from scratch */
985   pc->setupcalled = 0;
986   ierr            = PCSetUp_MG(pc);CHKERRQ(ierr);
987   PetscFunctionReturn(0);
988 }
989 
990 /* -------------------------------------------------------------------------- */
991 /*
992    PCDestroy_ML - Destroys the private context for the ML preconditioner
993    that was created with PCCreate_ML().
994 
995    Input Parameter:
996 .  pc - the preconditioner context
997 
998    Application Interface Routine: PCDestroy()
999 */
1000 PetscErrorCode PCDestroy_ML(PC pc)
1001 {
1002   PetscErrorCode ierr;
1003   PC_MG          *mg   = (PC_MG*)pc->data;
1004   PC_ML          *pc_ml= (PC_ML*)mg->innerctx;
1005 
1006   PetscFunctionBegin;
1007   ierr = PCReset_ML(pc);CHKERRQ(ierr);
1008   ierr = PetscFree(pc_ml);CHKERRQ(ierr);
1009   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
1010   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",NULL);CHKERRQ(ierr);
1011   PetscFunctionReturn(0);
1012 }
1013 
1014 PetscErrorCode PCSetFromOptions_ML(PetscOptionItems *PetscOptionsObject,PC pc)
1015 {
1016   PetscErrorCode ierr;
1017   PetscInt       indx,PrintLevel,partindx;
1018   const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
1019   const char     *part[]   = {"Zoltan","ParMETIS"};
1020 #if defined(HAVE_ML_ZOLTAN)
1021   const char *zscheme[] = {"RCB","hypergraph","fast_hypergraph"};
1022 #endif
1023   PC_MG       *mg    = (PC_MG*)pc->data;
1024   PC_ML       *pc_ml = (PC_ML*)mg->innerctx;
1025   PetscMPIInt size;
1026   MPI_Comm    comm;
1027 
1028   PetscFunctionBegin;
1029   ierr = PetscObjectGetComm((PetscObject)pc,&comm);CHKERRQ(ierr);
1030   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1031   ierr = PetscOptionsHead(PetscOptionsObject,"ML options");CHKERRQ(ierr);
1032 
1033   PrintLevel = 0;
1034   indx       = 0;
1035   partindx   = 0;
1036 
1037   ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,NULL);CHKERRQ(ierr);
1038   PetscStackCall("ML_Set_PrintLevel",ML_Set_PrintLevel(PrintLevel));
1039   ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,NULL);CHKERRQ(ierr);
1040   ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,NULL);CHKERRQ(ierr);
1041   ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,NULL);CHKERRQ(ierr);
1042 
1043   pc_ml->CoarsenScheme = indx;
1044 
1045   ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,NULL);CHKERRQ(ierr);
1046   ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,NULL);CHKERRQ(ierr);
1047   ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,NULL);CHKERRQ(ierr);
1048   ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,NULL);CHKERRQ(ierr);
1049   ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,NULL);CHKERRQ(ierr);
1050   ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,NULL);CHKERRQ(ierr);
1051   ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,NULL);CHKERRQ(ierr);
1052   ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,NULL);CHKERRQ(ierr);
1053   /*
1054     The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will take over.
1055     This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions.
1056 
1057     We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working
1058     combination of options and ML's exit(1) explanations don't help matters.
1059   */
1060   if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
1061   if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
1062   if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2\n");CHKERRQ(ierr);}
1063   if (pc_ml->EnergyMinimization) {
1064     ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,NULL);CHKERRQ(ierr);
1065   }
1066   if (pc_ml->EnergyMinimization == 2) {
1067     /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
1068     ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,NULL);CHKERRQ(ierr);
1069   }
1070   /* energy minimization sometimes breaks if this is turned off; the more classical schemes should be okay without it */
1071   if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
1072   ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,NULL);CHKERRQ(ierr);
1073   /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
1074   if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
1075   ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,NULL);CHKERRQ(ierr);
1076   /*
1077     ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
1078     ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function
1079     ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of the
1080     functionality in the old function, so some users may still want to use it.  Note that many options are ignored in
1081     this context, but ML doesn't provide a way to find out which ones.
1082    */
1083   ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,NULL);CHKERRQ(ierr);
1084   ierr = PetscOptionsBool("-pc_ml_repartition", "Allow ML to repartition levels of the hierarchy","ML_Repartition_Activate",pc_ml->Repartition,&pc_ml->Repartition,NULL);CHKERRQ(ierr);
1085   if (pc_ml->Repartition) {
1086     ierr = PetscOptionsReal("-pc_ml_repartitionMaxMinRatio", "Acceptable ratio of repartitioned sizes","ML_Repartition_Set_LargestMinMaxRatio",pc_ml->MaxMinRatio,&pc_ml->MaxMinRatio,NULL);CHKERRQ(ierr);
1087     ierr = PetscOptionsInt("-pc_ml_repartitionMinPerProc", "Smallest repartitioned size","ML_Repartition_Set_MinPerProc",pc_ml->MinPerProc,&pc_ml->MinPerProc,NULL);CHKERRQ(ierr);
1088     ierr = PetscOptionsInt("-pc_ml_repartitionPutOnSingleProc", "Problem size automatically repartitioned to one processor","ML_Repartition_Set_PutOnSingleProc",pc_ml->PutOnSingleProc,&pc_ml->PutOnSingleProc,NULL);CHKERRQ(ierr);
1089 #if defined(HAVE_ML_ZOLTAN)
1090     partindx = 0;
1091     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[0],&partindx,NULL);CHKERRQ(ierr);
1092 
1093     pc_ml->RepartitionType = partindx;
1094     if (!partindx) {
1095       PetscInt zindx = 0;
1096 
1097       ierr = PetscOptionsEList("-pc_ml_repartitionZoltanScheme", "Repartitioning scheme to use","None",zscheme,3,zscheme[0],&zindx,NULL);CHKERRQ(ierr);
1098 
1099       pc_ml->ZoltanScheme = zindx;
1100     }
1101 #else
1102     partindx = 1;
1103     ierr     = PetscOptionsEList("-pc_ml_repartitionType", "Repartitioning library to use","ML_Repartition_Set_Partitioner",part,2,part[1],&partindx,NULL);CHKERRQ(ierr);
1104     pc_ml->RepartitionType = partindx;
1105     if (!partindx) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_SUP_SYS,"ML not compiled with Zoltan");
1106 #endif
1107     ierr = PetscOptionsBool("-pc_ml_Aux","Aggregate using auxiliary coordinate-based Laplacian","None",pc_ml->Aux,&pc_ml->Aux,NULL);CHKERRQ(ierr);
1108     ierr = PetscOptionsReal("-pc_ml_AuxThreshold","Auxiliary smoother drop tol","None",pc_ml->AuxThreshold,&pc_ml->AuxThreshold,NULL);CHKERRQ(ierr);
1109   }
1110   ierr = PetscOptionsTail();CHKERRQ(ierr);
1111   PetscFunctionReturn(0);
1112 }
1113 
1114 /* -------------------------------------------------------------------------- */
1115 /*
1116    PCCreate_ML - Creates a ML preconditioner context, PC_ML,
1117    and sets this as the private data within the generic preconditioning
1118    context, PC, that was created within PCCreate().
1119 
1120    Input Parameter:
1121 .  pc - the preconditioner context
1122 
1123    Application Interface Routine: PCCreate()
1124 */
1125 
1126 /*MC
1127      PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
1128        a fine grid discretization matrix. The coarser grid matrices and restriction/interpolation
1129        operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
1130        and the restriction/interpolation operators wrapped as PETSc shell matrices.
1131 
1132    Options Database Key:
1133    Multigrid options(inherited):
1134 +  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycleType)
1135 .  -pc_mg_distinct_smoothup: Should one configure the up and down smoothers separately (PCMGSetDistinctSmoothUp)
1136 -  -pc_mg_type <multiplicative>: (one of) additive multiplicative full kaskade
1137    ML options:
1138 +  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
1139 .  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
1140 .  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
1141 .  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
1142 .  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
1143 .  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
1144 .  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)
1145 .  -pc_ml_repartition <false>: Allow ML to repartition levels of the hierarchy (ML_Repartition_Activate)
1146 .  -pc_ml_repartitionMaxMinRatio <1.3>: Acceptable ratio of repartitioned sizes (ML_Repartition_Set_LargestMinMaxRatio)
1147 .  -pc_ml_repartitionMinPerProc <512>: Smallest repartitioned size (ML_Repartition_Set_MinPerProc)
1148 .  -pc_ml_repartitionPutOnSingleProc <5000>: Problem size automatically repartitioned to one processor (ML_Repartition_Set_PutOnSingleProc)
1149 .  -pc_ml_repartitionType <Zoltan>: Repartitioning library to use (ML_Repartition_Set_Partitioner)
1150 .  -pc_ml_repartitionZoltanScheme <RCB>: Repartitioning scheme to use (None)
1151 .  -pc_ml_Aux <false>: Aggregate using auxiliary coordinate-based Laplacian (None)
1152 -  -pc_ml_AuxThreshold <0.0>: Auxiliary smoother drop tol (None)
1153 
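   Example usage (a minimal sketch; the application name is hypothetical):
     mpiexec -n 4 ./myapp -ksp_type cg -pc_type ml -pc_ml_maxNlevels 3 -pc_ml_CoarsenScheme MIS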
1154    Level: intermediate
1155 
1156   Concepts: multigrid
1157 
1158 .seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
1159            PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycleType(), PCMGSetDistinctSmoothUp(),
1160            PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
1161            PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
1162            PCMGSetCycleTypeOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
1163 M*/
1164 
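/*
   Typical use from application code (a sketch; ksp is assumed to be an existing KSP):

     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,PCML);CHKERRQ(ierr);

   after which the -pc_ml_* options listed above can be set from the command line.
*/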
1165 PETSC_EXTERN PetscErrorCode PCCreate_ML(PC pc)
1166 {
1167   PetscErrorCode ierr;
1168   PC_ML          *pc_ml;
1169   PC_MG          *mg;
1170 
1171   PetscFunctionBegin;
1172   /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
1173   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
1174   ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
1175   /* Since PCMG tries to use the DM associated with the PC, we must delete it */
1176   ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
1177   ierr = PCMGSetGalerkin(pc,PC_MG_GALERKIN_EXTERNAL);CHKERRQ(ierr);
1178   mg   = (PC_MG*)pc->data;
1179 
1180   /* create a supporting struct and attach it to pc */
1181   ierr         = PetscNewLog(pc,&pc_ml);CHKERRQ(ierr);
1182   mg->innerctx = pc_ml;
1183 
1184   pc_ml->ml_object                = 0;
1185   pc_ml->agg_object               = 0;
1186   pc_ml->gridctx                  = 0;
1187   pc_ml->PetscMLdata              = 0;
1188   pc_ml->Nlevels                  = -1;
1189   pc_ml->MaxNlevels               = 10;
1190   pc_ml->MaxCoarseSize            = 1;
1191   pc_ml->CoarsenScheme            = 1;
1192   pc_ml->Threshold                = 0.0;
1193   pc_ml->DampingFactor            = 4.0/3.0;
1194   pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
1195   pc_ml->size                     = 0;
1196   pc_ml->dim                      = 0;
1197   pc_ml->nloc                     = 0;
1198   pc_ml->coords                   = 0;
1199   pc_ml->Repartition              = PETSC_FALSE;
1200   pc_ml->MaxMinRatio              = 1.3;
1201   pc_ml->MinPerProc               = 512;
1202   pc_ml->PutOnSingleProc          = 5000;
1203   pc_ml->RepartitionType          = 0;
1204   pc_ml->ZoltanScheme             = 0;
1205   pc_ml->Aux                      = PETSC_FALSE;
1206   pc_ml->AuxThreshold             = 0.0;
1207 
1208   /* allow for coordinates to be passed */
1209   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCSetCoordinates_C",PCSetCoordinates_ML);CHKERRQ(ierr);
1210 
1211   /* overwrite the pointers of PCMG by the functions of PCML */
1212   pc->ops->setfromoptions = PCSetFromOptions_ML;
1213   pc->ops->setup          = PCSetUp_ML;
1214   pc->ops->reset          = PCReset_ML;
1215   pc->ops->destroy        = PCDestroy_ML;
1216   PetscFunctionReturn(0);
1217 }
1218