1 2 /* 3 Provides an interface to the ML smoothed Aggregation 4 Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs 5 Jed Brown, see [PETSC #18321, #18449]. 6 */ 7 #include <petsc-private/pcimpl.h> /*I "petscpc.h" I*/ 8 #include <../src/ksp/pc/impls/mg/mgimpl.h> /*I "petscpcmg.h" I*/ 9 #include <../src/mat/impls/aij/seq/aij.h> 10 #include <../src/mat/impls/aij/mpi/mpiaij.h> 11 12 #include <math.h> 13 EXTERN_C_BEGIN 14 /* HAVE_CONFIG_H flag is required by ML include files */ 15 #if !defined(HAVE_CONFIG_H) 16 #define HAVE_CONFIG_H 17 #endif 18 #include <ml_include.h> 19 EXTERN_C_END 20 21 typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType; 22 static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0}; 23 24 /* The context (data structure) at each grid level */ 25 typedef struct { 26 Vec x,b,r; /* global vectors */ 27 Mat A,P,R; 28 KSP ksp; 29 } GridCtx; 30 31 /* The context used to input PETSc matrix into ML at fine grid */ 32 typedef struct { 33 Mat A; /* Petsc matrix in aij format */ 34 Mat Aloc; /* local portion of A to be used by ML */ 35 Vec x,y; 36 ML_Operator *mlmat; 37 PetscScalar *pwork; /* tmp array used by PetscML_comm() */ 38 } FineGridCtx; 39 40 /* The context associates a ML matrix with a PETSc shell matrix */ 41 typedef struct { 42 Mat A; /* PETSc shell matrix associated with mlmat */ 43 ML_Operator *mlmat; /* ML matrix assorciated with A */ 44 Vec y, work; 45 } Mat_MLShell; 46 47 /* Private context for the ML preconditioner */ 48 typedef struct { 49 ML *ml_object; 50 ML_Aggregate *agg_object; 51 GridCtx *gridctx; 52 FineGridCtx *PetscMLdata; 53 PetscInt Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization; 54 PetscReal Threshold,DampingFactor,EnergyMinimizationDropTol; 55 PetscBool SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable; 56 PetscBool reuse_interpolation; 57 PCMLNullSpaceType nulltype; 58 PetscMPIInt size; /* size of communicator for pc->pmat */ 59 } PC_ML; 60 61 #undef __FUNCT__ 62 #define __FUNCT__ "PetscML_getrow" 63 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[]) 64 { 65 PetscErrorCode ierr; 66 PetscInt m,i,j,k=0,row,*aj; 67 PetscScalar *aa; 68 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data); 69 Mat_SeqAIJ *a = (Mat_SeqAIJ*)ml->Aloc->data; 70 71 ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0); 72 for (i = 0; i<N_requested_rows; i++) { 73 row = requested_rows[i]; 74 row_lengths[i] = a->ilen[row]; 75 if (allocated_space < k+row_lengths[i]) return(0); 76 if ( (row >= 0) || (row <= (m-1)) ) { 77 aj = a->j + a->i[row]; 78 aa = a->a + a->i[row]; 79 for (j=0; j<row_lengths[i]; j++){ 80 columns[k] = aj[j]; 81 values[k++] = aa[j]; 82 } 83 } 84 } 85 return(1); 86 } 87 88 #undef __FUNCT__ 89 #define __FUNCT__ "PetscML_comm" 90 static PetscErrorCode PetscML_comm(double p[],void *ML_data) 91 { 92 PetscErrorCode ierr; 93 FineGridCtx *ml=(FineGridCtx*)ML_data; 94 Mat A=ml->A; 95 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 96 PetscMPIInt size; 97 PetscInt i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n; 98 PetscScalar *array; 99 100 PetscFunctionBegin; 101 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 102 if (size == 1) return 0; 103 104 ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr); 105 ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 106 ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 107 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 108 ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr); 109 for (i=in_length; i<out_length; i++){ 110 p[i] = array[i-in_length]; 111 } 112 ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr); 113 PetscFunctionReturn(0); 114 } 115 116 #undef __FUNCT__ 117 #define __FUNCT__ "PetscML_matvec" 118 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]) 119 { 120 PetscErrorCode ierr; 121 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data); 122 Mat A=ml->A, Aloc=ml->Aloc; 123 PetscMPIInt size; 124 PetscScalar *pwork=ml->pwork; 125 PetscInt i; 126 127 PetscFunctionBegin; 128 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 129 if (size == 1){ 130 ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr); 131 } else { 132 for (i=0; i<in_length; i++) pwork[i] = p[i]; 133 PetscML_comm(pwork,ml); 134 ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr); 135 } 136 ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr); 137 ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr); 138 ierr = VecResetArray(ml->x);CHKERRQ(ierr); 139 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 140 PetscFunctionReturn(0); 141 } 142 143 #undef __FUNCT__ 144 #define __FUNCT__ "MatMult_ML" 145 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y) 146 { 147 PetscErrorCode ierr; 148 Mat_MLShell *shell; 149 PetscScalar *xarray,*yarray; 150 PetscInt x_length,y_length; 151 152 PetscFunctionBegin; 153 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 154 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 155 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 156 x_length = shell->mlmat->invec_leng; 157 y_length = shell->mlmat->outvec_leng; 158 ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray); 159 ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr); 160 ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr); 161 PetscFunctionReturn(0); 162 } 163 164 #undef __FUNCT__ 165 #define __FUNCT__ "MatMultAdd_ML" 166 /* Computes y = w + A * x 167 It is possible that w == y, but not x == y 168 */ 169 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y) 170 { 171 Mat_MLShell *shell; 172 PetscScalar *xarray,*yarray; 173 PetscInt x_length,y_length; 174 PetscErrorCode ierr; 175 176 PetscFunctionBegin; 177 ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr); 178 if (y == w) { 179 if (!shell->work) { 180 ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr); 181 } 182 ierr = VecGetArray(x, &xarray);CHKERRQ(ierr); 183 ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr); 184 x_length = shell->mlmat->invec_leng; 185 y_length = shell->mlmat->outvec_leng; 186 ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray); 187 ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr); 188 ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr); 189 ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr); 190 } else { 191 ierr = VecGetArray(x, &xarray);CHKERRQ(ierr); 192 ierr = VecGetArray(y, &yarray);CHKERRQ(ierr); 193 x_length = shell->mlmat->invec_leng; 194 y_length = shell->mlmat->outvec_leng; 195 ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray); 196 ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr); 197 ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr); 198 ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr); 199 } 200 PetscFunctionReturn(0); 201 } 202 203 /* newtype is ignored since only handles one case */ 204 #undef __FUNCT__ 205 #define __FUNCT__ "MatConvert_MPIAIJ_ML" 206 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc) 207 { 208 PetscErrorCode ierr; 209 Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data; 210 Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data; 211 PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j; 212 PetscScalar *aa=a->a,*ba=b->a,*ca; 213 PetscInt am=A->rmap->n,an=A->cmap->n,i,j,k; 214 PetscInt *ci,*cj,ncols; 215 216 PetscFunctionBegin; 217 if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an); 218 219 if (scall == MAT_INITIAL_MATRIX){ 220 ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 221 ci[0] = 0; 222 for (i=0; i<am; i++){ 223 ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]); 224 } 225 ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr); 226 ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr); 227 228 k = 0; 229 for (i=0; i<am; i++){ 230 /* diagonal portion of A */ 231 ncols = ai[i+1] - ai[i]; 232 for (j=0; j<ncols; j++) { 233 cj[k] = *aj++; 234 ca[k++] = *aa++; 235 } 236 /* off-diagonal portion of A */ 237 ncols = bi[i+1] - bi[i]; 238 for (j=0; j<ncols; j++) { 239 cj[k] = an + (*bj); bj++; 240 ca[k++] = *ba++; 241 } 242 } 243 if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]); 244 245 /* put together the new matrix */ 246 an = mpimat->A->cmap->n+mpimat->B->cmap->n; 247 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr); 248 249 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 250 /* Since these are PETSc arrays, change flags to free them as necessary. */ 251 mat = (Mat_SeqAIJ*)(*Aloc)->data; 252 mat->free_a = PETSC_TRUE; 253 mat->free_ij = PETSC_TRUE; 254 255 mat->nonew = 0; 256 } else if (scall == MAT_REUSE_MATRIX){ 257 mat=(Mat_SeqAIJ*)(*Aloc)->data; 258 ci = mat->i; cj = mat->j; ca = mat->a; 259 for (i=0; i<am; i++) { 260 /* diagonal portion of A */ 261 ncols = ai[i+1] - ai[i]; 262 for (j=0; j<ncols; j++) *ca++ = *aa++; 263 /* off-diagonal portion of A */ 264 ncols = bi[i+1] - bi[i]; 265 for (j=0; j<ncols; j++) *ca++ = *ba++; 266 } 267 } else SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall); 268 PetscFunctionReturn(0); 269 } 270 271 extern PetscErrorCode MatDestroy_Shell(Mat); 272 #undef __FUNCT__ 273 #define __FUNCT__ "MatDestroy_ML" 274 static PetscErrorCode MatDestroy_ML(Mat A) 275 { 276 PetscErrorCode ierr; 277 Mat_MLShell *shell; 278 279 PetscFunctionBegin; 280 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 281 ierr = VecDestroy(&shell->y);CHKERRQ(ierr); 282 if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);} 283 ierr = PetscFree(shell);CHKERRQ(ierr); 284 ierr = MatDestroy_Shell(A);CHKERRQ(ierr); 285 ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr); 286 PetscFunctionReturn(0); 287 } 288 289 #undef __FUNCT__ 290 #define __FUNCT__ "MatWrapML_SeqAIJ" 291 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 292 { 293 struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data; 294 PetscErrorCode ierr; 295 PetscInt m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max; 296 PetscInt *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k; 297 PetscScalar *ml_vals=matdata->values,*aa; 298 299 PetscFunctionBegin; 300 if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL"); 301 if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */ 302 if (reuse){ 303 Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data; 304 aij->i = ml_rowptr; 305 aij->j = ml_cols; 306 aij->a = ml_vals; 307 } else { 308 /* sort ml_cols and ml_vals */ 309 ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz); 310 for (i=0; i<m; i++) { 311 nnz[i] = ml_rowptr[i+1] - ml_rowptr[i]; 312 } 313 aj = ml_cols; aa = ml_vals; 314 for (i=0; i<m; i++){ 315 ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr); 316 aj += nnz[i]; aa += nnz[i]; 317 } 318 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr); 319 ierr = PetscFree(nnz);CHKERRQ(ierr); 320 } 321 PetscFunctionReturn(0); 322 } 323 324 /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */ 325 if (reuse) { 326 for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1); 327 } else { 328 ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr); 329 ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 330 ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr); 331 332 ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz); 333 nz_max = 1; 334 for (i=0; i<m; i++) { 335 nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; 336 if (nnz[i] > nz_max) nz_max = nnz[i]; 337 } 338 ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr); 339 } 340 ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr); 341 for (i=0; i<m; i++) { 342 PetscInt ncols; 343 k = 0; 344 /* diagonal entry */ 345 aj[k] = i; aa[k++] = ml_vals[i]; 346 /* off diagonal entries */ 347 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 348 aj[k] = ml_cols[j]; aa[k++] = ml_vals[j]; 349 } 350 ncols = ml_cols[i+1] - ml_cols[i] + 1; 351 /* sort aj and aa */ 352 ierr = PetscSortIntWithScalarArray(ncols,aj,aa);CHKERRQ(ierr); 353 ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 354 } 355 ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 356 ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 357 358 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 359 ierr = PetscFree(nnz);CHKERRQ(ierr); 360 PetscFunctionReturn(0); 361 } 362 363 #undef __FUNCT__ 364 #define __FUNCT__ "MatWrapML_SHELL" 365 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 366 { 367 PetscErrorCode ierr; 368 PetscInt m,n; 369 ML_Comm *MLcomm; 370 Mat_MLShell *shellctx; 371 372 PetscFunctionBegin; 373 m = mlmat->outvec_leng; 374 n = mlmat->invec_leng; 375 if (!m || !n){ 376 newmat = PETSC_NULL; 377 PetscFunctionReturn(0); 378 } 379 380 if (reuse){ 381 ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr); 382 shellctx->mlmat = mlmat; 383 PetscFunctionReturn(0); 384 } 385 386 MLcomm = mlmat->comm; 387 ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr); 388 ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr); 389 ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr); 390 ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr); 391 shellctx->A = *newmat; 392 shellctx->mlmat = mlmat; 393 shellctx->work = PETSC_NULL; 394 ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr); 395 ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr); 396 ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr); 397 (*newmat)->ops->destroy = MatDestroy_ML; 398 PetscFunctionReturn(0); 399 } 400 401 #undef __FUNCT__ 402 #define __FUNCT__ "MatWrapML_MPIAIJ" 403 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 404 { 405 struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data; 406 PetscInt *ml_cols=matdata->columns,*aj; 407 PetscScalar *ml_vals=matdata->values,*aa; 408 PetscErrorCode ierr; 409 PetscInt i,j,k,*gordering; 410 PetscInt m=mlmat->outvec_leng,n,nz_max,row; 411 Mat A; 412 413 PetscFunctionBegin; 414 if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL"); 415 n = mlmat->invec_leng; 416 if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n); 417 418 if (reuse) { 419 A = *newmat; 420 for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1); 421 } else { 422 PetscInt *nnzA,*nnzB,*nnz; 423 ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr); 424 ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 425 ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr); 426 ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr); 427 428 nz_max = 0; 429 for (i=0; i<m; i++){ 430 nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; 431 if (nz_max < nnz[i]) nz_max = nnz[i]; 432 nnzA[i] = 1; /* diag */ 433 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 434 if (ml_cols[j] < m) nnzA[i]++; 435 } 436 nnzB[i] = nnz[i] - nnzA[i]; 437 } 438 ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr); 439 ierr = PetscFree3(nnzA,nnzB,nnz); 440 } 441 442 /* insert mat values -- remap row and column indices */ 443 nz_max++; 444 ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr); 445 /* create global row numbering for a ML_Operator */ 446 ML_build_global_numbering(mlmat,&gordering,"rows"); 447 for (i=0; i<m; i++) { 448 PetscInt ncols; 449 row = gordering[i]; 450 k = 0; 451 /* diagonal entry */ 452 aj[k] = row; aa[k++] = ml_vals[i]; 453 /* off diagonal entries */ 454 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 455 aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j]; 456 } 457 ncols = ml_cols[i+1] - ml_cols[i] + 1; 458 ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 459 } 460 ML_free(gordering); 461 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 462 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 463 *newmat = A; 464 465 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 466 PetscFunctionReturn(0); 467 } 468 469 /* -----------------------------------------------------------------------------*/ 470 #undef __FUNCT__ 471 #define __FUNCT__ "PCReset_ML" 472 PetscErrorCode PCReset_ML(PC pc) 473 { 474 PetscErrorCode ierr; 475 PC_MG *mg = (PC_MG*)pc->data; 476 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 477 PetscInt level,fine_level=pc_ml->Nlevels-1; 478 479 PetscFunctionBegin; 480 ML_Aggregate_Destroy(&pc_ml->agg_object); 481 ML_Destroy(&pc_ml->ml_object); 482 483 if (pc_ml->PetscMLdata) { 484 ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr); 485 ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr); 486 ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr); 487 ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr); 488 } 489 ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr); 490 491 if (pc_ml->gridctx) { 492 for (level=0; level<fine_level; level++){ 493 if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);} 494 if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);} 495 if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);} 496 if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);} 497 if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);} 498 if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);} 499 } 500 } 501 ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr); 502 PetscFunctionReturn(0); 503 } 504 /* -------------------------------------------------------------------------- */ 505 /* 506 PCSetUp_ML - Prepares for the use of the ML preconditioner 507 by setting data structures and options. 508 509 Input Parameter: 510 . pc - the preconditioner context 511 512 Application Interface Routine: PCSetUp() 513 514 Notes: 515 The interface routine PCSetUp() is not usually called directly by 516 the user, but instead is called by PCApply() if necessary. 517 */ 518 extern PetscErrorCode PCSetFromOptions_MG(PC); 519 extern PetscErrorCode PCReset_MG(PC); 520 521 #undef __FUNCT__ 522 #define __FUNCT__ "PCSetUp_ML" 523 PetscErrorCode PCSetUp_ML(PC pc) 524 { 525 PetscErrorCode ierr; 526 PetscMPIInt size; 527 FineGridCtx *PetscMLdata; 528 ML *ml_object; 529 ML_Aggregate *agg_object; 530 ML_Operator *mlmat; 531 PetscInt nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs; 532 Mat A,Aloc; 533 GridCtx *gridctx; 534 PC_MG *mg = (PC_MG*)pc->data; 535 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 536 PetscBool isSeq, isMPI; 537 KSP smoother; 538 PC subpc; 539 PetscInt mesh_level, old_mesh_level; 540 541 PetscFunctionBegin; 542 A = pc->pmat; 543 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 544 545 if (pc->setupcalled) { 546 if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) { 547 /* 548 Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for 549 multiple solves in which the matrix is not changing too quickly. 550 */ 551 ml_object = pc_ml->ml_object; 552 gridctx = pc_ml->gridctx; 553 Nlevels = pc_ml->Nlevels; 554 fine_level = Nlevels - 1; 555 gridctx[fine_level].A = A; 556 557 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 558 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 559 if (isMPI){ 560 ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 561 } else if (isSeq) { 562 Aloc = A; 563 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 564 } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 565 566 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 567 PetscMLdata = pc_ml->PetscMLdata; 568 ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr); 569 PetscMLdata->A = A; 570 PetscMLdata->Aloc = Aloc; 571 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 572 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 573 574 mesh_level = ml_object->ML_finest_level; 575 while (ml_object->SingleLevel[mesh_level].Rmat->to) { 576 old_mesh_level = mesh_level; 577 mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum; 578 579 /* clean and regenerate A */ 580 mlmat = &(ml_object->Amat[mesh_level]); 581 ML_Operator_Clean(mlmat); 582 ML_Operator_Init(mlmat,ml_object->comm); 583 ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level); 584 } 585 586 level = fine_level - 1; 587 if (size == 1) { /* convert ML P, R and A into seqaij format */ 588 for (mllevel=1; mllevel<Nlevels; mllevel++){ 589 mlmat = &(ml_object->Amat[mllevel]); 590 ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 591 level--; 592 } 593 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 594 for (mllevel=1; mllevel<Nlevels; mllevel++){ 595 mlmat = &(ml_object->Amat[mllevel]); 596 ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 597 level--; 598 } 599 } 600 601 for (level=0; level<fine_level; level++) { 602 if (level > 0){ 603 ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 604 } 605 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr); 606 } 607 ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 608 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr); 609 610 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 611 PetscFunctionReturn(0); 612 } else { 613 /* since ML can change the size of vectors/matrices at any level we must destroy everything */ 614 ierr = PCReset_ML(pc);CHKERRQ(ierr); 615 ierr = PCReset_MG(pc);CHKERRQ(ierr); 616 } 617 } 618 619 /* setup special features of PCML */ 620 /*--------------------------------*/ 621 /* covert A to Aloc to be used by ML at fine grid */ 622 pc_ml->size = size; 623 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 624 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 625 if (isMPI){ 626 ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 627 } else if (isSeq) { 628 Aloc = A; 629 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 630 } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 631 632 /* create and initialize struct 'PetscMLdata' */ 633 ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr); 634 pc_ml->PetscMLdata = PetscMLdata; 635 ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr); 636 637 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr); 638 ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr); 639 ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 640 641 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 642 ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 643 ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr); 644 PetscMLdata->A = A; 645 PetscMLdata->Aloc = Aloc; 646 647 /* create ML discretization matrix at fine grid */ 648 /* ML requires input of fine-grid matrix. It determines nlevels. */ 649 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 650 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 651 ML_Create(&ml_object,pc_ml->MaxNlevels); 652 ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm); 653 pc_ml->ml_object = ml_object; 654 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 655 ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols); 656 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 657 658 ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO); 659 660 /* aggregation */ 661 ML_Aggregate_Create(&agg_object); 662 pc_ml->agg_object = agg_object; 663 664 { 665 MatNullSpace mnull; 666 ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr); 667 if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) { 668 if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER; 669 else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK; 670 else pc_ml->nulltype = PCML_NULLSPACE_SCALAR; 671 } 672 switch (pc_ml->nulltype) { 673 case PCML_NULLSPACE_USER: { 674 PetscScalar *nullvec; 675 const PetscScalar *v; 676 PetscBool has_const; 677 PetscInt i,j,mlocal,nvec,M; 678 const Vec *vecs; 679 if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space"); 680 ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr); 681 ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr); 682 ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr); 683 ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof *nullvec,&nullvec);CHKERRQ(ierr); 684 if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M; 685 for (i=0; i<nvec; i++) { 686 ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr); 687 for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j]; 688 ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr); 689 } 690 ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr); 691 ierr = PetscFree(nullvec);CHKERRQ(ierr); 692 } break; 693 case PCML_NULLSPACE_BLOCK: 694 ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr); 695 break; 696 case PCML_NULLSPACE_SCALAR: 697 break; 698 default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type"); 699 } 700 } 701 ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize); 702 /* set options */ 703 switch (pc_ml->CoarsenScheme) { 704 case 1: 705 ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break; 706 case 2: 707 ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break; 708 case 3: 709 ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break; 710 } 711 ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold); 712 ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor); 713 if (pc_ml->SpectralNormScheme_Anorm){ 714 ML_Set_SpectralNormScheme_Anorm(ml_object); 715 } 716 agg_object->keep_agg_information = (int)pc_ml->KeepAggInfo; 717 agg_object->keep_P_tentative = (int)pc_ml->Reusable; 718 agg_object->block_scaled_SA = (int)pc_ml->BlockScaling; 719 agg_object->minimizing_energy = (int)pc_ml->EnergyMinimization; 720 agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol; 721 agg_object->cheap_minimizing_energy = (int)pc_ml->EnergyMinimizationCheap; 722 723 if (pc_ml->OldHierarchy) { 724 Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 725 } else { 726 Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 727 } 728 if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels); 729 pc_ml->Nlevels = Nlevels; 730 fine_level = Nlevels - 1; 731 732 ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr); 733 /* set default smoothers */ 734 for (level=1; level<=fine_level; level++){ 735 if (size == 1){ 736 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 737 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 738 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 739 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 740 } else { 741 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 742 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 743 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 744 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 745 } 746 } 747 ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */ 748 749 ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr); 750 pc_ml->gridctx = gridctx; 751 752 /* wrap ML matrices by PETSc shell matrices at coarsened grids. 753 Level 0 is the finest grid for ML, but coarsest for PETSc! */ 754 gridctx[fine_level].A = A; 755 756 level = fine_level - 1; 757 if (size == 1){ /* convert ML P, R and A into seqaij format */ 758 for (mllevel=1; mllevel<Nlevels; mllevel++){ 759 mlmat = &(ml_object->Pmat[mllevel]); 760 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 761 mlmat = &(ml_object->Rmat[mllevel-1]); 762 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 763 764 mlmat = &(ml_object->Amat[mllevel]); 765 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 766 level--; 767 } 768 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 769 for (mllevel=1; mllevel<Nlevels; mllevel++){ 770 mlmat = &(ml_object->Pmat[mllevel]); 771 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 772 mlmat = &(ml_object->Rmat[mllevel-1]); 773 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 774 775 mlmat = &(ml_object->Amat[mllevel]); 776 ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 777 level--; 778 } 779 } 780 781 /* create vectors and ksp at all levels */ 782 for (level=0; level<fine_level; level++){ 783 level1 = level + 1; 784 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr); 785 ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr); 786 ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 787 ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 788 789 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr); 790 ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 791 ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 792 ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 793 794 ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr); 795 ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 796 ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr); 797 ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr); 798 799 if (level == 0){ 800 ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr); 801 } else { 802 ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr); 803 } 804 } 805 ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr); 806 807 /* create coarse level and the interpolation between the levels */ 808 for (level=0; level<fine_level; level++){ 809 level1 = level + 1; 810 ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr); 811 ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr); 812 if (level > 0){ 813 ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 814 } 815 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 816 } 817 ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 818 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 819 820 /* setupcalled is set to 0 so that MG is setup from scratch */ 821 pc->setupcalled = 0; 822 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 823 PetscFunctionReturn(0); 824 } 825 826 /* -------------------------------------------------------------------------- */ 827 /* 828 PCDestroy_ML - Destroys the private context for the ML preconditioner 829 that was created with PCCreate_ML(). 830 831 Input Parameter: 832 . pc - the preconditioner context 833 834 Application Interface Routine: PCDestroy() 835 */ 836 #undef __FUNCT__ 837 #define __FUNCT__ "PCDestroy_ML" 838 PetscErrorCode PCDestroy_ML(PC pc) 839 { 840 PetscErrorCode ierr; 841 PC_MG *mg = (PC_MG*)pc->data; 842 PC_ML *pc_ml= (PC_ML*)mg->innerctx; 843 844 PetscFunctionBegin; 845 ierr = PCReset_ML(pc);CHKERRQ(ierr); 846 ierr = PetscFree(pc_ml);CHKERRQ(ierr); 847 ierr = PCDestroy_MG(pc);CHKERRQ(ierr); 848 PetscFunctionReturn(0); 849 } 850 851 #undef __FUNCT__ 852 #define __FUNCT__ "PCSetFromOptions_ML" 853 PetscErrorCode PCSetFromOptions_ML(PC pc) 854 { 855 PetscErrorCode ierr; 856 PetscInt indx,PrintLevel; 857 const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"}; 858 PC_MG *mg = (PC_MG*)pc->data; 859 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 860 PetscMPIInt size; 861 MPI_Comm comm = ((PetscObject)pc)->comm; 862 863 PetscFunctionBegin; 864 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 865 ierr = PetscOptionsHead("ML options");CHKERRQ(ierr); 866 PrintLevel = 0; 867 indx = 0; 868 ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr); 869 ML_Set_PrintLevel(PrintLevel); 870 ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr); 871 ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr); 872 ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr); 873 pc_ml->CoarsenScheme = indx; 874 ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr); 875 ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr); 876 ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr); 877 ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr); 878 ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr); 879 ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr); 880 ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr); 881 ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr); 882 /* 883 The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will take over. 884 This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions. 885 886 We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working 887 combination of options and ML's exit(1) explanations don't help matters. 888 */ 889 if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4"); 890 if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel"); 891 if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);} 892 if (pc_ml->EnergyMinimization) { 893 ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr); 894 } 895 if (pc_ml->EnergyMinimization == 2) { 896 /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */ 897 ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr); 898 } 899 /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */ 900 if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE; 901 ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr); 902 /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */ 903 if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE; 904 ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr); 905 /* 906 ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls 907 ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function 908 ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the 909 functionality in the old function, so some users may still want to use it. Note that many options are ignored in 910 this context, but ML doesn't provide a way to find out which ones. 911 */ 912 ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr); 913 ierr = PetscOptionsTail();CHKERRQ(ierr); 914 PetscFunctionReturn(0); 915 } 916 917 /* -------------------------------------------------------------------------- */ 918 /* 919 PCCreate_ML - Creates a ML preconditioner context, PC_ML, 920 and sets this as the private data within the generic preconditioning 921 context, PC, that was created within PCCreate(). 922 923 Input Parameter: 924 . pc - the preconditioner context 925 926 Application Interface Routine: PCCreate() 927 */ 928 929 /*MC 930 PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide 931 fine grid discretization matrix. The coarser grid matrices and restriction/interpolation 932 operators are computed by ML, with the matrices coverted to PETSc matrices in aij format 933 and the restriction/interpolation operators wrapped as PETSc shell matrices. 934 935 Options Database Key: 936 Multigrid options(inherited) 937 + -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles) 938 . -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp) 939 . -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown) 940 -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade 941 ML options: 942 . -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel) 943 . -pc_ml_maxNlevels <10>: Maximum number of levels (None) 944 . -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize) 945 . -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS 946 . -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor) 947 . -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold) 948 - -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm) 949 950 Level: intermediate 951 952 Concepts: multigrid 953 954 .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType, 955 PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), MPSetCycles(), PCMGSetNumberSmoothDown(), 956 PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(), 957 PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(), 958 PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR() 959 M*/ 960 961 EXTERN_C_BEGIN 962 #undef __FUNCT__ 963 #define __FUNCT__ "PCCreate_ML" 964 PetscErrorCode PCCreate_ML(PC pc) 965 { 966 PetscErrorCode ierr; 967 PC_ML *pc_ml; 968 PC_MG *mg; 969 970 PetscFunctionBegin; 971 /* PCML is an inherited class of PCMG. Initialize pc as PCMG */ 972 ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */ 973 ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr); 974 /* Since PCMG tries to use DM assocated with PC must delete it */ 975 ierr = DMDestroy(&pc->dm);CHKERRQ(ierr); 976 mg = (PC_MG*)pc->data; 977 mg->galerkin = 2; /* Use Galerkin, but it is computed externally */ 978 979 /* create a supporting struct and attach it to pc */ 980 ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr); 981 mg->innerctx = pc_ml; 982 983 pc_ml->ml_object = 0; 984 pc_ml->agg_object = 0; 985 pc_ml->gridctx = 0; 986 pc_ml->PetscMLdata = 0; 987 pc_ml->Nlevels = -1; 988 pc_ml->MaxNlevels = 10; 989 pc_ml->MaxCoarseSize = 1; 990 pc_ml->CoarsenScheme = 1; 991 pc_ml->Threshold = 0.0; 992 pc_ml->DampingFactor = 4.0/3.0; 993 pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE; 994 pc_ml->size = 0; 995 996 /* overwrite the pointers of PCMG by the functions of PCML */ 997 pc->ops->setfromoptions = PCSetFromOptions_ML; 998 pc->ops->setup = PCSetUp_ML; 999 pc->ops->reset = PCReset_ML; 1000 pc->ops->destroy = PCDestroy_ML; 1001 PetscFunctionReturn(0); 1002 } 1003 EXTERN_C_END 1004