1 2 /* 3 Provides an interface to the ML smoothed Aggregation 4 Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs 5 Jed Brown, see [PETSC #18321, #18449]. 6 */ 7 #include <petsc-private/pcimpl.h> /*I "petscpc.h" I*/ 8 #include <../src/ksp/pc/impls/mg/mgimpl.h> /*I "petscpcmg.h" I*/ 9 #include <../src/mat/impls/aij/seq/aij.h> 10 #include <../src/mat/impls/aij/mpi/mpiaij.h> 11 12 #include <math.h> 13 EXTERN_C_BEGIN 14 /* HAVE_CONFIG_H flag is required by ML include files */ 15 #if !defined(HAVE_CONFIG_H) 16 #define HAVE_CONFIG_H 17 #endif 18 #include <ml_include.h> 19 EXTERN_C_END 20 21 typedef enum {PCML_NULLSPACE_AUTO,PCML_NULLSPACE_USER,PCML_NULLSPACE_BLOCK,PCML_NULLSPACE_SCALAR} PCMLNullSpaceType; 22 static const char *const PCMLNullSpaceTypes[] = {"AUTO","USER","BLOCK","SCALAR","PCMLNullSpaceType","PCML_NULLSPACE_",0}; 23 24 /* The context (data structure) at each grid level */ 25 typedef struct { 26 Vec x,b,r; /* global vectors */ 27 Mat A,P,R; 28 KSP ksp; 29 } GridCtx; 30 31 /* The context used to input PETSc matrix into ML at fine grid */ 32 typedef struct { 33 Mat A; /* Petsc matrix in aij format */ 34 Mat Aloc; /* local portion of A to be used by ML */ 35 Vec x,y; 36 ML_Operator *mlmat; 37 PetscScalar *pwork; /* tmp array used by PetscML_comm() */ 38 } FineGridCtx; 39 40 /* The context associates a ML matrix with a PETSc shell matrix */ 41 typedef struct { 42 Mat A; /* PETSc shell matrix associated with mlmat */ 43 ML_Operator *mlmat; /* ML matrix assorciated with A */ 44 Vec y, work; 45 } Mat_MLShell; 46 47 /* Private context for the ML preconditioner */ 48 typedef struct { 49 ML *ml_object; 50 ML_Aggregate *agg_object; 51 GridCtx *gridctx; 52 FineGridCtx *PetscMLdata; 53 PetscInt Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization; 54 PetscReal Threshold,DampingFactor,EnergyMinimizationDropTol; 55 PetscBool SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable; 56 PetscBool reuse_interpolation; 57 PCMLNullSpaceType nulltype; 58 PetscMPIInt size; /* size of communicator for pc->pmat */ 59 } PC_ML; 60 61 #undef __FUNCT__ 62 #define __FUNCT__ "PetscML_getrow" 63 static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[]) 64 { 65 PetscErrorCode ierr; 66 PetscInt m,i,j,k=0,row,*aj; 67 PetscScalar *aa; 68 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data); 69 Mat_SeqAIJ *a = (Mat_SeqAIJ*)ml->Aloc->data; 70 71 72 ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0); 73 for (i = 0; i<N_requested_rows; i++) { 74 row = requested_rows[i]; 75 row_lengths[i] = a->ilen[row]; 76 if (allocated_space < k+row_lengths[i]) return(0); 77 if ( (row >= 0) || (row <= (m-1)) ) { 78 aj = a->j + a->i[row]; 79 aa = a->a + a->i[row]; 80 for (j=0; j<row_lengths[i]; j++){ 81 columns[k] = aj[j]; 82 values[k++] = aa[j]; 83 } 84 } 85 } 86 return(1); 87 } 88 89 #undef __FUNCT__ 90 #define __FUNCT__ "PetscML_comm" 91 static PetscErrorCode PetscML_comm(double p[],void *ML_data) 92 { 93 PetscErrorCode ierr; 94 FineGridCtx *ml=(FineGridCtx*)ML_data; 95 Mat A=ml->A; 96 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 97 PetscMPIInt size; 98 PetscInt i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n; 99 PetscScalar *array; 100 101 PetscFunctionBegin; 102 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 103 if (size == 1) return 0; 104 105 ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr); 106 ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 107 ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 108 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 109 ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr); 110 for (i=in_length; i<out_length; i++){ 111 p[i] = array[i-in_length]; 112 } 113 ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr); 114 PetscFunctionReturn(0); 115 } 116 117 #undef __FUNCT__ 118 #define __FUNCT__ "PetscML_matvec" 119 static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]) 120 { 121 PetscErrorCode ierr; 122 FineGridCtx *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data); 123 Mat A=ml->A, Aloc=ml->Aloc; 124 PetscMPIInt size; 125 PetscScalar *pwork=ml->pwork; 126 PetscInt i; 127 128 PetscFunctionBegin; 129 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 130 if (size == 1){ 131 ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr); 132 } else { 133 for (i=0; i<in_length; i++) pwork[i] = p[i]; 134 PetscML_comm(pwork,ml); 135 ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr); 136 } 137 ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr); 138 ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr); 139 ierr = VecResetArray(ml->x);CHKERRQ(ierr); 140 ierr = VecResetArray(ml->y);CHKERRQ(ierr); 141 PetscFunctionReturn(0); 142 } 143 144 #undef __FUNCT__ 145 #define __FUNCT__ "MatMult_ML" 146 static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y) 147 { 148 PetscErrorCode ierr; 149 Mat_MLShell *shell; 150 PetscScalar *xarray,*yarray; 151 PetscInt x_length,y_length; 152 153 PetscFunctionBegin; 154 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 155 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 156 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 157 x_length = shell->mlmat->invec_leng; 158 y_length = shell->mlmat->outvec_leng; 159 ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray); 160 ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr); 161 ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr); 162 PetscFunctionReturn(0); 163 } 164 165 #undef __FUNCT__ 166 #define __FUNCT__ "MatMultAdd_ML" 167 /* Computes y = w + A * x 168 It is possible that w == y, but not x == y 169 */ 170 static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y) 171 { 172 Mat_MLShell *shell; 173 PetscScalar *xarray,*yarray; 174 PetscInt x_length,y_length; 175 PetscErrorCode ierr; 176 177 PetscFunctionBegin; 178 ierr = MatShellGetContext(A, (void **) &shell);CHKERRQ(ierr); 179 if (y == w) { 180 if (!shell->work) { 181 ierr = VecDuplicate(y, &shell->work);CHKERRQ(ierr); 182 } 183 ierr = VecGetArray(x, &xarray);CHKERRQ(ierr); 184 ierr = VecGetArray(shell->work, &yarray);CHKERRQ(ierr); 185 x_length = shell->mlmat->invec_leng; 186 y_length = shell->mlmat->outvec_leng; 187 ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray); 188 ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr); 189 ierr = VecRestoreArray(shell->work, &yarray);CHKERRQ(ierr); 190 ierr = VecAXPY(y, 1.0, shell->work);CHKERRQ(ierr); 191 } else { 192 ierr = VecGetArray(x, &xarray);CHKERRQ(ierr); 193 ierr = VecGetArray(y, &yarray);CHKERRQ(ierr); 194 x_length = shell->mlmat->invec_leng; 195 y_length = shell->mlmat->outvec_leng; 196 ML_Operator_Apply(shell->mlmat, x_length, xarray, y_length, yarray); 197 ierr = VecRestoreArray(x, &xarray);CHKERRQ(ierr); 198 ierr = VecRestoreArray(y, &yarray);CHKERRQ(ierr); 199 ierr = VecAXPY(y, 1.0, w);CHKERRQ(ierr); 200 } 201 PetscFunctionReturn(0); 202 } 203 204 /* newtype is ignored because "ml" is not listed under Petsc MatType */ 205 #undef __FUNCT__ 206 #define __FUNCT__ "MatConvert_MPIAIJ_ML" 207 static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc) 208 { 209 PetscErrorCode ierr; 210 Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data; 211 Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data; 212 PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j; 213 PetscScalar *aa=a->a,*ba=b->a,*ca; 214 PetscInt am=A->rmap->n,an=A->cmap->n,i,j,k; 215 PetscInt *ci,*cj,ncols; 216 217 PetscFunctionBegin; 218 if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an); 219 220 if (scall == MAT_INITIAL_MATRIX){ 221 ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 222 ci[0] = 0; 223 for (i=0; i<am; i++){ 224 ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]); 225 } 226 ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr); 227 ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr); 228 229 k = 0; 230 for (i=0; i<am; i++){ 231 /* diagonal portion of A */ 232 ncols = ai[i+1] - ai[i]; 233 for (j=0; j<ncols; j++) { 234 cj[k] = *aj++; 235 ca[k++] = *aa++; 236 } 237 /* off-diagonal portion of A */ 238 ncols = bi[i+1] - bi[i]; 239 for (j=0; j<ncols; j++) { 240 cj[k] = an + (*bj); bj++; 241 ca[k++] = *ba++; 242 } 243 } 244 if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]); 245 246 /* put together the new matrix */ 247 an = mpimat->A->cmap->n+mpimat->B->cmap->n; 248 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr); 249 250 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 251 /* Since these are PETSc arrays, change flags to free them as necessary. */ 252 mat = (Mat_SeqAIJ*)(*Aloc)->data; 253 mat->free_a = PETSC_TRUE; 254 mat->free_ij = PETSC_TRUE; 255 256 mat->nonew = 0; 257 } else if (scall == MAT_REUSE_MATRIX){ 258 mat=(Mat_SeqAIJ*)(*Aloc)->data; 259 ci = mat->i; cj = mat->j; ca = mat->a; 260 for (i=0; i<am; i++) { 261 /* diagonal portion of A */ 262 ncols = ai[i+1] - ai[i]; 263 for (j=0; j<ncols; j++) *ca++ = *aa++; 264 /* off-diagonal portion of A */ 265 ncols = bi[i+1] - bi[i]; 266 for (j=0; j<ncols; j++) *ca++ = *ba++; 267 } 268 } else { 269 SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall); 270 } 271 PetscFunctionReturn(0); 272 } 273 274 extern PetscErrorCode MatDestroy_Shell(Mat); 275 #undef __FUNCT__ 276 #define __FUNCT__ "MatDestroy_ML" 277 static PetscErrorCode MatDestroy_ML(Mat A) 278 { 279 PetscErrorCode ierr; 280 Mat_MLShell *shell; 281 282 PetscFunctionBegin; 283 ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr); 284 ierr = VecDestroy(&shell->y);CHKERRQ(ierr); 285 if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);} 286 ierr = PetscFree(shell);CHKERRQ(ierr); 287 ierr = MatDestroy_Shell(A);CHKERRQ(ierr); 288 ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr); 289 PetscFunctionReturn(0); 290 } 291 292 #undef __FUNCT__ 293 #define __FUNCT__ "MatWrapML_SeqAIJ" 294 static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 295 { 296 struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data; 297 PetscErrorCode ierr; 298 PetscInt m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz = PETSC_NULL,nz_max; 299 PetscInt *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k; 300 PetscScalar *ml_vals=matdata->values,*aa; 301 302 PetscFunctionBegin; 303 if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL"); 304 if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass array pointers into SeqAIJ matrix */ 305 if (reuse){ 306 Mat_SeqAIJ *aij= (Mat_SeqAIJ*)(*newmat)->data; 307 aij->i = ml_rowptr; 308 aij->j = ml_cols; 309 aij->a = ml_vals; 310 } else { 311 /* sort ml_cols and ml_vals */ 312 ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz); 313 for (i=0; i<m; i++) { 314 nnz[i] = ml_rowptr[i+1] - ml_rowptr[i]; 315 } 316 aj = ml_cols; aa = ml_vals; 317 for (i=0; i<m; i++){ 318 ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr); 319 aj += nnz[i]; aa += nnz[i]; 320 } 321 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr); 322 ierr = PetscFree(nnz);CHKERRQ(ierr); 323 } 324 PetscFunctionReturn(0); 325 } 326 327 /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */ 328 if (reuse) { 329 for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1); 330 } else { 331 ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr); 332 ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 333 ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr); 334 335 ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz); 336 nz_max = 1; 337 for (i=0; i<m; i++) { 338 nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; 339 if (nnz[i] > nz_max) nz_max = nnz[i]; 340 } 341 ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr); 342 } 343 ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr); 344 for (i=0; i<m; i++) { 345 PetscInt ncols; 346 k = 0; 347 /* diagonal entry */ 348 aj[k] = i; aa[k++] = ml_vals[i]; 349 /* off diagonal entries */ 350 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 351 aj[k] = ml_cols[j]; aa[k++] = ml_vals[j]; 352 } 353 ncols = ml_cols[i+1] - ml_cols[i] + 1; 354 /* sort aj and aa */ 355 ierr = PetscSortIntWithScalarArray(ncols,aj,aa);CHKERRQ(ierr); 356 ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 357 } 358 ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 359 ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 360 361 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 362 ierr = PetscFree(nnz);CHKERRQ(ierr); 363 PetscFunctionReturn(0); 364 } 365 366 #undef __FUNCT__ 367 #define __FUNCT__ "MatWrapML_SHELL" 368 static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 369 { 370 PetscErrorCode ierr; 371 PetscInt m,n; 372 ML_Comm *MLcomm; 373 Mat_MLShell *shellctx; 374 375 PetscFunctionBegin; 376 m = mlmat->outvec_leng; 377 n = mlmat->invec_leng; 378 if (!m || !n){ 379 newmat = PETSC_NULL; 380 PetscFunctionReturn(0); 381 } 382 383 if (reuse){ 384 ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr); 385 shellctx->mlmat = mlmat; 386 PetscFunctionReturn(0); 387 } 388 389 MLcomm = mlmat->comm; 390 ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr); 391 ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr); 392 ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr); 393 ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr); 394 shellctx->A = *newmat; 395 shellctx->mlmat = mlmat; 396 shellctx->work = PETSC_NULL; 397 ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr); 398 ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr); 399 ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr); 400 (*newmat)->ops->destroy = MatDestroy_ML; 401 PetscFunctionReturn(0); 402 } 403 404 #undef __FUNCT__ 405 #define __FUNCT__ "MatWrapML_MPIAIJ" 406 static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat) 407 { 408 struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data; 409 PetscInt *ml_cols=matdata->columns,*aj; 410 PetscScalar *ml_vals=matdata->values,*aa; 411 PetscErrorCode ierr; 412 PetscInt i,j,k,*gordering; 413 PetscInt m=mlmat->outvec_leng,n,nz_max,row; 414 Mat A; 415 416 PetscFunctionBegin; 417 if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL"); 418 n = mlmat->invec_leng; 419 if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n); 420 421 if (reuse) { 422 A = *newmat; 423 for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1); 424 } else { 425 PetscInt *nnzA,*nnzB,*nnz; 426 ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr); 427 ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr); 428 ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr); 429 ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr); 430 431 nz_max = 0; 432 for (i=0; i<m; i++){ 433 nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; 434 if (nz_max < nnz[i]) nz_max = nnz[i]; 435 nnzA[i] = 1; /* diag */ 436 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 437 if (ml_cols[j] < m) nnzA[i]++; 438 } 439 nnzB[i] = nnz[i] - nnzA[i]; 440 } 441 ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr); 442 ierr = PetscFree3(nnzA,nnzB,nnz); 443 } 444 445 /* insert mat values -- remap row and column indices */ 446 nz_max++; 447 ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr); 448 /* create global row numbering for a ML_Operator */ 449 ML_build_global_numbering(mlmat,&gordering,"rows"); 450 for (i=0; i<m; i++) { 451 PetscInt ncols; 452 row = gordering[i]; 453 k = 0; 454 /* diagonal entry */ 455 aj[k] = row; aa[k++] = ml_vals[i]; 456 /* off diagonal entries */ 457 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 458 aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j]; 459 } 460 ncols = ml_cols[i+1] - ml_cols[i] + 1; 461 ierr = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr); 462 } 463 ML_free(gordering); 464 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 465 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 466 *newmat = A; 467 468 ierr = PetscFree2(aa,aj);CHKERRQ(ierr); 469 PetscFunctionReturn(0); 470 } 471 472 /* -----------------------------------------------------------------------------*/ 473 #undef __FUNCT__ 474 #define __FUNCT__ "PCReset_ML" 475 PetscErrorCode PCReset_ML(PC pc) 476 { 477 PetscErrorCode ierr; 478 PC_MG *mg = (PC_MG*)pc->data; 479 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 480 PetscInt level,fine_level=pc_ml->Nlevels-1; 481 482 PetscFunctionBegin; 483 ML_Aggregate_Destroy(&pc_ml->agg_object); 484 ML_Destroy(&pc_ml->ml_object); 485 486 if (pc_ml->PetscMLdata) { 487 ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr); 488 ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr); 489 ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr); 490 ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr); 491 } 492 ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr); 493 494 if (pc_ml->gridctx) { 495 for (level=0; level<fine_level; level++){ 496 if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);} 497 if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);} 498 if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);} 499 if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);} 500 if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);} 501 if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);} 502 } 503 } 504 ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr); 505 PetscFunctionReturn(0); 506 } 507 /* -------------------------------------------------------------------------- */ 508 /* 509 PCSetUp_ML - Prepares for the use of the ML preconditioner 510 by setting data structures and options. 511 512 Input Parameter: 513 . pc - the preconditioner context 514 515 Application Interface Routine: PCSetUp() 516 517 Notes: 518 The interface routine PCSetUp() is not usually called directly by 519 the user, but instead is called by PCApply() if necessary. 520 */ 521 extern PetscErrorCode PCSetFromOptions_MG(PC); 522 extern PetscErrorCode PCReset_MG(PC); 523 524 #undef __FUNCT__ 525 #define __FUNCT__ "PCSetUp_ML" 526 PetscErrorCode PCSetUp_ML(PC pc) 527 { 528 PetscErrorCode ierr; 529 PetscMPIInt size; 530 FineGridCtx *PetscMLdata; 531 ML *ml_object; 532 ML_Aggregate *agg_object; 533 ML_Operator *mlmat; 534 PetscInt nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs; 535 Mat A,Aloc; 536 GridCtx *gridctx; 537 PC_MG *mg = (PC_MG*)pc->data; 538 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 539 PetscBool isSeq, isMPI; 540 KSP smoother; 541 PC subpc; 542 PetscInt mesh_level, old_mesh_level; 543 544 545 PetscFunctionBegin; 546 A = pc->pmat; 547 ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr); 548 549 if (pc->setupcalled) { 550 if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) { 551 /* 552 Reuse interpolaton instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for 553 multiple solves in which the matrix is not changing too quickly. 554 */ 555 ml_object = pc_ml->ml_object; 556 gridctx = pc_ml->gridctx; 557 Nlevels = pc_ml->Nlevels; 558 fine_level = Nlevels - 1; 559 gridctx[fine_level].A = A; 560 561 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 562 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 563 if (isMPI){ 564 ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 565 } else if (isSeq) { 566 Aloc = A; 567 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 568 } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 569 570 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 571 PetscMLdata = pc_ml->PetscMLdata; 572 ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr); 573 PetscMLdata->A = A; 574 PetscMLdata->Aloc = Aloc; 575 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 576 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 577 578 mesh_level = ml_object->ML_finest_level; 579 while (ml_object->SingleLevel[mesh_level].Rmat->to) { 580 old_mesh_level = mesh_level; 581 mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum; 582 583 /* clean and regenerate A */ 584 mlmat = &(ml_object->Amat[mesh_level]); 585 ML_Operator_Clean(mlmat); 586 ML_Operator_Init(mlmat,ml_object->comm); 587 ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level); 588 } 589 590 level = fine_level - 1; 591 if (size == 1) { /* convert ML P, R and A into seqaij format */ 592 for (mllevel=1; mllevel<Nlevels; mllevel++){ 593 mlmat = &(ml_object->Amat[mllevel]); 594 ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 595 level--; 596 } 597 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 598 for (mllevel=1; mllevel<Nlevels; mllevel++){ 599 mlmat = &(ml_object->Amat[mllevel]); 600 ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 601 level--; 602 } 603 } 604 605 for (level=0; level<fine_level; level++) { 606 if (level > 0){ 607 ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 608 } 609 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr); 610 } 611 ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 612 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr); 613 614 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 615 PetscFunctionReturn(0); 616 } else { 617 /* since ML can change the size of vectors/matrices at any level we must destroy everything */ 618 ierr = PCReset_ML(pc);CHKERRQ(ierr); 619 ierr = PCReset_MG(pc);CHKERRQ(ierr); 620 } 621 } 622 623 /* setup special features of PCML */ 624 /*--------------------------------*/ 625 /* covert A to Aloc to be used by ML at fine grid */ 626 pc_ml->size = size; 627 ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr); 628 ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr); 629 if (isMPI){ 630 ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr); 631 } else if (isSeq) { 632 Aloc = A; 633 ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr); 634 } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name); 635 636 /* create and initialize struct 'PetscMLdata' */ 637 ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr); 638 pc_ml->PetscMLdata = PetscMLdata; 639 ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr); 640 641 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr); 642 ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr); 643 ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 644 645 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 646 ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 647 ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr); 648 PetscMLdata->A = A; 649 PetscMLdata->Aloc = Aloc; 650 651 /* create ML discretization matrix at fine grid */ 652 /* ML requires input of fine-grid matrix. It determines nlevels. */ 653 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 654 ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr); 655 ML_Create(&ml_object,pc_ml->MaxNlevels); 656 ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm); 657 pc_ml->ml_object = ml_object; 658 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 659 ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols); 660 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 661 662 ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO); 663 664 /* aggregation */ 665 ML_Aggregate_Create(&agg_object); 666 pc_ml->agg_object = agg_object; 667 668 { 669 MatNullSpace mnull; 670 ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr); 671 if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) { 672 if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER; 673 else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK; 674 else pc_ml->nulltype = PCML_NULLSPACE_SCALAR; 675 } 676 switch (pc_ml->nulltype) { 677 case PCML_NULLSPACE_USER: { 678 PetscScalar *nullvec; 679 const PetscScalar *v; 680 PetscBool has_const; 681 PetscInt i,j,mlocal,nvec,M; 682 const Vec *vecs; 683 if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space"); 684 ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr); 685 ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr); 686 ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr); 687 ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof *nullvec,&nullvec);CHKERRQ(ierr); 688 if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M; 689 for (i=0; i<nvec; i++) { 690 ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr); 691 for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j]; 692 ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr); 693 } 694 ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr); 695 ierr = PetscFree(nullvec);CHKERRQ(ierr); 696 } break; 697 case PCML_NULLSPACE_BLOCK: 698 ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr); 699 break; 700 case PCML_NULLSPACE_SCALAR: 701 break; 702 default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type"); 703 } 704 } 705 ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize); 706 /* set options */ 707 switch (pc_ml->CoarsenScheme) { 708 case 1: 709 ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break; 710 case 2: 711 ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break; 712 case 3: 713 ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break; 714 } 715 ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold); 716 ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor); 717 if (pc_ml->SpectralNormScheme_Anorm){ 718 ML_Set_SpectralNormScheme_Anorm(ml_object); 719 } 720 agg_object->keep_agg_information = (int)pc_ml->KeepAggInfo; 721 agg_object->keep_P_tentative = (int)pc_ml->Reusable; 722 agg_object->block_scaled_SA = (int)pc_ml->BlockScaling; 723 agg_object->minimizing_energy = (int)pc_ml->EnergyMinimization; 724 agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol; 725 agg_object->cheap_minimizing_energy = (int)pc_ml->EnergyMinimizationCheap; 726 727 if (pc_ml->OldHierarchy) { 728 Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 729 } else { 730 Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 731 } 732 if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels); 733 pc_ml->Nlevels = Nlevels; 734 fine_level = Nlevels - 1; 735 736 ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr); 737 /* set default smoothers */ 738 for (level=1; level<=fine_level; level++){ 739 if (size == 1){ 740 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 741 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 742 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 743 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 744 } else { 745 ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr); 746 ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr); 747 ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr); 748 ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr); 749 } 750 } 751 ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr); 752 ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */ 753 ierr = PetscOptionsEnd();CHKERRQ(ierr); 754 755 ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr); 756 pc_ml->gridctx = gridctx; 757 758 /* wrap ML matrices by PETSc shell matrices at coarsened grids. 759 Level 0 is the finest grid for ML, but coarsest for PETSc! */ 760 gridctx[fine_level].A = A; 761 762 level = fine_level - 1; 763 if (size == 1){ /* convert ML P, R and A into seqaij format */ 764 for (mllevel=1; mllevel<Nlevels; mllevel++){ 765 mlmat = &(ml_object->Pmat[mllevel]); 766 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 767 mlmat = &(ml_object->Rmat[mllevel-1]); 768 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 769 770 mlmat = &(ml_object->Amat[mllevel]); 771 ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 772 level--; 773 } 774 } else { /* convert ML P and R into shell format, ML A into mpiaij format */ 775 for (mllevel=1; mllevel<Nlevels; mllevel++){ 776 mlmat = &(ml_object->Pmat[mllevel]); 777 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr); 778 mlmat = &(ml_object->Rmat[mllevel-1]); 779 ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr); 780 781 mlmat = &(ml_object->Amat[mllevel]); 782 ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr); 783 level--; 784 } 785 } 786 787 /* create vectors and ksp at all levels */ 788 for (level=0; level<fine_level; level++){ 789 level1 = level + 1; 790 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr); 791 ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr); 792 ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 793 ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 794 795 ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr); 796 ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 797 ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 798 ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 799 800 ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr); 801 ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr); 802 ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr); 803 ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr); 804 805 if (level == 0){ 806 ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr); 807 } else { 808 ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr); 809 } 810 } 811 ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr); 812 813 /* create coarse level and the interpolation between the levels */ 814 for (level=0; level<fine_level; level++){ 815 level1 = level + 1; 816 ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr); 817 ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr); 818 if (level > 0){ 819 ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 820 } 821 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 822 } 823 ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 824 ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 825 826 /* setupcalled is set to 0 so that MG is setup from scratch */ 827 pc->setupcalled = 0; 828 ierr = PCSetUp_MG(pc);CHKERRQ(ierr); 829 PetscFunctionReturn(0); 830 } 831 832 /* -------------------------------------------------------------------------- */ 833 /* 834 PCDestroy_ML - Destroys the private context for the ML preconditioner 835 that was created with PCCreate_ML(). 836 837 Input Parameter: 838 . pc - the preconditioner context 839 840 Application Interface Routine: PCDestroy() 841 */ 842 #undef __FUNCT__ 843 #define __FUNCT__ "PCDestroy_ML" 844 PetscErrorCode PCDestroy_ML(PC pc) 845 { 846 PetscErrorCode ierr; 847 PC_MG *mg = (PC_MG*)pc->data; 848 PC_ML *pc_ml= (PC_ML*)mg->innerctx; 849 850 PetscFunctionBegin; 851 ierr = PCReset_ML(pc);CHKERRQ(ierr); 852 ierr = PetscFree(pc_ml);CHKERRQ(ierr); 853 ierr = PCDestroy_MG(pc);CHKERRQ(ierr); 854 PetscFunctionReturn(0); 855 } 856 857 #undef __FUNCT__ 858 #define __FUNCT__ "PCSetFromOptions_ML" 859 PetscErrorCode PCSetFromOptions_ML(PC pc) 860 { 861 PetscErrorCode ierr; 862 PetscInt indx,PrintLevel; 863 const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"}; 864 PC_MG *mg = (PC_MG*)pc->data; 865 PC_ML *pc_ml = (PC_ML*)mg->innerctx; 866 PetscMPIInt size; 867 MPI_Comm comm = ((PetscObject)pc)->comm; 868 869 PetscFunctionBegin; 870 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 871 ierr = PetscOptionsHead("ML options");CHKERRQ(ierr); 872 PrintLevel = 0; 873 indx = 0; 874 ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr); 875 ML_Set_PrintLevel(PrintLevel); 876 ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr); 877 ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr); 878 ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr); 879 pc_ml->CoarsenScheme = indx; 880 ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr); 881 ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr); 882 ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr); 883 ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr); 884 ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr); 885 ierr = PetscOptionsEnum("-pc_ml_nullspace","Which type of null space information to use","None",PCMLNullSpaceTypes,(PetscEnum)pc_ml->nulltype,(PetscEnum*)&pc_ml->nulltype,PETSC_NULL);CHKERRQ(ierr); 886 ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr); 887 ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr); 888 /* 889 The following checks a number of conditions. If we let this stuff slip by, then ML's error handling will take over. 890 This is suboptimal because it amounts to calling exit(1) so we check for the most common conditions. 891 892 We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a working 893 combination of options and ML's exit(1) explanations don't help matters. 894 */ 895 if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4"); 896 if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel"); 897 if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);} 898 if (pc_ml->EnergyMinimization) { 899 ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr); 900 } 901 if (pc_ml->EnergyMinimization == 2) { 902 /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */ 903 ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr); 904 } 905 /* energy minimization sometimes breaks if this is turned off, the more classical stuff should be okay without it */ 906 if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE; 907 ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxilliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr); 908 /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */ 909 if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE; 910 ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermedaiate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr); 911 /* 912 ML's C API is severely underdocumented and lacks significant functionality. The C++ API calls 913 ML_Gen_MultiLevelHierarchy_UsingAggregation() which is a modified copy (!?) of the documented function 914 ML_Gen_MGHierarchy_UsingAggregation(). This modification, however, does not provide a strict superset of the 915 functionality in the old function, so some users may still want to use it. Note that many options are ignored in 916 this context, but ML doesn't provide a way to find out which ones. 917 */ 918 ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr); 919 ierr = PetscOptionsTail();CHKERRQ(ierr); 920 PetscFunctionReturn(0); 921 } 922 923 /* -------------------------------------------------------------------------- */ 924 /* 925 PCCreate_ML - Creates a ML preconditioner context, PC_ML, 926 and sets this as the private data within the generic preconditioning 927 context, PC, that was created within PCCreate(). 928 929 Input Parameter: 930 . pc - the preconditioner context 931 932 Application Interface Routine: PCCreate() 933 */ 934 935 /*MC 936 PCML - Use algebraic multigrid preconditioning. This preconditioner requires you provide 937 fine grid discretization matrix. The coarser grid matrices and restriction/interpolation 938 operators are computed by ML, with the matrices coverted to PETSc matrices in aij format 939 and the restriction/interpolation operators wrapped as PETSc shell matrices. 940 941 Options Database Key: 942 Multigrid options(inherited) 943 + -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (MGSetCycles) 944 . -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp) 945 . -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown) 946 -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade 947 ML options: 948 . -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel) 949 . -pc_ml_maxNlevels <10>: Maximum number of levels (None) 950 . -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize) 951 . -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS 952 . -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor) 953 . -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold) 954 - -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm) 955 956 Level: intermediate 957 958 Concepts: multigrid 959 960 .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType, 961 PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), MPSetCycles(), PCMGSetNumberSmoothDown(), 962 PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(), 963 PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(), 964 PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR() 965 M*/ 966 967 EXTERN_C_BEGIN 968 #undef __FUNCT__ 969 #define __FUNCT__ "PCCreate_ML" 970 PetscErrorCode PCCreate_ML(PC pc) 971 { 972 PetscErrorCode ierr; 973 PC_ML *pc_ml; 974 PC_MG *mg; 975 976 PetscFunctionBegin; 977 /* PCML is an inherited class of PCMG. Initialize pc as PCMG */ 978 ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */ 979 ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr); 980 /* Since PCMG tries to use DM assocated with PC must delete it */ 981 ierr = DMDestroy(&pc->dm);CHKERRQ(ierr); 982 mg = (PC_MG*)pc->data; 983 mg->galerkin = 2; /* Use Galerkin, but it is computed externally */ 984 985 /* create a supporting struct and attach it to pc */ 986 ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr); 987 mg->innerctx = pc_ml; 988 989 pc_ml->ml_object = 0; 990 pc_ml->agg_object = 0; 991 pc_ml->gridctx = 0; 992 pc_ml->PetscMLdata = 0; 993 pc_ml->Nlevels = -1; 994 pc_ml->MaxNlevels = 10; 995 pc_ml->MaxCoarseSize = 1; 996 pc_ml->CoarsenScheme = 1; 997 pc_ml->Threshold = 0.0; 998 pc_ml->DampingFactor = 4.0/3.0; 999 pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE; 1000 pc_ml->size = 0; 1001 1002 /* overwrite the pointers of PCMG by the functions of PCML */ 1003 pc->ops->setfromoptions = PCSetFromOptions_ML; 1004 pc->ops->setup = PCSetUp_ML; 1005 pc->ops->reset = PCReset_ML; 1006 pc->ops->destroy = PCDestroy_ML; 1007 PetscFunctionReturn(0); 1008 } 1009 EXTERN_C_END 1010