/*
   Provides an interface to the ML smoothed aggregation preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include <private/pcimpl.h>   /*I "petscpc.h" I*/
#include <../src/ksp/pc/impls/mg/mgimpl.h>   /*I "petscpcmg.h" I*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>

#include <math.h>
EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include <ml_include.h>
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;                 /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input the PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;             /* PETSc matrix in AIJ format */
  Mat         Aloc;          /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork;        /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;             /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat;        /* ML matrix associated with A */
  Vec         y, work;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme,EnergyMinimization;
  PetscReal    Threshold,DampingFactor,EnergyMinimizationDropTol;
  PetscBool    SpectralNormScheme_Anorm,BlockScaling,EnergyMinimizationCheap,Symmetrize,OldHierarchy,KeepAggInfo,Reusable;
  PetscBool    reuse_interpolation;
  PetscMPIInt  size;         /* size of communicator for pc->pmat */
} PC_ML;

#undef __FUNCT__
#define __FUNCT__ "PetscML_getrow"
static int PetscML_getrow(ML_Operator *ML_data, int N_requested_rows, int requested_rows[],int allocated_space, int columns[], double values[], int row_lengths[])
{
  PetscErrorCode ierr;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)ml->Aloc->data;

  ierr = MatGetSize(ml->Aloc,&m,PETSC_NULL); if (ierr) return(0);
  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_comm"
static PetscErrorCode PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PetscML_matvec"
static int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    ierr = PetscML_comm(pwork,ml);CHKERRQ(ierr);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
static PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
/* Computes y = w + A*x
   It is possible that w == y, but not x == y
*/
static PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  if (y == w) {
    if (!shell->work) {
      ierr = VecDuplicate(y,&shell->work);CHKERRQ(ierr);
    }
    ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecGetArray(shell->work,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
    ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(shell->work,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,shell->work);CHKERRQ(ierr);
  } else {
    ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
    x_length = shell->mlmat->invec_leng;
    y_length = shell->mlmat->outvec_leng;
    ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);
    ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
    ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
    ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
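/*
   The getrow/comm/matvec callbacks above avoid copying data by temporarily
   wrapping the raw arrays ML hands us in preexisting PETSc vectors.  A minimal
   sketch of that zero-copy pattern (hypothetical helper, not part of this
   interface):
*/
#if 0
static PetscErrorCode ApplyOnRawArrays(Mat A,Vec x,Vec y,PetscScalar *in,PetscScalar *out)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecPlaceArray(x,in);CHKERRQ(ierr);  /* x now aliases in[]; no copy is made  */
  ierr = VecPlaceArray(y,out);CHKERRQ(ierr); /* y now aliases out[]                  */
  ierr = MatMult(A,x,y);CHKERRQ(ierr);       /* the result lands directly in out[]   */
  ierr = VecResetArray(x);CHKERRQ(ierr);     /* restore the vectors' own storage     */
  ierr = VecResetArray(y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif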
/* newtype is ignored because ML is not a registered PETSc MatType */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
static PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    ierr  = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n + mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary.      */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci  = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(((PetscObject)A)->comm,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
static PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(&shell->y);CHKERRQ(ierr);
  if (shell->work) {ierr = VecDestroy(&shell->work);CHKERRQ(ierr);}
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
static PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz=PETSC_NULL,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++){
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj  += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
  if (reuse) {
    for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
  } else {
    ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
    ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

    ierr   = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
    nz_max = 1;
    for (i=0; i<m; i++) {
      nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
      if (nnz[i] > nz_max) nz_max = nnz[i];
    }
    ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  }
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  for (i=0; i<m; i++) {
    PetscInt ncols;
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    ncols = ml_cols[i+1] - ml_cols[i] + 1;
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(ncols,aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
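/*
   ML stores its Amat in MSR (Modified Sparse Row) format, which MatWrapML_SeqAIJ
   above and MatWrapML_MPIAIJ below unpack: ml_vals[i] (i < m) holds the diagonal
   of row i, and positions ml_cols[i]..ml_cols[i+1]-1 hold the off-diagonal column
   indices (in ml_cols) and values (in ml_vals).  A worked example for the 2x2
   matrix [4 1; 0 3], assuming the standard MSR convention ml_cols[0] = m+1:
*/
#if 0
int    ml_cols[] = {3, 4, 4, 1};         /* m=2; row 0 off-diags at [3,4); row 1 has none */
double ml_vals[] = {4.0, 3.0, 0.0, 1.0}; /* [0..m): diagonals; [m]: unused; then off-diag values */
#endif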
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
static PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  if (reuse){
    ierr = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  shellctx->work  = PETSC_NULL;
  ierr = VecCreate(MLcomm->USR_comm,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
static PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,nz_max,row;
  Mat                   A;

  PetscFunctionBegin;
  if (!mlmat->getrow) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  if (reuse) {
    A = *newmat;
    for (nz_max=0,i=0; i<m; i++) nz_max = PetscMax(nz_max,ml_cols[i+1] - ml_cols[i] + 1);
  } else {
    PetscInt *nnzA,*nnzB,*nnz;
    ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
    ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
    ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);

    nz_max = 0;
    for (i=0; i<m; i++){
      nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
      if (nz_max < nnz[i]) nz_max = nnz[i];
      nnzA[i] = 1; /* diag */
      for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
        if (ml_cols[j] < m) nnzA[i]++;
      }
      nnzB[i] = nnz[i] - nnzA[i];
    }
    ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);
    ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  }

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  /* create a global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++) {
    PetscInt ncols;
    row = gordering[i];
    k   = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ncols = ml_cols[i+1] - ml_cols[i] + 1;
    ierr  = MatSetValues(A,1,&row,ncols,aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ML_free(gordering);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_ML"
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++){
      if (pc_ml->gridctx[level].A){ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P){ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R){ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x){ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b){ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
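/*
   ML and PCMG number their levels in opposite directions: ML level 0 is the
   finest grid, while PETSc level 0 is the coarsest.  PCSetUp_ML below walks
   both numberings at once; the mapping it maintains is equivalent to this
   sketch (hypothetical helper, for illustration only):
*/
#if 0
static PetscInt PetscLevelFromML(PetscInt mllevel,PetscInt Nlevels)
{
  return Nlevels - 1 - mllevel; /* ML finest (0) <-> PETSc finest (Nlevels-1) */
}
#endif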
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
extern PetscErrorCode PCReset_MG(PC);

#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool      isSeq, isMPI;
  KSP            smoother;
  PC             subpc;
  PetscInt       mesh_level, old_mesh_level;

  PetscFunctionBegin;
  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);

  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
        Reuse the interpolation instead of recomputing aggregates and updating the whole hierarchy.
        This is less expensive for multiple solves in which the matrix is not changing too quickly.
      */
      ml_object  = pc_ml->ml_object;
      gridctx    = pc_ml->gridctx;
      Nlevels    = pc_ml->Nlevels;
      fine_level = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
      ierr = PetscTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
      if (isMPI){
        ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata = pc_ml->PetscMLdata;
      ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
      ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level     = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        ML_Operator_Clean(mlmat);
        ML_Operator_Init(mlmat,ml_object->comm);
        ML_Gen_AmatrixRAP(ml_object,old_mesh_level,mesh_level);
      }

#if 0
      /*
        The wrapped data structures are being reused, so wrappers do not need to be regenerated.
        We increase state here in case another object is caching based on state.
      */
      for (level=0; level<fine_level; level++) {
        ierr = PetscObjectStateIncrease((PetscObject)gridctx[level].A);CHKERRQ(ierr);
      }
#else
      level = fine_level - 1;
      if (size == 1) { /* refill the existing SeqAIJ coarse operators from ML in place */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat = &(ml_object->Amat[mllevel]);
          /* alternatively, destroy and rewrap: MatDestroy(&gridctx[level].A) then MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A) */
          ierr  = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* refill the existing MPIAIJ coarse operators from ML in place */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat = &(ml_object->Amat[mllevel]);
          ierr  = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }
#endif

      for (level=0; level<fine_level; level++) {
        if (level > 0){
          ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  ierr = PetscTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
  if (isMPI){
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG,"Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;

  /* create the ML discretization matrix at the fine grid */
  /* ML requires the fine-grid matrix as input; it determines Nlevels */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
  /* set default smoothers (Richardson/SOR on every level but the coarsest) */
  for (level=1; level<=fine_level; level++){
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into SeqAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into MPIAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create the coarse levels and the interpolation between the levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* setupcalled is set to 0 so that MG is set up from scratch */
  pc->setupcalled = 0;
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
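/*
   The reuse branch at the top of PCSetUp_ML is aimed at solve sequences where
   the matrix values change but the nonzero pattern does not.  A sketch of a
   driver loop that would exercise it (hypothetical application code, assuming
   the option -pc_ml_reuse_interpolation has been set):
*/
#if 0
for (step=0; step<nsteps; step++) {
  /* ... update the entries of A without changing its nonzero pattern ... */
  ierr = KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr); /* aggregates are kept; coarse operators are re-RAPed */
}
#endif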
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_ML(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscMPIInt    size;
  MPI_Comm       comm = ((PetscObject)pc)->comm;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx;
  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_Symmetrize","Symmetrize aggregation","ML_Set_Symmetrize",pc_ml->Symmetrize,&pc_ml->Symmetrize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_BlockScaling","Scale all dofs at each node together","None",pc_ml->BlockScaling,&pc_ml->BlockScaling,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_EnergyMinimization","Energy minimization norm type (0=no minimization; see ML manual for 1,2,3; -1 and 4 undocumented)","None",pc_ml->EnergyMinimization,&pc_ml->EnergyMinimization,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsBool("-pc_ml_reuse_interpolation","Reuse the interpolation operators when possible (cheaper, weaker when matrix entries change a lot)","None",pc_ml->reuse_interpolation,&pc_ml->reuse_interpolation,PETSC_NULL);CHKERRQ(ierr);
  /*
    The following checks a number of conditions.  If we let this stuff slip by, then ML's error handling will
    take over.  This is suboptimal because it amounts to calling exit(1), so we check for the most common
    conditions ourselves.

    We also try to set some sane defaults when energy minimization is activated, otherwise it's hard to find a
    working combination of options and ML's exit(1) explanations don't help matters.
  */
  if (pc_ml->EnergyMinimization < -1 || pc_ml->EnergyMinimization > 4) SETERRQ(comm,PETSC_ERR_ARG_OUTOFRANGE,"EnergyMinimization must be in range -1..4");
  if (pc_ml->EnergyMinimization == 4 && size > 1) SETERRQ(comm,PETSC_ERR_SUP,"Energy minimization type 4 does not work in parallel");
  if (pc_ml->EnergyMinimization == 4) {ierr = PetscInfo(pc,"Mandel's energy minimization scheme is experimental and broken in ML-6.2");CHKERRQ(ierr);}
  if (pc_ml->EnergyMinimization) {
    ierr = PetscOptionsReal("-pc_ml_EnergyMinimizationDropTol","Energy minimization drop tolerance","None",pc_ml->EnergyMinimizationDropTol,&pc_ml->EnergyMinimizationDropTol,PETSC_NULL);CHKERRQ(ierr);
  }
  if (pc_ml->EnergyMinimization == 2) {
    /* According to ml_MultiLevelPreconditioner.cpp, this option is only meaningful for norm type (2) */
    ierr = PetscOptionsBool("-pc_ml_EnergyMinimizationCheap","Use cheaper variant of norm type 2","None",pc_ml->EnergyMinimizationCheap,&pc_ml->EnergyMinimizationCheap,PETSC_NULL);CHKERRQ(ierr);
  }
  /* energy minimization sometimes breaks if this is turned off; the more classical schemes should be okay without it */
  if (pc_ml->EnergyMinimization) pc_ml->KeepAggInfo = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_KeepAggInfo","Allows the preconditioner to be reused, or auxiliary matrices to be generated","None",pc_ml->KeepAggInfo,&pc_ml->KeepAggInfo,PETSC_NULL);CHKERRQ(ierr);
  /* Option (-1) doesn't work at all (calls exit(1)) if the tentative restriction operator isn't stored. */
  if (pc_ml->EnergyMinimization == -1) pc_ml->Reusable = PETSC_TRUE;
  ierr = PetscOptionsBool("-pc_ml_Reusable","Store intermediate data structures so that the multilevel hierarchy is reusable","None",pc_ml->Reusable,&pc_ml->Reusable,PETSC_NULL);CHKERRQ(ierr);
  /*
    ML's C API is severely underdocumented and lacks significant functionality.  The C++ API calls
    ML_Gen_MultiLevelHierarchy_UsingAggregation(), which is a modified copy (!?) of the documented function
    ML_Gen_MGHierarchy_UsingAggregation().  This modification, however, does not provide a strict superset of
    the functionality in the old function, so some users may still want to use it.  Note that many options are
    ignored in this context, but ML doesn't provide a way to find out which ones.
  */
  ierr = PetscOptionsBool("-pc_ml_OldHierarchy","Use old routine to generate hierarchy","None",pc_ml->OldHierarchy,&pc_ml->OldHierarchy,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning.  This preconditioner requires that you provide
     the fine-grid discretization matrix.  The coarser-grid matrices and restriction/interpolation
     operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
     and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
.  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
   ML options:
.  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
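/*
   A minimal usage sketch (hypothetical application code): select PCML on the
   preconditioner of an existing KSP and let the options database supply any of
   the -pc_ml_* settings registered in PCSetFromOptions_ML(), e.g.
       -pc_type ml -pc_ml_maxNlevels 5 -pc_ml_CoarsenScheme MIS
*/
#if 0
ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
ierr = PCSetType(pc,PCML);CHKERRQ(ierr);
ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr); /* picks up -pc_ml_* and -pc_mg_* options */
#endif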
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  /* Since PCMG tries to use the DM associated with the PC, we must delete it */
  ierr = DMDestroy(&pc->dm);CHKERRQ(ierr);
  mg   = (PC_MG*)pc->data;
  mg->galerkin = 2;   /* Use Galerkin, but it is computed externally by ML */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  mg->innerctx = pc_ml;

  pc_ml->ml_object     = 0;
  pc_ml->agg_object    = 0;
  pc_ml->gridctx       = 0;
  pc_ml->PetscMLdata   = 0;
  pc_ml->Nlevels       = -1;
  pc_ml->MaxNlevels    = 10;
  pc_ml->MaxCoarseSize = 1;
  pc_ml->CoarsenScheme = 1;
  pc_ml->Threshold     = 0.0;
  pc_ml->DampingFactor = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size          = 0;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->reset          = PCReset_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END