#define PETSCKSP_DLL

/*
   Provides an interface to the ML smoothed aggregation preconditioner.
   Note: Something non-obvious breaks -pc_mg_type ADDITIVE for parallel runs
                                       Jed Brown, see [PETSC #18321, #18449].
*/
#include "private/pcimpl.h"                    /*I "petscpc.h" I*/
#include "../src/ksp/pc/impls/mg/mgimpl.h"     /*I "petscmg.h" I*/
#include "../src/mat/impls/aij/seq/aij.h"
#include "../src/mat/impls/aij/mpi/mpiaij.h"

#include <math.h>
EXTERN_C_BEGIN
/* HAVE_CONFIG_H flag is required by ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include "ml_include.h"
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec  x,b,r;  /* global vectors */
  Mat  A,P,R;
  KSP  ksp;
} GridCtx;

/* The context used to input the PETSc matrix into ML at the fine grid */
typedef struct {
  Mat          A;       /* PETSc matrix in aij format */
  Mat          Aloc;    /* local portion of A to be used by ML */
  Vec          x,y;
  ML_Operator  *mlmat;
  PetscScalar  *pwork;  /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;       /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat;  /* ML matrix associated with A */
  Vec          y;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal    Threshold,DampingFactor;
  PetscTruth   SpectralNormScheme_Anorm;
  PetscMPIInt  size; /* size of communicator for pc->pmat */
} PC_ML;

extern int PetscML_getrow(ML_Operator*,int,int[],int,int[],double[],int[]);
extern int PetscML_matvec(ML_Operator*,int,double[],int,double[]);
extern int PetscML_comm(double[],void*);
extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*);
extern PetscErrorCode MatDestroy_ML(Mat);
extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);
extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,MatReuse,Mat*);
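/*
   Ownership notes for PCDestroy_ML_Private() below: gridctx[fine_level].A
   aliases pc->pmat and is never destroyed here, which is why the loop stops
   at fine_level-1; the residual vectors r are created at levels
   1..fine_level (see PCSetUp_ML), hence the level+1 index when destroying
   them.
*/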
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML_Private"
PetscErrorCode PCDestroy_ML_Private(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    if (pc_ml->size > 1) {ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
    if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  MatReuse       reuse  = MAT_INITIAL_MATRIX;
  PetscTruth     isSeq,isMPI;

  PetscFunctionBegin;
  if (pc->setupcalled){
    if (pc->flag == SAME_NONZERO_PATTERN){
      reuse       = MAT_REUSE_MATRIX;
      PetscMLdata = pc_ml->PetscMLdata;
      gridctx     = pc_ml->gridctx;
      /* ML objects cannot be reused */
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
    } else {
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
      ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  pc_ml->size = size;
  ierr = PetscTypeCompare((PetscObject)A,MATSEQAIJ,&isSeq);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)A,MATMPIAIJ,&isMPI);CHKERRQ(ierr);
  if (isMPI){
    if (reuse) Aloc = PetscMLdata->Aloc;
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONG,"Invalid matrix type for ML. ML can only handle AIJ matrices.");
  }

  /* create and initialize struct 'PetscMLdata' */
  if (!reuse){
    ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
    pc_ml->PetscMLdata = PetscMLdata;
    ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  }
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
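  /*
     ML sees the fine-grid operator only through the three callbacks
     registered below (getrow, comm, matvec).  A minimal sketch of the
     pattern, assuming a user context `ctx`, local row count `m`, and
     hypothetical callbacks MyGetrow/MyComm/MyMatvec:

       ML_Create(&ml,maxlevels);
       ML_Init_Amatrix(ml,0,m,m,ctx);
       ML_Set_Amatrix_Getrow(ml,0,MyGetrow,MyComm,nlocal_allcols);
       ML_Set_Amatrix_Matvec(ml,0,MyMatvec);
  */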
  /* create ML discretization matrix at the fine grid */
  /* ML requires input of the fine-grid matrix; it determines Nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %D must be > 0",Nlevels);
  if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %D must be the same",pc_ml->Nlevels,Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level     = Nlevels - 1;
  if (!pc->setupcalled){
    KSP smoother;
    PC  subpc;
    ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
    /* set default smoothers: Richardson with SOR */
    for (level=1; level<=fine_level; level++){
      ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
      ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
      ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
      ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
    }
    ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  }

  if (!reuse){
    ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
    pc_ml->gridctx = gridctx;
  }
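  /*
     Level numbering: ML orders its hierarchy with level 0 the finest (we
     pass ML_INCREASING above), while PETSc MG puts the finest grid at
     Nlevels-1.  So ML level mllevel corresponds to PETSc level
     Nlevels-1-mllevel, which is what the decreasing `level` counter in the
     loops below implements.
  */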
  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        /* The ML matrix A changes its sparsity pattern even though the PETSc A
           does not, thus gridctx[level].A must be recreated! */
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  if (!reuse){
    for (level=0; level<fine_level; level++){
      level1 = level + 1;
      ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

      ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

      ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

      if (level == 0){
        ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
      } else {
        ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
      }
    }
    ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
  }

  /* set the interpolation/restriction between the levels and the operator at each level */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
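/*
   The Richardson/SOR smoothers set in PCSetUp_ML() are only defaults; once
   PCMGSetLevels() has been called they can be overridden at runtime with the
   standard PCMG option prefixes, e.g. (a sketch):

     -mg_levels_ksp_type richardson -mg_levels_pc_type sor
     -mg_coarse_ksp_type preonly    -mg_coarse_pc_type lu
*/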
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCDestroy_ML_Private(pc_ml);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,PrintLevel;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx;
  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine-grid discretization matrix. The coarser-grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in aij format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Key:
   Multigrid options (inherited)
+  -pc_mg_cycles <1>: 1 for V cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tol (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
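/*
   Typical use from application code, a minimal sketch (error checking and
   the setup of A, b, x omitted; `comm` is the application communicator):

     KSP ksp;
     PC  pc;
     KSPCreate(comm,&ksp);
     KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCML);
     KSPSetFromOptions(ksp);
     KSPSolve(ksp,b,x);
*/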
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PC_MG          *mg;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCML);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_ML,&pc_ml);CHKERRQ(ierr);
  mg           = (PC_MG*)pc->data;
  mg->innerctx = pc_ml;

  pc_ml->ml_object     = 0;
  pc_ml->agg_object    = 0;
  pc_ml->gridctx       = 0;
  pc_ml->PetscMLdata   = 0;
  pc_ml->Nlevels       = -1;
  pc_ml->MaxNlevels    = 10;
  pc_ml->MaxCoarseSize = 1;
  pc_ml->CoarsenScheme = 1;
  pc_ml->Threshold     = 0.0;
  pc_ml->DampingFactor = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size          = 0;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END

int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],int allocated_space,int columns[],double values[],int row_lengths[])
{
  PetscErrorCode ierr;
  Mat            Aloc;
  Mat_SeqAIJ     *a;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr);

  for (i=0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    /* ML getrow convention: return 0 when allocated_space is too small, so ML retries with a larger buffer */
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A,Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  return 0;
}
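/*
   PetscML_comm() below implements ML's "comm" callback: on entry p[] holds
   the in_length locally owned values; on return the trailing
   out_length-in_length slots are filled with the ghost values gathered
   through the MPIAIJ scatter, matching the column layout produced by
   MatConvert_MPIAIJ_ML().
*/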
int PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap->n,out_length=ml->Aloc->cmap->n;
  PetscScalar    *array;

  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  return 0;
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);

  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  /* y = A*x, then y += w */
  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
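/*
   MatConvert_MPIAIJ_ML() below merges the diagonal and off-diagonal blocks
   of an MPIAIJ matrix into one sequential matrix per process: row i of Aloc
   holds the diagonal-block entries (columns 0..an-1) followed by the
   off-diagonal entries, whose column indices are shifted by an so that they
   address the ghost values gathered by PetscML_comm().
*/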
/* newtype is ignored because "ml" is not listed as a PETSc MatType yet */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap->n,an=A->cmap->n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    ierr  = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap->n + mpimat->B->cmap->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;

    mat->nonew = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void**)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(shell->y);CHKERRQ(ierr);
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
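/*
   Storage note for MatWrapML_SeqAIJ() below: ML keeps P and R in CSR format
   (rowptr/columns/values), whose arrays can be handed directly to a SeqAIJ
   matrix, while the level operators A are kept in MSR format, where
   values[i] holds the diagonal of row i and columns[i]..columns[i+1]-1 index
   the off-diagonal entries; those must be copied entry by entry.
*/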
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols=matdata->columns,*ml_rowptr=matdata->rowptr,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format. Pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = ml_rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      /* sort ml_cols and ml_vals */
      ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
      for (i=0; i<m; i++) {
        nnz[i] = ml_rowptr[i+1] - ml_rowptr[i];
      }
      aj = ml_cols; aa = ml_vals;
      for (i=0; i<m; i++){
        ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
        aj += nnz[i]; aa += nnz[i];
      }
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,ml_rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
      ierr = PetscFree(nnz);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* ML Amat is in MSR format. Copy its data into a SeqAIJ matrix */
  ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
  ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

  ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
  nz_max = 1;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nnz[i] > nz_max) nz_max = nnz[i];
  }

  ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  if (reuse){
    ierr = MatShellGetContext(*newmat,(void**)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}
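/*
   MatWrapML_MPIAIJ() below copies a parallel ML level operator (MSR format,
   local column indices) into an MPIAIJ matrix; ML_build_global_numbering()
   supplies the global numbering used to remap the local indices for
   MatSetValues().
*/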
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata*)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diag */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc2(nz_max,PetscScalar,&aa,nz_max,PetscInt,&aj);CHKERRQ(ierr);
  /* create global row numbering for an ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k   = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  ierr = PetscFree2(aa,aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}