/*
   Provides an interface to the ML 3.0 smoothed aggregation preconditioner
*/
#include "src/ksp/pc/pcimpl.h"          /*I "petscpc.h" I*/
#include "src/ksp/pc/impls/mg/mgimpl.h" /*I "petscmg.h" I*/
#include "src/mat/impls/aij/seq/aij.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
EXTERN_C_BEGIN
#include <math.h>
#include "ml_include.h"
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;  /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input the PETSc matrix into ML at the fine grid */
typedef struct {
  Mat          A,Aloc;
  Vec          x,y;
  ML_Operator  *mlmat;
  PetscScalar  *pwork;         /* tmp array used by PetscML_comm() */
  PetscInt     rlen_max,*cols; /* used by MatConvert_ML_SeqAIJ() */
  PetscScalar  *vals;          /* used by MatConvert_ML_SeqAIJ() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat          A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator  *mlmat; /* ML matrix associated with A */
  Vec          y;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML           *ml_object;
  ML_Aggregate *agg_object;
  GridCtx      *gridctx;
  FineGridCtx  *PetscMLdata;
  PetscInt     fine_level,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal    Threshold,DampingFactor;
  PetscTruth   SpectralNormScheme_Anorm;
  PetscMPIInt  size;

  PetscErrorCode (*PCSetUp)(PC);
  PetscErrorCode (*PCDestroy)(PC);
} PC_ML;

extern int PetscML_getrow(void *ML_data,int N_requested_rows,int requested_rows[],
                          int allocated_space,int columns[],double values[],int row_lengths[]);
extern int PetscML_matvec(void *ML_data,int in_length,double p[],int out_length,double ap[]);
extern int PetscML_comm(double x[],void *ML_data);
extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,const MatType,Mat*);
extern PetscErrorCode MatDestroy_ML(Mat);
extern PetscErrorCode MatConvert_ML_SeqAIJ(FineGridCtx*,Mat*);
extern PetscErrorCode MatConvert_ML_SHELL(ML_Operator*,Mat*);
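/*
   Overview (a descriptive summary of the code below):
   PCCreate_ML() builds a standard PCMG and then overrides its setup,
   setfromoptions, and destroy function pointers.  PCSetUp_ML() hands the
   fine-grid matrix to ML through the FineGridCtx callbacks
   (PetscML_getrow()/PetscML_matvec()), lets ML generate the
   smoothed-aggregation hierarchy, converts each ML level matrix back into
   a PETSc Mat (SeqAIJ in serial, a shell matrix in parallel), installs
   these in the MG levels, and finally delegates to the saved PCSetUp_MG().
*/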
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
static PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode       ierr;
  PetscMPIInt          size,rank;
  FineGridCtx          *PetscMLdata;
  ML                   *ml_object;
  ML_Aggregate         *agg_object;
  ML_Operator          *mlmat;
  PetscInt             nlocal_allcols,Nlevels,mllevel,level,m,fine_level;
  Mat                  A,Aloc;
  GridCtx              *gridctx;
  PC                   pc_coarse;
  PC_ML                *pc_ml=PETSC_NULL;
  PetscObjectContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A = pc->pmat;
  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(A->comm,&rank);CHKERRQ(ierr);
  pc_ml->size = size;
  if (size > 1){
    Aloc = PETSC_NULL;
    ierr = MatConvert_MPIAIJ_ML(A,0,&Aloc);CHKERRQ(ierr);
  } else {
    Aloc = A;
  }

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNew(FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
  ierr = PetscMemzero(PetscMLdata,sizeof(FineGridCtx));CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
  ierr = PetscMalloc((Aloc->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);
  pc_ml->PetscMLdata = PetscMLdata;

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecSetSizes(PetscMLdata->x,A->n,PETSC_DECIDE);CHKERRQ(ierr);
  } else {
    ierr = VecSetSizes(PetscMLdata->x,Aloc->n,PETSC_DECIDE);CHKERRQ(ierr);
  }
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
  ierr = VecSetSizes(PetscMLdata->y,A->m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);

  /* create the ML discretization matrix at the fine grid */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels <= 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  ierr = MGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->ml_object  = ml_object;
  pc_ml->agg_object = agg_object;

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  fine_level = Nlevels - 1;
  pc_ml->gridctx    = gridctx;
  pc_ml->fine_level = fine_level;
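
  /*
     Level numbering (an illustrative note, e.g. with Nlevels = 3):
       ML level:    0 (finest)  1   2 (coarsest)
       PETSc level: 2 (finest)  1   0 (coarsest)
     i.e. ML level mllevel corresponds to PETSc MG level Nlevels-1-mllevel,
     which is why the loops below walk mllevel up while level counts down.
  */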
  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;
  level = fine_level - 1;
  if (size == 1){ /* convert ML mat format into petsc seqaij format */
    PetscMLdata->rlen_max = A->N;
    ierr = PetscMalloc(PetscMLdata->rlen_max*(sizeof(PetscScalar)+sizeof(PetscInt)),&PetscMLdata->cols);CHKERRQ(ierr);
    PetscMLdata->vals = (PetscScalar*)(PetscMLdata->cols + PetscMLdata->rlen_max);
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      PetscMLdata->mlmat = &(ml_object->Pmat[mllevel]);
      ierr = MatConvert_ML_SeqAIJ(PetscMLdata,&gridctx[level].P);CHKERRQ(ierr);
      PetscMLdata->mlmat = &(ml_object->Amat[mllevel]);
      ierr = MatConvert_ML_SeqAIJ(PetscMLdata,&gridctx[level].A);CHKERRQ(ierr);
      PetscMLdata->mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr = MatConvert_ML_SeqAIJ(PetscMLdata,&gridctx[level].R);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML mat format into petsc shell format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr = MatConvert_ML_SHELL(mlmat,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Amat[mllevel]);
      ierr = MatConvert_ML_SHELL(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr = MatConvert_ML_SHELL(mlmat,&gridctx[level].R);CHKERRQ(ierr);
      level--;
    }
  }

  /* create coarse level and the interpolation between the levels */
  level = fine_level;
  while (level >= 0){
    if (level != fine_level){
      ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].x);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
      ierr = MGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

      ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].b);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->m,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
      ierr = MGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);
    }
    ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].r);CHKERRQ(ierr);
    ierr = VecSetSizes(gridctx[level].r,gridctx[level].A->m,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].r,VECMPI);CHKERRQ(ierr);
    ierr = MGSetR(pc,level,gridctx[level].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = MGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = MGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
      ierr = MGSetResidual(pc,level,MGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
      if (level == fine_level){
        ierr = KSPSetOptionsPrefix(gridctx[level].ksp,"mg_fine_");CHKERRQ(ierr);
        ierr = MGSetR(pc,level,gridctx[level].r);CHKERRQ(ierr);
      }
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

    if (level < fine_level){
      if (size > 1){
        ierr = KSPGetPC(gridctx[level].ksp,&pc_coarse);CHKERRQ(ierr);
        ierr = PCSetType(pc_coarse,PCNONE);CHKERRQ(ierr);
      }
      ierr = MGSetInterpolate(pc,level+1,gridctx[level].P);CHKERRQ(ierr);
      ierr = MGSetRestriction(pc,level+1,gridctx[level].R);CHKERRQ(ierr);
    }
    level--;
  }

  /* now call PCSetUp_MG() */
  /*--------------------------------*/
  ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "PetscObjectContainerDestroy_PC_ML"
PetscErrorCode PetscObjectContainerDestroy_PC_ML(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level;

  PetscFunctionBegin;
  if (pc_ml->size > 1){ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
  ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);
  ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  if (pc_ml->size == 1){ierr = PetscFree(pc_ml->PetscMLdata->cols);CHKERRQ(ierr);}
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  level = pc_ml->fine_level;
  while (level >= 0){
    if (level != pc_ml->fine_level){
      ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);
      ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);
      ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);
      ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);
      ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);
    }
    ierr = VecDestroy(pc_ml->gridctx[level].r);CHKERRQ(ierr);
    level--;
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
                  that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
static PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode       ierr;
  PC_ML                *pc_ml=PETSC_NULL;
  PetscObjectContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
    /* restore the original PCMG destroy routine saved in PCCreate_ML() */
    pc->ops->destroy = pc_ml->PCDestroy;
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  /* detach pc and PC_ML and dereference the container */
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr);
  ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr);

  ierr = PetscObjectContainerDestroy(container);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
static PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode       ierr;
  PetscInt             indx,m,PrintLevel,MaxNlevels,MaxCoarseSize;
  PetscReal            Threshold,DampingFactor;
  PetscTruth           flg;
  const char           *type[]   = {"additive","multiplicative","full","cascade","kascade"};
  const char           *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_ML                *pc_ml=PETSC_NULL;
  PetscObjectContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  ierr = PetscOptionsHead("MG options");CHKERRQ(ierr);
  /* inherited MG options */
  ierr = PetscOptionsInt("-pc_mg_cycles","1 for V cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MGSetCycles(pc,m);CHKERRQ(ierr);
  }
  ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MGSetNumberSmoothUp(pc,m);CHKERRQ(ierr);
  }
  ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MGSetNumberSmoothDown(pc,m);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEList("-pc_mg_type","Multigrid type","MGSetType",type,5,type[1],&indx,&flg);CHKERRQ(ierr);
  if (flg) {
    MGType mg = (MGType) 0;
    switch (indx) {
    case 0:
      mg = MGADDITIVE;
      break;
    case 1:
      mg = MGMULTIPLICATIVE;
      break;
    case 2:
      mg = MGFULL;
      break;
    case 3:
      mg = MGKASKADE;
      break;
    case 4:
      mg = MGKASKADE;
      break;
    }
    ierr = MGSetType(pc,mg);CHKERRQ(ierr);
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);

  /* ML options */
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  /* set defaults */
  PrintLevel    = 0;
  MaxNlevels    = 10;
  MaxCoarseSize = 1;
  indx          = 0;
  Threshold     = 0.0;
  DampingFactor = 4.0/3.0;

  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);

  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",MaxNlevels,&MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->MaxNlevels = MaxNlevels;

  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",MaxCoarseSize,&MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->MaxCoarseSize = MaxCoarseSize;

  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->CoarsenScheme = indx;

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",DampingFactor,&DampingFactor,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->DampingFactor = DampingFactor;

  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",Threshold,&Threshold,PETSC_NULL);CHKERRQ(ierr);
  pc_ml->Threshold = Threshold;

  ierr = PetscOptionsLogical("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",PETSC_FALSE,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);

  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid (smoothed aggregation) preconditioning via the ML package.
     This preconditioner requires that you provide the fine grid discretization matrix. The
     coarser grid matrices and restriction/interpolation operators are computed by ML and
     wrapped as PETSc matrices (SeqAIJ in serial, shell matrices in parallel).

   Options Database Key: (not done yet!)
+  -pc_mg_maxlevels <nlevels> - maximum number of levels including finest
.  -pc_mg_cycles 1 or 2 - for V or W-cycle
.  -pc_mg_smoothup <n> - number of smoothing steps after interpolation
.  -pc_mg_smoothdown <n> - number of smoothing steps before applying restriction operator
.  -pc_mg_type <additive,multiplicative,full,cascade> - multiplicative is the default
.  -pc_mg_monitor - print information on the multigrid convergence
-  -pc_mg_dump_matlab - dumps the matrices for each level and the restriction/interpolation matrices
                        to the Socket viewer for reading from Matlab.

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           MGSetLevels(), MGGetLevels(), MGSetType(), MGSetCycles(), MGSetNumberSmoothDown(),
           MGSetNumberSmoothUp(), MGGetCoarseSolve(), MGSetResidual(), MGSetInterpolation(),
           MGSetRestriction(), MGGetSmoother(), MGGetSmootherUp(), MGGetSmootherDown(),
           MGSetCyclesOnLevel(), MGSetRhs(), MGSetX(), MGSetR()
M*/
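
/*
   Example usage (a minimal sketch; it assumes this preconditioner has been
   registered under the type name "ml" and that ksp and A are an existing
   KSP and assembled matrix):

     ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,"ml");CHKERRQ(ierr);

   or, equivalently, from the command line:

     -pc_type ml -pc_ml_maxNlevels 4 -pc_ml_CoarsenScheme MIS
*/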
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PCCreate_ML(PC pc)
{
  PetscErrorCode       ierr;
  PC_ML                *pc_ml;
  PetscObjectContainer container;

  PetscFunctionBegin;
  /* initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNew(PC_ML,&pc_ml);CHKERRQ(ierr);
  ierr = PetscMemzero(pc_ml,sizeof(PC_ML));CHKERRQ(ierr);
  ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetPointer(container,pc_ml);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetUserDestroy(container,PetscObjectContainerDestroy_PC_ML);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr);

  /* save the PCMG routines so they can be called from, and restored by, the PCML versions */
  pc_ml->PCSetUp   = pc->ops->setup;
  pc_ml->PCDestroy = pc->ops->destroy;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END

/* Getrow callback used by ML on the fine grid: following the ML getrow
   convention, it returns 0 when allocated_space is too small (ML then
   retries with more space) and 1 on success */
int PetscML_getrow(void *ML_data,int N_requested_rows,int requested_rows[],
                   int allocated_space,int columns[],double values[],int row_lengths[])
{
  PetscErrorCode ierr;
  Mat            Aloc;
  Mat_SeqAIJ     *a;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr);

  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ((row >= 0) && (row <= (m-1))) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}
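
/*
   PetscML_matvec() and PetscML_comm() below implement ML's parallel matvec
   protocol on top of the merged local matrix Aloc: ML passes in the locally
   owned part of the vector, PetscML_comm() appends the ghost values (via the
   MPIAIJ scatter context) after the owned entries, and a sequential MatMult()
   with Aloc then produces the local part of the product.
*/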
int PetscML_matvec(void *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A, Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  return 0;
}

int PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->m,out_length=ml->Aloc->n;
  PetscScalar    *array;

  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  ierr = VecScatterEnd(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  /* append the ghost values after the locally owned entries of p */
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  return 0;
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* MatMultAdd_ML - Compute y = w + A*x */
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode    ierr;
  Mat_MLShell       *shell;
  PetscScalar       *xarray,*yarray;
  const PetscScalar one=1.0;
  PetscInt          x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);

  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  ierr = VecAXPY(&one,w,y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
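
/*
   MatConvert_MPIAIJ_ML() below merges the diagonal (A) and off-diagonal (B)
   blocks of an MPIAIJ matrix into a single sequential AIJ matrix per process.
   For illustration, a row with diagonal-block columns {0,2} and off-diagonal
   columns {0,1} (local numbering) becomes columns {0,2,an,an+1} of Aloc,
   where an is the number of local columns; this matches the ordering that
   PetscML_comm() uses when appending ghost values after the owned entries.
*/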
/* newtype is ignored because "ml" is not listed under Petsc MatType yet */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,const MatType newtype,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->m,an=A->n,i,j,k;
  PetscInt       *ci,*cj,ncols;
  MatReuse       scall=MAT_INITIAL_MATRIX;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (*Aloc) scall = MAT_REUSE_MATRIX;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->n + mpimat->B->n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so PETSc doesn't free the user's arrays. */
    /* Since these are PETSc arrays, change the flags so they are freed as necessary.     */
    mat           = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->freedata = PETSC_TRUE;
    mat->nonew    = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(shell->y);CHKERRQ(ierr);
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatConvert_ML_SeqAIJ"
PetscErrorCode MatConvert_ML_SeqAIJ(FineGridCtx *ml,Mat *newmat)
{
  PetscErrorCode        ierr;
  PetscInt              i;
  ML_Operator           *mat=ml->mlmat;
  PetscInt              m=mat->outvec_leng,n=mat->invec_leng,*nnz;
  struct ML_CSR_MSRdata *matdata=PETSC_NULL;

  PetscFunctionBegin;
  if (mat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mat->getrow = NULL");
  if (m != n){ /* pass array pointers if ml->mlmat is Pmat or Rmat */
    matdata = (struct ML_CSR_MSRdata *)mat->data;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,matdata->columns,matdata->values,newmat);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  ierr = MatCreate(PETSC_COMM_SELF,m,n,PETSC_DECIDE,PETSC_DECIDE,newmat);CHKERRQ(ierr);
  ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);
  /* first pass: count the nonzeros in each row for preallocation */
  ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
  for (i=0; i<m; i++){
    ML_get_matrix_row(mat,1,&i,&ml->rlen_max,&ml->cols,&ml->vals,&nnz[i],0);
  }
  ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);

  /* second pass: fetch each row again and insert its entries */
  for (i=0; i<m; i++){
    ML_get_matrix_row(mat,1,&i,&ml->rlen_max,&ml->cols,&ml->vals,&nnz[i],0);
    ierr = MatSetValues(*newmat,1,&i,nnz[i],ml->cols,ml->vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
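
/*
   MatConvert_ML_SHELL() below leaves the level matrix stored inside ML and
   exposes it to PETSc as a shell matrix whose MatMult()/MatMultAdd() call
   back into ML_Operator_Apply(); see MatMult_ML() and MatMultAdd_ML() above.
*/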
#undef __FUNCT__
#define __FUNCT__ "MatConvert_ML_SHELL"
PetscErrorCode MatConvert_ML_SHELL(ML_Operator *mlmat,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
  } else {
    MLcomm = mlmat->comm;
    ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
    ierr = PetscMemzero(shellctx,sizeof(Mat_MLShell));CHKERRQ(ierr);
    ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
    ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void *)MatMult_ML);CHKERRQ(ierr);
    ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void *)MatMultAdd_ML);CHKERRQ(ierr);
    shellctx->A     = *newmat;
    shellctx->mlmat = mlmat;
    ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
    ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
    (*newmat)->ops->destroy = MatDestroy_ML;
  }
  PetscFunctionReturn(0);
}