#define PETSCKSP_DLL

/*
   Provides an interface to the ML 4.0 smoothed aggregation preconditioner.
*/
#include "private/pcimpl.h"   /*I "petscpc.h" I*/
#include "src/ksp/pc/impls/mg/mgimpl.h"   /*I "petscmg.h" I*/
#include "src/mat/impls/aij/seq/aij.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"

#include <math.h>
EXTERN_C_BEGIN
/* The HAVE_CONFIG_H flag is required by the ML include files */
#if !defined(HAVE_CONFIG_H)
#define HAVE_CONFIG_H
#endif
#include "ml_include.h"
EXTERN_C_END

/* The context (data structure) at each grid level */
typedef struct {
  Vec x,b,r;  /* global vectors */
  Mat A,P,R;
  KSP ksp;
} GridCtx;

/* The context used to input a PETSc matrix into ML at the fine grid */
typedef struct {
  Mat         A;      /* PETSc matrix in AIJ format */
  Mat         Aloc;   /* local portion of A to be used by ML */
  Vec         x,y;
  ML_Operator *mlmat;
  PetscScalar *pwork; /* tmp array used by PetscML_comm() */
} FineGridCtx;

/* The context that associates an ML matrix with a PETSc shell matrix */
typedef struct {
  Mat         A;      /* PETSc shell matrix associated with mlmat */
  ML_Operator *mlmat; /* ML matrix associated with A */
  Vec         y;
} Mat_MLShell;

/* Private context for the ML preconditioner */
typedef struct {
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  GridCtx        *gridctx;
  FineGridCtx    *PetscMLdata;
  PetscInt       Nlevels,MaxNlevels,MaxCoarseSize,CoarsenScheme;
  PetscReal      Threshold,DampingFactor;
  PetscTruth     SpectralNormScheme_Anorm;
  PetscMPIInt    size; /* size of communicator for pc->pmat */
  PetscErrorCode (*PCSetUp)(PC);
  PetscErrorCode (*PCDestroy)(PC);
} PC_ML;

extern int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                          int allocated_space,int columns[],double values[],int row_lengths[]);
extern int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[]);
extern int PetscML_comm(double x[],void *ML_data);
extern PetscErrorCode MatMult_ML(Mat,Vec,Vec);
extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec);
extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,MatType,MatReuse,Mat*);
extern PetscErrorCode MatDestroy_ML(Mat);
extern PetscErrorCode MatWrapML_SeqAIJ(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode MatWrapML_MPIAIJ(ML_Operator*,Mat*);
extern PetscErrorCode MatWrapML_SHELL(ML_Operator*,MatReuse,Mat*);
extern PetscErrorCode PetscContainerDestroy_PC_ML(void*);
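/*
   The PC_ML context is not stored in pc->data (PCMG owns that slot); instead it is
   attached to the PC as a named PetscContainer.  A minimal sketch of the retrieval
   pattern, the same one PCSetUp_ML(), PCDestroy_ML() and PCSetFromOptions_ML()
   use below (pc is any already created PC of type ml):

     PC_ML          *pc_ml;
     PetscContainer container;

     ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject*)&container);CHKERRQ(ierr);
     if (container) {
       ierr = PetscContainerGetPointer(container,(void**)&pc_ml);CHKERRQ(ierr);
     }
*/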
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_ML - Prepares for the use of the ML preconditioner
                by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
extern PetscErrorCode PCSetFromOptions_MG(PC);
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_ML"
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;
  FineGridCtx    *PetscMLdata;
  ML             *ml_object;
  ML_Aggregate   *agg_object;
  ML_Operator    *mlmat;
  PetscInt       nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level;
  Mat            A,Aloc;
  GridCtx        *gridctx;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  MatReuse       reuse = MAT_INITIAL_MATRIX;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  if (pc->setupcalled){
    if (pc->flag == SAME_NONZERO_PATTERN){
      reuse       = MAT_REUSE_MATRIX;
      PetscMLdata = pc_ml->PetscMLdata;
      gridctx     = pc_ml->gridctx;
      /* ML objects cannot be reused */
      ML_Destroy(&pc_ml->ml_object);
      ML_Aggregate_Destroy(&pc_ml->agg_object);
    } else {
      PC_ML          *pc_ml_new = PETSC_NULL;
      PetscContainer container_new;
      ierr = PetscNew(PC_ML,&pc_ml_new);CHKERRQ(ierr);
      ierr = PetscLogObjectMemory(pc,sizeof(PC_ML));CHKERRQ(ierr);
      ierr = PetscContainerCreate(PETSC_COMM_SELF,&container_new);CHKERRQ(ierr);
      ierr = PetscContainerSetPointer(container_new,pc_ml_new);CHKERRQ(ierr);
      ierr = PetscContainerSetUserDestroy(container_new,PetscContainerDestroy_PC_ML);CHKERRQ(ierr);
      ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container_new);CHKERRQ(ierr);

      ierr = PetscMemcpy(pc_ml_new,pc_ml,sizeof(PC_ML));CHKERRQ(ierr);
      ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
      pc_ml = pc_ml_new;
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  A    = pc->pmat;
  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  pc_ml->size = size;
  if (size > 1){
    if (reuse) Aloc = PetscMLdata->Aloc;
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,reuse,&Aloc);CHKERRQ(ierr);
  } else {
    Aloc = A;
  }

  /* create and initialize struct 'PetscMLdata' */
  if (!reuse){
    ierr = PetscNew(FineGridCtx,&PetscMLdata);CHKERRQ(ierr);
    pc_ml->PetscMLdata = PetscMLdata;
    ierr = PetscMalloc((Aloc->cmap.n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap.n,Aloc->cmap.n);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr);

    ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr);
    ierr = VecSetSizes(PetscMLdata->y,A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  }
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
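  /*
     Layout note (a small worked example, not tied to any particular run):
     MatConvert_MPIAIJ_ML() above builds Aloc as a sequential matrix holding this
     process's rows of A with the diagonal-block columns first, followed by the
     off-process ("ghost") columns.  For a process owning 2 of 4 global rows with
     one ghost column, Aloc has 2 rows and 3 columns: columns 0..1 are the local
     diagonal block, column 2 holds the off-process coupling.  PetscML_comm()
     fills those trailing ghost entries of the work array before each local
     multiply.
  */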
  /* create the ML discretization matrix at the fine grid */
  /* ML requires input of the fine-grid matrix; it determines nlevels */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols);
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  /* aggregation */
  ML_Aggregate_Create(&agg_object);
  pc_ml->agg_object = agg_object;

  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) {
  case 1:
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold);
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor);
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object);
  }

  Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  if (pc->setupcalled && pc_ml->Nlevels != Nlevels) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"previous Nlevels %D and current Nlevels %d must be the same",pc_ml->Nlevels,Nlevels);
  pc_ml->Nlevels = Nlevels;
  if (!pc->setupcalled){
    ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr);
    ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  }

  if (!reuse){
    ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
    pc_ml->gridctx = gridctx;
  }
  fine_level = Nlevels - 1;

  /* wrap the ML matrices by PETSc shell matrices at the coarsened grids.
     Level 0 is the finest grid for ML, but the coarsest for PETSc! */
  gridctx[fine_level].A = A;
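  /*
     Level-numbering example (hypothetical Nlevels = 4): ML counts ML_INCREASING
     from the finest grid, while PCMG counts up from the coarsest, so ML level
     mllevel corresponds to PETSc level (Nlevels-1) - mllevel:

       ML mllevel :  0 (finest)   1   2   3 (coarsest)
       PETSc level:  3 (finest)   2   1   0 (coarsest)

     The loops below therefore start at mllevel = 1 with level = fine_level - 1
     and decrement level as mllevel increases.
  */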
  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into SeqAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        /* the ML matrix A may change its sparsity pattern even though the PETSc A does not,
           so gridctx[level].A must be recreated! */
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into MPIAIJ format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SHELL(mlmat,reuse,&gridctx[level].R);CHKERRQ(ierr);

      mlmat = &(ml_object->Amat[mllevel]);
      if (reuse){
        ierr = MatDestroy(gridctx[level].A);CHKERRQ(ierr);
      }
      ierr = MatWrapML_MPIAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  if (!reuse){
    for (level=0; level<fine_level; level++){
      level1 = level + 1;
      ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].x);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr);

      ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].b);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr);

      ierr = VecCreate(gridctx[level1].A->comm,&gridctx[level1].r);CHKERRQ(ierr);
      ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap.n,PETSC_DECIDE);CHKERRQ(ierr);
      ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr);
      ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

      if (level == 0){
        ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
      } else {
        ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
      }
    }
    ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);
  }

  /* create the coarse levels and the interpolation between the levels */
  for (level=0; level<fine_level; level++){
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  }
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* now call PCSetUp_MG() */
  /*-----------------------*/
  ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
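/*
   Reuse note: the SAME_NONZERO_PATTERN branch of PCSetUp_ML() above is reached
   when the caller re-assembles the operator without changing its sparsity and
   signals that to KSP.  A minimal sketch of such a caller (the ksp, A, b, x
   objects and nsteps are assumed to exist in the application):

     for (step=0; step<nsteps; step++) {
       // ... refill the nonzero values of A in place ...
       ierr = KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
       ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr); // re-runs PCSetUp_ML() on the reuse path
     }
*/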
#undef __FUNCT__
#define __FUNCT__ "PetscContainerDestroy_PC_ML"
PetscErrorCode PetscContainerDestroy_PC_ML(void *ptr)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml = (PC_ML*)ptr;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  if (pc_ml->size > 1){ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);}
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
  if (pc_ml->PetscMLdata->x){ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr);}
  if (pc_ml->PetscMLdata->y){ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr);}
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  for (level=0; level<fine_level; level++){
    if (pc_ml->gridctx[level].A){ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].P){ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].R){ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].x){ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level].b){ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr);}
    if (pc_ml->gridctx[level+1].r){ierr = VecDestroy(pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  ierr = PetscFree(pc_ml);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------- */
/*
   PCDestroy_ML - Destroys the private context for the ML preconditioner
   that was created with PCCreate_ML().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_ML"
PetscErrorCode PCDestroy_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
    pc->ops->destroy = pc_ml->PCDestroy;
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }
  ierr = PetscContainerDestroy(container);CHKERRQ(ierr);

  /* detach pc and PC_ML and dereference container */
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr);
  ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_ML"
PetscErrorCode PCSetFromOptions_ML(PC pc)
{
  PetscErrorCode ierr;
  PetscInt       indx,m,PrintLevel,MaxNlevels,MaxCoarseSize;
  PetscReal      Threshold,DampingFactor;
  PetscTruth     flg;
  const char     *scheme[] = {"Uncoupled","Coupled","MIS","METIS"};
  PC_ML          *pc_ml=PETSC_NULL;
  PetscContainer container;
  PCMGType       mgtype;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exist");
  }

  /* inherited MG options */
  ierr = PetscOptionsHead("Multigrid options (inherited)");CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_cycles","1 for V-cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_mg_type","Multigrid type","PCMGSetType",PCMGTypes,(PetscEnum)PC_MG_MULTIPLICATIVE,(PetscEnum*)&mgtype,&flg);CHKERRQ(ierr);
  ierr = PetscOptionsTail();CHKERRQ(ierr);

  /* ML options */
  ierr = PetscOptionsHead("ML options");CHKERRQ(ierr);
  /* set defaults */
  PrintLevel = 0;
  indx       = 0;
  ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr);
  ML_Set_PrintLevel(PrintLevel);
  ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",pc_ml->MaxNlevels,&pc_ml->MaxNlevels,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",pc_ml->MaxCoarseSize,&pc_ml->MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate coarsening scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr); /* ??? */
  pc_ml->CoarsenScheme = indx;

  ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",pc_ml->DampingFactor,&pc_ml->DampingFactor,PETSC_NULL);CHKERRQ(ierr);

  ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tolerance","ML_Aggregate_Set_Threshold",pc_ml->Threshold,&pc_ml->Threshold,PETSC_NULL);CHKERRQ(ierr);

  ierr = PetscOptionsTruth("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",pc_ml->SpectralNormScheme_Anorm,&pc_ml->SpectralNormScheme_Anorm,PETSC_NULL);CHKERRQ(ierr);

  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
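/*
   A minimal sketch of exercising these options from the command line (the
   executable name ./app and the process count are placeholders):

     mpiexec -n 4 ./app -ksp_type cg -pc_type ml \
        -pc_ml_maxNlevels 5 -pc_ml_maxCoarseSize 100 \
        -pc_ml_CoarsenScheme MIS -pc_ml_DampingFactor 1.2 \
        -pc_mg_type multiplicative -pc_mg_cycles 1

   The application only needs to call KSPSetFromOptions() before KSPSolve().
*/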
/* -------------------------------------------------------------------------- */
/*
   PCCreate_ML - Creates an ML preconditioner context, PC_ML,
   and sets this as the private data within the generic preconditioning
   context, PC, that was created within PCCreate().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/

/*MC
     PCML - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine-grid discretization matrix. The coarser-grid matrices and restriction/interpolation
       operators are computed by ML, with the matrices converted to PETSc matrices in AIJ format
       and the restriction/interpolation operators wrapped as PETSc shell matrices.

   Options Database Keys:
   Multigrid options (inherited)
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade

   ML options:
+  -pc_ml_PrintLevel <0>: Print level (ML_Set_PrintLevel)
.  -pc_ml_maxNlevels <10>: Maximum number of levels (None)
.  -pc_ml_maxCoarseSize <1>: Maximum coarsest mesh size (ML_Aggregate_Set_MaxCoarseSize)
.  -pc_ml_CoarsenScheme <Uncoupled>: (one of) Uncoupled Coupled MIS METIS
.  -pc_ml_DampingFactor <1.33333>: P damping factor (ML_Aggregate_Set_DampingFactor)
.  -pc_ml_Threshold <0>: Smoother drop tolerance (ML_Aggregate_Set_Threshold)
-  -pc_ml_SpectralNormScheme_Anorm <false>: Method used for estimating spectral radius (ML_Aggregate_Set_SpectralNormScheme_Anorm)

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
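/*
   A minimal sketch of selecting this preconditioner programmatically instead of
   through the options database; the KSP object ksp is assumed to exist, and the
   registered type string is assumed to be "ml":

     PC pc;
     ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
     ierr = PCSetType(pc,"ml");CHKERRQ(ierr); // runs PCCreate_ML() below
*/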
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_ML"
PetscErrorCode PETSCKSP_DLLEXPORT PCCreate_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_ML          *pc_ml;
  PetscContainer container;

  PetscFunctionBegin;
  /* PCML is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */

  /* create a supporting struct and attach it to pc */
  ierr = PetscNew(PC_ML,&pc_ml);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(pc,sizeof(PC_ML));CHKERRQ(ierr);
  ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscContainerSetPointer(container,pc_ml);CHKERRQ(ierr);
  ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_PC_ML);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr);

  pc_ml->ml_object                = 0;
  pc_ml->agg_object               = 0;
  pc_ml->gridctx                  = 0;
  pc_ml->PetscMLdata              = 0;
  pc_ml->Nlevels                  = -1;
  pc_ml->MaxNlevels               = 10;
  pc_ml->MaxCoarseSize            = 1;
  pc_ml->CoarsenScheme            = 1; /* ??? */
  pc_ml->Threshold                = 0.0;
  pc_ml->DampingFactor            = 4.0/3.0;
  pc_ml->SpectralNormScheme_Anorm = PETSC_FALSE;
  pc_ml->size                     = 0;

  pc_ml->PCSetUp   = pc->ops->setup;
  pc_ml->PCDestroy = pc->ops->destroy;

  /* overwrite the pointers of PCMG by the functions of PCML */
  pc->ops->setfromoptions = PCSetFromOptions_ML;
  pc->ops->setup          = PCSetUp_ML;
  pc->ops->destroy        = PCDestroy_ML;
  PetscFunctionReturn(0);
}
EXTERN_C_END

int PetscML_getrow(ML_Operator *ML_data,int N_requested_rows,int requested_rows[],
                   int allocated_space,int columns[],double values[],int row_lengths[])
{
  PetscErrorCode ierr;
  Mat            Aloc;
  Mat_SeqAIJ     *a;
  PetscInt       m,i,j,k=0,row,*aj;
  PetscScalar    *aa;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyGetrowData(ML_data);

  Aloc = ml->Aloc;
  a    = (Mat_SeqAIJ*)Aloc->data;
  ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr);

  for (i = 0; i<N_requested_rows; i++) {
    row            = requested_rows[i];
    row_lengths[i] = a->ilen[row];
    if (allocated_space < k+row_lengths[i]) return(0);
    if ( (row >= 0) && (row <= (m-1)) ) {
      aj = a->j + a->i[row];
      aa = a->a + a->i[row];
      for (j=0; j<row_lengths[i]; j++){
        columns[k]  = aj[j];
        values[k++] = aa[j];
      }
    }
  }
  return(1);
}

int PetscML_matvec(ML_Operator *ML_data,int in_length,double p[],int out_length,double ap[])
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_Get_MyMatvecData(ML_data);
  Mat            A=ml->A,Aloc=ml->Aloc;
  PetscMPIInt    size;
  PetscScalar    *pwork=ml->pwork;
  PetscInt       i;

  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  if (size == 1){
    ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr);
  } else {
    for (i=0; i<in_length; i++) pwork[i] = p[i];
    PetscML_comm(pwork,ml);
    ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr);
  }
  ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr);
  ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr);
  ierr = VecResetArray(ml->x);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  return 0;
}

int PetscML_comm(double p[],void *ML_data)
{
  PetscErrorCode ierr;
  FineGridCtx    *ml=(FineGridCtx*)ML_data;
  Mat            A=ml->A;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscMPIInt    size;
  PetscInt       i,in_length=A->rmap.n,out_length=ml->Aloc->cmap.n;
  PetscScalar    *array;

  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  if (size == 1) return 0;

  ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecResetArray(ml->y);CHKERRQ(ierr);
  ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr);
  for (i=in_length; i<out_length; i++){
    p[i] = array[i-in_length];
  }
  ierr = VecRestoreArray(a->lvec,&array);CHKERRQ(ierr);
  return 0;
}
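/*
   Callback contract note: ML calls PetscML_getrow() above with a caller-supplied
   buffer; returning 0 signals that the buffer was too small (so ML can retry with
   more space), and returning 1 signals success, matching the early return on
   allocated_space above.  A sketch of how ML might fetch one row (the buffer
   size 32 is illustrative only):

     int    row = 0,cols[32],len;
     double vals[32];
     if (!PetscML_getrow(ML_data,1,&row,32,cols,vals,&len)) {
       // reallocate a larger cols/vals buffer and call again
     }
*/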
#undef __FUNCT__
#define __FUNCT__ "MatMult_ML"
PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);
  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/* MatMultAdd_ML - Compute y = w + A*x */
#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_ML"
PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;
  PetscScalar    *xarray,*yarray;
  PetscInt       x_length,y_length;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecGetArray(y,&yarray);CHKERRQ(ierr);

  x_length = shell->mlmat->invec_leng;
  y_length = shell->mlmat->outvec_leng;

  ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray);

  ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
  ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr);
  ierr = VecAXPY(y,1.0,w);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* newtype is ignored because "ml" is not listed under PETSc MatType yet */
#undef __FUNCT__
#define __FUNCT__ "MatConvert_MPIAIJ_ML"
PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,MatType newtype,MatReuse scall,Mat *Aloc)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
  PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscScalar    *aa=a->a,*ba=b->a,*ca;
  PetscInt       am=A->rmap.n,an=A->cmap.n,i,j,k;
  PetscInt       *ci,*cj,ncols;

  PetscFunctionBegin;
  if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an);

  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
    ci[0] = 0;
    for (i=0; i<am; i++){
      ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
    }
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
    ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);

    k = 0;
    for (i=0; i<am; i++){
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = *aj++;
        ca[k++] = *aa++;
      }
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) {
        cj[k]   = an + (*bj); bj++;
        ca[k++] = *ba++;
      }
    }
    if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]);

    /* put together the new matrix */
    an   = mpimat->A->cmap.n + mpimat->B->cmap.n;
    ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr);

    /* MatCreateSeqAIJWithArrays flags the matrix so that PETSc doesn't free the user's arrays;
       since these are PETSc arrays, change the flags to free them as necessary. */
    mat          = (Mat_SeqAIJ*)(*Aloc)->data;
    mat->free_a  = PETSC_TRUE;
    mat->free_ij = PETSC_TRUE;
    mat->nonew   = 0;
  } else if (scall == MAT_REUSE_MATRIX){
    mat = (Mat_SeqAIJ*)(*Aloc)->data;
    ci = mat->i; cj = mat->j; ca = mat->a;
    for (i=0; i<am; i++) {
      /* diagonal portion of A */
      ncols = ai[i+1] - ai[i];
      for (j=0; j<ncols; j++) *ca++ = *aa++;
      /* off-diagonal portion of A */
      ncols = bi[i+1] - bi[i];
      for (j=0; j<ncols; j++) *ca++ = *ba++;
    }
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
  }
  PetscFunctionReturn(0);
}
extern PetscErrorCode MatDestroy_Shell(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_ML"
PetscErrorCode MatDestroy_ML(Mat A)
{
  PetscErrorCode ierr;
  Mat_MLShell    *shell;

  PetscFunctionBegin;
  ierr = MatShellGetContext(A,(void **)&shell);CHKERRQ(ierr);
  ierr = VecDestroy(shell->y);CHKERRQ(ierr);
  ierr = PetscFree(shell);CHKERRQ(ierr);
  ierr = MatDestroy_Shell(A);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)A,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SeqAIJ"
PetscErrorCode MatWrapML_SeqAIJ(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscErrorCode        ierr;
  PetscInt              m=mlmat->outvec_leng,n=mlmat->invec_leng,*nnz,nz_max;
  PetscInt              *ml_cols=matdata->columns,*aj,i,j,k;
  PetscScalar           *ml_vals=matdata->values,*aa;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  if (m != n){ /* ML Pmat and Rmat are in CSR format; pass the array pointers into the SeqAIJ matrix */
    if (reuse){
      Mat_SeqAIJ *aij = (Mat_SeqAIJ*)(*newmat)->data;
      aij->i = matdata->rowptr;
      aij->j = ml_cols;
      aij->a = ml_vals;
    } else {
      ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }
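  /*
     MSR layout example (illustrative sizes, not from any particular run): for a
     square m x m Amat, ML stores the diagonal in ml_vals[0..m-1], row pointers in
     ml_cols[0..m], and the off-diagonal entries of row i at positions
     ml_cols[i]..ml_cols[i+1]-1 of both arrays.  For m = 2 with one off-diagonal
     entry per row:

       ml_cols = { 3, 4, 5,  1, 0 }         row pointers | off-diag column indices
       ml_vals = { d0, d1, -, a01, a10 }    diagonal     | off-diag values

     so row 0 is (d0 at column 0, a01 at column 1).  The copy loop below rebuilds
     each row as its diagonal entry plus off-diagonals, then sorts by column.
  */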
  /* ML Amat is in MSR format; copy its data into a SeqAIJ matrix */
  ierr = MatCreate(PETSC_COMM_SELF,newmat);CHKERRQ(ierr);
  ierr = MatSetSizes(*newmat,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr);

  ierr = PetscMalloc((m+1)*sizeof(PetscInt),&nnz);CHKERRQ(ierr);
  nz_max = 0;
  for (i=0; i<m; i++) {
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nnz[i] > nz_max) nz_max = nnz[i];
  }

  ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr);
  ierr = MatSetOption(*newmat,MAT_COLUMNS_SORTED);CHKERRQ(ierr);

  ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
  aa   = (PetscScalar*)(aj + nz_max);

  for (i=0; i<m; i++){
    k = 0;
    /* diagonal entry */
    aj[k] = i; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = ml_cols[j]; aa[k++] = ml_vals[j];
    }
    /* sort aj and aa */
    ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr);
    ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(aj);CHKERRQ(ierr);
  ierr = PetscFree(nnz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatWrapML_SHELL"
PetscErrorCode MatWrapML_SHELL(ML_Operator *mlmat,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscInt       m,n;
  ML_Comm        *MLcomm;
  Mat_MLShell    *shellctx;

  PetscFunctionBegin;
  m = mlmat->outvec_leng;
  n = mlmat->invec_leng;
  if (!m || !n){
    *newmat = PETSC_NULL;
    PetscFunctionReturn(0);
  }

  if (reuse){
    ierr = MatShellGetContext(*newmat,(void **)&shellctx);CHKERRQ(ierr);
    shellctx->mlmat = mlmat;
    PetscFunctionReturn(0);
  }

  MLcomm = mlmat->comm;
  ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr);
  ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void(*)(void))MatMult_ML);CHKERRQ(ierr);
  ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void(*)(void))MatMultAdd_ML);CHKERRQ(ierr);
  shellctx->A     = *newmat;
  shellctx->mlmat = mlmat;
  ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr);
  ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr);
  (*newmat)->ops->destroy = MatDestroy_ML;
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatWrapML_MPIAIJ"
PetscErrorCode MatWrapML_MPIAIJ(ML_Operator *mlmat,Mat *newmat)
{
  struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data;
  PetscInt              *ml_cols=matdata->columns,*aj;
  PetscScalar           *ml_vals=matdata->values,*aa;
  PetscErrorCode        ierr;
  PetscInt              i,j,k,*gordering;
  PetscInt              m=mlmat->outvec_leng,n,*nnzA,*nnzB,*nnz,nz_max,row;
  Mat                   A;

  PetscFunctionBegin;
  if (mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL");
  n = mlmat->invec_leng;
  if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal n %d",m,n);

  ierr = MatCreate(mlmat->comm->USR_comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,n,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = PetscMalloc3(m,PetscInt,&nnzA,m,PetscInt,&nnzB,m,PetscInt,&nnz);CHKERRQ(ierr);

  nz_max = 0;
  for (i=0; i<m; i++){
    nnz[i] = ml_cols[i+1] - ml_cols[i] + 1;
    if (nz_max < nnz[i]) nz_max = nnz[i];
    nnzA[i] = 1; /* diag */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      if (ml_cols[j] < m) nnzA[i]++;
    }
    nnzB[i] = nnz[i] - nnzA[i];
  }
  ierr = MatMPIAIJSetPreallocation(A,0,nnzA,0,nnzB);CHKERRQ(ierr);

  /* insert mat values -- remap row and column indices */
  nz_max++;
  ierr = PetscMalloc(nz_max*(sizeof(PetscInt)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr);
  aa   = (PetscScalar*)(aj + nz_max);
  /* create a global row numbering for the ML_Operator */
  ML_build_global_numbering(mlmat,&gordering,"rows");
  for (i=0; i<m; i++){
    row = gordering[i];
    k   = 0;
    /* diagonal entry */
    aj[k] = row; aa[k++] = ml_vals[i];
    /* off-diagonal entries */
    for (j=ml_cols[i]; j<ml_cols[i+1]; j++){
      aj[k] = gordering[ml_cols[j]]; aa[k++] = ml_vals[j];
    }
    ierr = MatSetValues(A,1,&row,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = A;

  ierr = PetscFree3(nnzA,nnzB,nnz);CHKERRQ(ierr);
  ierr = PetscFree(aj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}