1 /* 2 Provides an interface to the ML 3.0 smoothed Aggregation 3 */ 4 #include "src/ksp/pc/pcimpl.h" /*I "petscpc.h" I*/ 5 #include "src/ksp/pc/impls/mg/mgimpl.h" /*I "petscmg.h" I*/ 6 #include "src/mat/impls/aij/seq/aij.h" 7 #include "src/mat/impls/aij/mpi/mpiaij.h" 8 EXTERN_C_BEGIN 9 #include <math.h> 10 #include "ml_include.h" 11 EXTERN_C_END 12 13 /* The context (data structure) at each grid level */ 14 typedef struct { 15 Vec x,b,r; /* global vectors */ 16 Mat A,P,R; 17 KSP ksp; 18 } GridCtx; 19 20 /* The context used to input PETSc matrix into ML at fine grid */ 21 typedef struct { 22 Mat A,Aloc; 23 Vec x,y; 24 ML_Operator *mlmat; 25 PetscScalar *pwork; /* tmp array used by PetscML_comm() */ 26 PetscInt rlen_max,*cols; /* used by MatConvert_ML_SeqAIJ() */ 27 PetscScalar *vals; /* used by MatConvert_ML_SeqAIJ() */ 28 } FineGridCtx; 29 30 /* The context associates a ML matrix with a PETSc shell matrix */ 31 typedef struct { 32 Mat A; /* PETSc shell matrix associated with mlmat */ 33 ML_Operator *mlmat; /* ML matrix assorciated with A */ 34 Vec y; 35 } Mat_MLShell; 36 37 /* Private context for the ML preconditioner */ 38 typedef struct { 39 ML *ml_object; 40 ML_Aggregate *agg_object; 41 GridCtx *gridctx; 42 FineGridCtx *PetscMLdata; 43 PetscInt fine_level,MaxNlevels,MaxCoarseSize,CoarsenScheme; 44 PetscReal Threshold,DampingFactor; 45 PetscTruth SpectralNormScheme_Anorm; 46 PetscMPIInt size; 47 48 PetscErrorCode (*PCSetUp)(PC); 49 PetscErrorCode (*PCDestroy)(PC); 50 51 } PC_ML; 52 extern int PetscML_getrow(void *ML_data,int N_requested_rows,int requested_rows[], 53 int allocated_space,int columns[],double values[],int row_lengths[]); 54 extern int PetscML_matvec(void *ML_data, int in_length, double p[], int out_length,double ap[]); 55 extern int PetscML_comm(double x[], void *ML_data); 56 extern PetscErrorCode MatMult_ML(Mat,Vec,Vec); 57 extern PetscErrorCode MatMultAdd_ML(Mat,Vec,Vec,Vec); 58 extern PetscErrorCode MatConvert_MPIAIJ_ML(Mat,const MatType,Mat*); 59 extern PetscErrorCode MatDestroy_ML(Mat); 60 extern PetscErrorCode MatConvert_ML_SeqAIJ(ML_Operator*,Mat*); 61 extern PetscErrorCode MatConvert_ML_MPIAIJ(FineGridCtx*,Mat*); 62 extern PetscErrorCode MatConvert_ML_SHELL(ML_Operator*,Mat*); 63 64 /* -------------------------------------------------------------------------- */ 65 /* 66 PCSetUp_ML - Prepares for the use of the ML preconditioner 67 by setting data structures and options. 68 69 Input Parameter: 70 . pc - the preconditioner context 71 72 Application Interface Routine: PCSetUp() 73 74 Notes: 75 The interface routine PCSetUp() is not usually called directly by 76 the user, but instead is called by PCApply() if necessary. 77 */ 78 79 #undef __FUNCT__ 80 #define __FUNCT__ "PCSetUp_ML" 81 static PetscErrorCode PCSetUp_ML(PC pc) 82 { 83 PetscErrorCode ierr; 84 PetscMPIInt size,rank; 85 FineGridCtx *PetscMLdata; 86 ML *ml_object; 87 ML_Aggregate *agg_object; 88 ML_Operator *mlmat; 89 PetscInt nlocal_allcols,Nlevels,mllevel,level,m,fine_level; 90 Mat A,Aloc; 91 GridCtx *gridctx; 92 PC pc_coarse; 93 PC_ML *pc_ml=PETSC_NULL; 94 PetscObjectContainer container; 95 96 PetscFunctionBegin; 97 ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr); 98 if (container) { 99 ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr); 100 } else { 101 SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exit"); 102 } 103 104 /* setup special features of PCML */ 105 /*--------------------------------*/ 106 /* covert A to Aloc to be used by ML at fine grid */ 107 A = pc->pmat; 108 ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr); 109 ierr = MPI_Comm_rank(A->comm,&rank);CHKERRQ(ierr); /* rm! */ 110 pc_ml->size = size; 111 if (size > 1){ 112 Aloc = PETSC_NULL; 113 ierr = MatConvert_MPIAIJ_ML(A,0,&Aloc);CHKERRQ(ierr); 114 } else { 115 Aloc = A; 116 } 117 118 /* create and initialize struct 'PetscMLdata' */ 119 ierr = PetscNew(FineGridCtx,&PetscMLdata);CHKERRQ(ierr); 120 ierr = PetscMemzero(PetscMLdata,sizeof(FineGridCtx));CHKERRQ(ierr); 121 PetscMLdata->A = A; 122 PetscMLdata->Aloc = Aloc; 123 ierr = PetscMalloc((Aloc->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr); 124 pc_ml->PetscMLdata = PetscMLdata; 125 126 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr); 127 if (size == 1){ 128 ierr = VecSetSizes(PetscMLdata->x,A->n,PETSC_DECIDE);CHKERRQ(ierr); 129 } else { 130 ierr = VecSetSizes(PetscMLdata->x,Aloc->n,PETSC_DECIDE);CHKERRQ(ierr); 131 } 132 ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 133 134 ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 135 ierr = VecSetSizes(PetscMLdata->y,A->m,PETSC_DECIDE);CHKERRQ(ierr); 136 ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr); 137 138 /* create ML discretization matrix at fine grid */ 139 ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr); 140 ML_Create(&ml_object,pc_ml->MaxNlevels); 141 ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata); 142 ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols); 143 ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec); 144 145 /* aggregation */ 146 ML_Aggregate_Create(&agg_object); 147 ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize); 148 /* set options */ 149 switch (pc_ml->CoarsenScheme) { 150 case 1: 151 ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break; 152 case 2: 153 ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break; 154 case 3: 155 ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break; 156 } 157 ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold); 158 ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor); 159 if (pc_ml->SpectralNormScheme_Anorm){ 160 ML_Aggregate_Set_SpectralNormScheme_Anorm(agg_object); 161 } 162 163 Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object); 164 if (Nlevels<=0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must > 0",Nlevels); 165 ierr = MGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr); 166 pc_ml->ml_object = ml_object; 167 pc_ml->agg_object = agg_object; 168 169 ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr); 170 fine_level = Nlevels - 1; 171 pc_ml->gridctx = gridctx; 172 pc_ml->fine_level = fine_level; 173 174 /* wrap ML matrices by PETSc shell matrices at coarsened grids. 175 Level 0 is the finest grid for ML, but coarsest for PETSc! */ 176 PetscMLdata->rlen_max = A->N; 177 ierr = PetscMalloc(PetscMLdata->rlen_max*(sizeof(PetscScalar)+sizeof(PetscInt)),&PetscMLdata->cols);CHKERRQ(ierr); 178 PetscMLdata->vals = (PetscScalar*)(PetscMLdata->cols + PetscMLdata->rlen_max); 179 180 gridctx[fine_level].A = A; 181 level = fine_level - 1; 182 if (size == 1){ /* convert ML mat format into petsc seqaij format */ 183 for (mllevel=1; mllevel<Nlevels; mllevel++){ 184 mlmat = &(ml_object->Pmat[mllevel]); 185 ierr = MatConvert_ML_SeqAIJ(mlmat,&gridctx[level].P);CHKERRQ(ierr); 186 mlmat = &(ml_object->Amat[mllevel]); 187 ierr = MatConvert_ML_SeqAIJ(mlmat,&gridctx[level].A);CHKERRQ(ierr); 188 mlmat = &(ml_object->Rmat[mllevel-1]); 189 ierr = MatConvert_ML_SeqAIJ(mlmat,&gridctx[level].R);CHKERRQ(ierr); 190 level--; 191 } 192 } else { /* convert ML mat format into petsc shell format */ 193 for (mllevel=1; mllevel<Nlevels; mllevel++){ 194 mlmat = &(ml_object->Pmat[mllevel]); 195 ierr = MatConvert_ML_SHELL(mlmat,&gridctx[level].P);CHKERRQ(ierr); 196 mlmat = &(ml_object->Amat[mllevel]); 197 /* 198 if (mllevel==1) { 199 ML_Operator_Print_UsingGlobalOrdering(mlmat,"Amat1",PETSC_NULL,PETSC_NULL); 200 } 201 */ 202 ierr = MatConvert_ML_SHELL(mlmat,&gridctx[level].A);CHKERRQ(ierr); 203 #ifndef TMP 204 if (mllevel>0){ 205 PetscMLdata->mlmat = &(ml_object->Amat[mllevel]); 206 Mat A_tmp; 207 ierr = MatConvert_ML_MPIAIJ(PetscMLdata,&A_tmp);CHKERRQ(ierr); 208 /* ierr = MatView(A_tmp,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */ 209 210 Vec x,yy1,yy2; 211 PetscInt am,an; 212 ierr = MatGetLocalSize(A_tmp,&am,&an);CHKERRQ(ierr); 213 ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr); 214 ierr = VecSetSizes(x,an,PETSC_DECIDE);CHKERRQ(ierr); 215 ierr = VecSetFromOptions(x);CHKERRQ(ierr); 216 ierr = VecCreate(PETSC_COMM_WORLD,&yy1);CHKERRQ(ierr); 217 ierr = VecSetSizes(yy1,am,PETSC_DECIDE);CHKERRQ(ierr); 218 ierr = VecSetFromOptions(yy1);CHKERRQ(ierr); 219 ierr = VecDuplicate(yy1,&yy2);CHKERRQ(ierr); 220 221 PetscRandom rd; 222 PetscReal rnorm; 223 PetscScalar mone = -1.0; 224 ierr = PetscRandomCreate(PETSC_COMM_WORLD,RANDOM_DEFAULT,&rd);CHKERRQ(ierr); 225 ierr = VecSetRandom(rd,x);CHKERRQ(ierr); 226 ierr = MatMult(gridctx[level].A,x,yy1);CHKERRQ(ierr); 227 ierr = MatMult(A_tmp,x,yy2);CHKERRQ(ierr); 228 ierr = VecAXPY(&mone,yy1,yy2);CHKERRQ(ierr); 229 ierr = VecNorm(yy2,NORM_2,&rnorm);CHKERRQ(ierr); 230 printf(" [%d] mllevel: %d rnorm %g\n",rank,mllevel,rnorm); 231 232 ierr = MatDestroy(A_tmp);CHKERRQ(ierr); 233 ierr = VecDestroy(x);CHKERRQ(ierr); 234 ierr = VecDestroy(yy1);CHKERRQ(ierr); 235 ierr = VecDestroy(yy2);CHKERRQ(ierr); 236 ierr = PetscRandomDestroy(rd);CHKERRQ(ierr); 237 } 238 #endif 239 mlmat = &(ml_object->Rmat[mllevel-1]); 240 ierr = MatConvert_ML_SHELL(mlmat,&gridctx[level].R);CHKERRQ(ierr); 241 level--; 242 } 243 } 244 245 /* create coarse level and the interpolation between the levels */ 246 level = fine_level; 247 while ( level >= 0 ){ 248 if (level != fine_level){ 249 ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].x);CHKERRQ(ierr); 250 ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->n,PETSC_DECIDE);CHKERRQ(ierr); 251 ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 252 ierr = MGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 253 254 ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].b);CHKERRQ(ierr); 255 ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->m,PETSC_DECIDE);CHKERRQ(ierr); 256 ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 257 ierr = MGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 258 } 259 ierr = VecCreate(gridctx[level].A->comm,&gridctx[level].r);CHKERRQ(ierr); 260 ierr = VecSetSizes(gridctx[level].r,gridctx[level].A->m,PETSC_DECIDE);CHKERRQ(ierr); 261 ierr = VecSetType(gridctx[level].r,VECMPI);CHKERRQ(ierr); 262 ierr = MGSetR(pc,level,gridctx[level].r);CHKERRQ(ierr); 263 264 if (level == 0){ 265 ierr = MGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr); 266 } else { 267 ierr = MGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr); 268 ierr = MGSetResidual(pc,level,MGDefaultResidual,gridctx[level].A);CHKERRQ(ierr); 269 if (level == fine_level){ 270 ierr = KSPSetOptionsPrefix(gridctx[level].ksp,"mg_fine_");CHKERRQ(ierr); 271 ierr = MGSetR(pc,level,gridctx[level].r);CHKERRQ(ierr); 272 } 273 } 274 ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr); 275 276 if (level < fine_level){ 277 if (size > 1){ 278 ierr = KSPGetPC(gridctx[level].ksp,&pc_coarse);CHKERRQ(ierr); 279 ierr = PCSetType(pc_coarse,PCNONE);CHKERRQ(ierr); 280 } 281 ierr = MGSetInterpolate(pc,level+1,gridctx[level].P);CHKERRQ(ierr); 282 ierr = MGSetRestriction(pc,level+1,gridctx[level].R);CHKERRQ(ierr); 283 } 284 level--; 285 } 286 287 /* now call PCSetUp_MG() */ 288 /*--------------------------------*/ 289 ierr = (*pc_ml->PCSetUp)(pc);CHKERRQ(ierr); 290 PetscFunctionReturn(0); 291 } 292 293 #undef __FUNCT__ 294 #define __FUNCT__ "PetscObjectContainerDestroy_PC_ML" 295 PetscErrorCode PetscObjectContainerDestroy_PC_ML(void *ptr) 296 { 297 PetscErrorCode ierr; 298 PC_ML *pc_ml = (PC_ML*)ptr; 299 PetscInt level; 300 301 PetscFunctionBegin; 302 if (pc_ml->size > 1){ierr = MatDestroy(pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);} 303 ML_Aggregate_Destroy(&pc_ml->agg_object); 304 ML_Destroy(&pc_ml->ml_object); 305 306 ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr); 307 ierr = VecDestroy(pc_ml->PetscMLdata->x);CHKERRQ(ierr); 308 ierr = VecDestroy(pc_ml->PetscMLdata->y);CHKERRQ(ierr); 309 ierr = PetscFree(pc_ml->PetscMLdata->cols);CHKERRQ(ierr); 310 ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr); 311 312 level = pc_ml->fine_level; 313 while ( level>= 0 ){ 314 if (level != pc_ml->fine_level){ 315 ierr = MatDestroy(pc_ml->gridctx[level].A);CHKERRQ(ierr); 316 ierr = MatDestroy(pc_ml->gridctx[level].P);CHKERRQ(ierr); 317 ierr = MatDestroy(pc_ml->gridctx[level].R);CHKERRQ(ierr); 318 ierr = VecDestroy(pc_ml->gridctx[level].x);CHKERRQ(ierr); 319 ierr = VecDestroy(pc_ml->gridctx[level].b);CHKERRQ(ierr); 320 } 321 ierr = VecDestroy(pc_ml->gridctx[level].r);CHKERRQ(ierr); 322 level--; 323 } 324 ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr); 325 ierr = PetscFree(pc_ml);CHKERRQ(ierr); 326 PetscFunctionReturn(0); 327 } 328 /* -------------------------------------------------------------------------- */ 329 /* 330 PCDestroy_ML - Destroys the private context for the ML preconditioner 331 that was created with PCCreate_ML(). 332 333 Input Parameter: 334 . pc - the preconditioner context 335 336 Application Interface Routine: PCDestroy() 337 */ 338 #undef __FUNCT__ 339 #define __FUNCT__ "PCDestroy_ML" 340 static PetscErrorCode PCDestroy_ML(PC pc) 341 { 342 PetscErrorCode ierr; 343 PC_ML *pc_ml=PETSC_NULL; 344 PetscObjectContainer container; 345 346 PetscFunctionBegin; 347 ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr); 348 if (container) { 349 ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr); 350 pc->ops->destroy = pc_ml->PCDestroy; 351 } else { 352 SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exit"); 353 } 354 /* detach pc and PC_ML and dereference container */ 355 ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",0);CHKERRQ(ierr); 356 ierr = (*pc->ops->destroy)(pc);CHKERRQ(ierr); 357 358 ierr = PetscObjectContainerDestroy(container);CHKERRQ(ierr); 359 PetscFunctionReturn(0); 360 } 361 362 #undef __FUNCT__ 363 #define __FUNCT__ "PCSetFromOptions_ML" 364 static PetscErrorCode PCSetFromOptions_ML(PC pc) 365 { 366 PetscErrorCode ierr; 367 PetscInt indx,m,PrintLevel,MaxNlevels,MaxCoarseSize; 368 PetscReal Threshold,DampingFactor; 369 PetscTruth flg; 370 const char *type[] = {"additive","multiplicative","full","cascade","kascade"}; 371 const char *scheme[] = {"Uncoupled","Coupled","MIS","METIS"}; 372 PC_ML *pc_ml=PETSC_NULL; 373 PetscObjectContainer container; 374 375 PetscFunctionBegin; 376 ierr = PetscObjectQuery((PetscObject)pc,"PC_ML",(PetscObject *)&container);CHKERRQ(ierr); 377 if (container) { 378 ierr = PetscObjectContainerGetPointer(container,(void **)&pc_ml);CHKERRQ(ierr); 379 } else { 380 SETERRQ(PETSC_ERR_ARG_NULL,"Container does not exit"); 381 } 382 ierr = PetscOptionsHead("MG options");CHKERRQ(ierr); 383 /* inherited MG options */ 384 ierr = PetscOptionsInt("-pc_mg_cycles","1 for V cycle, 2 for W-cycle","MGSetCycles",1,&m,&flg);CHKERRQ(ierr); 385 if (flg) { 386 ierr = MGSetCycles(pc,m);CHKERRQ(ierr); 387 } 388 ierr = PetscOptionsInt("-pc_mg_smoothup","Number of post-smoothing steps","MGSetNumberSmoothUp",1,&m,&flg);CHKERRQ(ierr); 389 if (flg) { 390 ierr = MGSetNumberSmoothUp(pc,m);CHKERRQ(ierr); 391 } 392 ierr = PetscOptionsInt("-pc_mg_smoothdown","Number of pre-smoothing steps","MGSetNumberSmoothDown",1,&m,&flg);CHKERRQ(ierr); 393 if (flg) { 394 ierr = MGSetNumberSmoothDown(pc,m);CHKERRQ(ierr); 395 } 396 ierr = PetscOptionsEList("-pc_mg_type","Multigrid type","MGSetType",type,5,type[1],&indx,&flg);CHKERRQ(ierr); 397 if (flg) { 398 MGType mg = (MGType) 0; 399 switch (indx) { 400 case 0: 401 mg = MGADDITIVE; 402 break; 403 case 1: 404 mg = MGMULTIPLICATIVE; 405 break; 406 case 2: 407 mg = MGFULL; 408 break; 409 case 3: 410 mg = MGKASKADE; 411 break; 412 case 4: 413 mg = MGKASKADE; 414 break; 415 } 416 ierr = MGSetType(pc,mg);CHKERRQ(ierr); 417 } 418 ierr = PetscOptionsTail();CHKERRQ(ierr); 419 420 /* ML options */ 421 ierr = PetscOptionsHead("ML options");CHKERRQ(ierr); 422 /* set defaults */ 423 PrintLevel = 0; 424 MaxNlevels = 10; 425 MaxCoarseSize = 1; 426 indx = 0; 427 Threshold = 0.0; 428 DampingFactor = 4.0/3.0; 429 430 ierr = PetscOptionsInt("-pc_ml_PrintLevel","Print level","ML_Set_PrintLevel",PrintLevel,&PrintLevel,PETSC_NULL);CHKERRQ(ierr); 431 ML_Set_PrintLevel(PrintLevel); 432 433 ierr = PetscOptionsInt("-pc_ml_maxNlevels","Maximum number of levels","None",MaxNlevels,&MaxNlevels,PETSC_NULL);CHKERRQ(ierr); 434 pc_ml->MaxNlevels = MaxNlevels; 435 436 ierr = PetscOptionsInt("-pc_ml_maxCoarseSize","Maximum coarsest mesh size","ML_Aggregate_Set_MaxCoarseSize",MaxCoarseSize,&MaxCoarseSize,PETSC_NULL);CHKERRQ(ierr); 437 pc_ml->MaxCoarseSize = MaxCoarseSize; 438 439 ierr = PetscOptionsEList("-pc_ml_CoarsenScheme","Aggregate Coarsen Scheme","ML_Aggregate_Set_CoarsenScheme_*",scheme,4,scheme[0],&indx,PETSC_NULL);CHKERRQ(ierr); 440 pc_ml->CoarsenScheme = indx; 441 442 ierr = PetscOptionsReal("-pc_ml_DampingFactor","P damping factor","ML_Aggregate_Set_DampingFactor",DampingFactor,&DampingFactor,PETSC_NULL);CHKERRQ(ierr); 443 pc_ml->DampingFactor = DampingFactor; 444 445 ierr = PetscOptionsReal("-pc_ml_Threshold","Smoother drop tol","ML_Aggregate_Set_Threshold",Threshold,&Threshold,PETSC_NULL);CHKERRQ(ierr); 446 pc_ml->Threshold = Threshold; 447 448 ierr = PetscOptionsLogical("-pc_ml_SpectralNormScheme_Anorm","Method used for estimating spectral radius","ML_Aggregate_Set_SpectralNormScheme_Anorm",PETSC_FALSE,&pc_ml->SpectralNormScheme_Anorm,PETSC_FALSE); 449 450 ierr = PetscOptionsTail();CHKERRQ(ierr); 451 PetscFunctionReturn(0); 452 } 453 454 /* -------------------------------------------------------------------------- */ 455 /* 456 PCCreate_ML - Creates a ML preconditioner context, PC_ML, 457 and sets this as the private data within the generic preconditioning 458 context, PC, that was created within PCCreate(). 459 460 Input Parameter: 461 . pc - the preconditioner context 462 463 Application Interface Routine: PCCreate() 464 */ 465 466 /*MC 467 PCML - Use geometric multigrid preconditioning. This preconditioner requires you provide 468 fine grid discretization matrix. The coarser grid matrices and restriction/interpolation 469 operators are computed by ML and wrapped as PETSc shell matrices. 470 471 Options Database Key: (not done yet!) 472 + -pc_mg_maxlevels <nlevels> - maximum number of levels including finest 473 . -pc_mg_cycles 1 or 2 - for V or W-cycle 474 . -pc_mg_smoothup <n> - number of smoothing steps after interpolation 475 . -pc_mg_smoothdown <n> - number of smoothing steps before applying restriction operator 476 . -pc_mg_type <additive,multiplicative,full,cascade> - multiplicative is the default 477 . -pc_mg_monitor - print information on the multigrid convergence 478 - -pc_mg_dump_matlab - dumps the matrices for each level and the restriction/interpolation matrices 479 to the Socket viewer for reading from Matlab. 480 481 Level: intermediate 482 483 Concepts: multigrid 484 485 .seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType, 486 MGSetLevels(), MGGetLevels(), MGSetType(), MPSetCycles(), MGSetNumberSmoothDown(), 487 MGSetNumberSmoothUp(), MGGetCoarseSolve(), MGSetResidual(), MGSetInterpolation(), 488 MGSetRestriction(), MGGetSmoother(), MGGetSmootherUp(), MGGetSmootherDown(), 489 MGSetCyclesOnLevel(), MGSetRhs(), MGSetX(), MGSetR() 490 M*/ 491 492 EXTERN_C_BEGIN 493 #undef __FUNCT__ 494 #define __FUNCT__ "PCCreate_ML" 495 PetscErrorCode PCCreate_ML(PC pc) 496 { 497 PetscErrorCode ierr; 498 PC_ML *pc_ml; 499 PetscObjectContainer container; 500 501 PetscFunctionBegin; 502 /* initialize pc as PCMG */ 503 ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */ 504 505 /* create a supporting struct and attach it to pc */ 506 ierr = PetscNew(PC_ML,&pc_ml);CHKERRQ(ierr); 507 ierr = PetscMemzero(pc_ml,sizeof(PC_ML));CHKERRQ(ierr); 508 ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr); 509 ierr = PetscObjectContainerSetPointer(container,pc_ml);CHKERRQ(ierr); 510 ierr = PetscObjectContainerSetUserDestroy(container,PetscObjectContainerDestroy_PC_ML);CHKERRQ(ierr); 511 ierr = PetscObjectCompose((PetscObject)pc,"PC_ML",(PetscObject)container);CHKERRQ(ierr); 512 513 pc_ml->PCSetUp = pc->ops->setup; 514 pc_ml->PCDestroy = pc->ops->destroy; 515 516 /* overwrite the pointers of PCMG by the functions of PCML */ 517 pc->ops->setfromoptions = PCSetFromOptions_ML; 518 pc->ops->setup = PCSetUp_ML; 519 pc->ops->destroy = PCDestroy_ML; 520 PetscFunctionReturn(0); 521 } 522 EXTERN_C_END 523 524 int PetscML_getrow(void *ML_data, int N_requested_rows, int requested_rows[], 525 int allocated_space, int columns[], double values[], int row_lengths[]) 526 { 527 PetscErrorCode ierr; 528 Mat Aloc; 529 Mat_SeqAIJ *a; 530 PetscInt m,i,j,k=0,row,*aj; 531 PetscScalar *aa; 532 FineGridCtx *ml=(FineGridCtx*)ML_data; 533 534 Aloc = ml->Aloc; 535 a = (Mat_SeqAIJ*)Aloc->data; 536 ierr = MatGetSize(Aloc,&m,PETSC_NULL);CHKERRQ(ierr); 537 538 for (i = 0; i<N_requested_rows; i++) { 539 row = requested_rows[i]; 540 row_lengths[i] = a->ilen[row]; 541 if (allocated_space < k+row_lengths[i]) return(0); 542 if ( (row >= 0) || (row <= (m-1)) ) { 543 aj = a->j + a->i[row]; 544 aa = a->a + a->i[row]; 545 for (j=0; j<row_lengths[i]; j++){ 546 columns[k] = aj[j]; 547 values[k++] = aa[j]; 548 } 549 } 550 } 551 return(1); 552 } 553 554 int PetscML_matvec(void *ML_data,int in_length,double p[],int out_length,double ap[]) 555 { 556 PetscErrorCode ierr; 557 FineGridCtx *ml=(FineGridCtx*)ML_data; 558 Mat A=ml->A, Aloc=ml->Aloc; 559 PetscMPIInt size; 560 PetscScalar *pwork=ml->pwork; 561 PetscInt i; 562 563 ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr); 564 if (size == 1){ 565 ierr = VecPlaceArray(ml->x,p);CHKERRQ(ierr); 566 } else { 567 for (i=0; i<in_length; i++) pwork[i] = p[i]; 568 PetscML_comm(pwork,ml); 569 ierr = VecPlaceArray(ml->x,pwork);CHKERRQ(ierr); 570 } 571 ierr = VecPlaceArray(ml->y,ap);CHKERRQ(ierr); 572 ierr = MatMult(Aloc,ml->x,ml->y);CHKERRQ(ierr); 573 return 0; 574 } 575 576 int PetscML_comm(double p[],void *ML_data) 577 { 578 PetscErrorCode ierr; 579 FineGridCtx *ml=(FineGridCtx*)ML_data; 580 Mat A=ml->A; 581 Mat_MPIAIJ *a = (Mat_MPIAIJ*)A->data; 582 PetscMPIInt size; 583 PetscInt i,in_length=A->m,out_length=ml->Aloc->n; 584 PetscScalar *array; 585 586 ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr); 587 if (size == 1) return 0; 588 589 ierr = VecPlaceArray(ml->y,p);CHKERRQ(ierr); 590 ierr = VecScatterBegin(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr); 591 ierr = VecScatterEnd(ml->y,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr); 592 ierr = VecGetArray(a->lvec,&array);CHKERRQ(ierr); 593 for (i=in_length; i<out_length; i++){ 594 p[i] = array[i-in_length]; 595 } 596 return 0; 597 } 598 #undef __FUNCT__ 599 #define __FUNCT__ "MatMult_ML" 600 PetscErrorCode MatMult_ML(Mat A,Vec x,Vec y) 601 { 602 PetscErrorCode ierr; 603 Mat_MLShell *shell; 604 PetscScalar *xarray,*yarray; 605 PetscInt x_length,y_length; 606 607 PetscFunctionBegin; 608 ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr); 609 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 610 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 611 x_length = shell->mlmat->invec_leng; 612 y_length = shell->mlmat->outvec_leng; 613 614 ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray); 615 616 ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr); 617 ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr); 618 PetscFunctionReturn(0); 619 } 620 /* MatMultAdd_ML - Compute y = w + A*x */ 621 #undef __FUNCT__ 622 #define __FUNCT__ "MatMultAdd_ML" 623 PetscErrorCode MatMultAdd_ML(Mat A,Vec x,Vec w,Vec y) 624 { 625 PetscErrorCode ierr; 626 Mat_MLShell *shell; 627 PetscScalar *xarray,*yarray; 628 const PetscScalar one=1.0; 629 PetscInt x_length,y_length; 630 631 PetscFunctionBegin; 632 ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr); 633 ierr = VecGetArray(x,&xarray);CHKERRQ(ierr); 634 ierr = VecGetArray(y,&yarray);CHKERRQ(ierr); 635 636 x_length = shell->mlmat->invec_leng; 637 y_length = shell->mlmat->outvec_leng; 638 639 ML_Operator_Apply(shell->mlmat,x_length,xarray,y_length,yarray); 640 641 ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr); 642 ierr = VecRestoreArray(y,&yarray);CHKERRQ(ierr); 643 ierr = VecAXPY(&one,w,y);CHKERRQ(ierr); 644 645 PetscFunctionReturn(0); 646 } 647 648 /* newtype is ignored because "ml" is not listed under Petsc MatType yet */ 649 #undef __FUNCT__ 650 #define __FUNCT__ "MatConvert_MPIAIJ_ML" 651 PetscErrorCode MatConvert_MPIAIJ_ML(Mat A,const MatType newtype,Mat *Aloc) 652 { 653 PetscErrorCode ierr; 654 Mat_MPIAIJ *mpimat=(Mat_MPIAIJ*)A->data; 655 Mat_SeqAIJ *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data; 656 PetscInt *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j; 657 PetscScalar *aa=a->a,*ba=b->a,*ca; 658 PetscInt am=A->m,an=A->n,i,j,k; 659 PetscInt *ci,*cj,ncols; 660 MatReuse scall=MAT_INITIAL_MATRIX; 661 662 PetscFunctionBegin; 663 if (am != an) SETERRQ2(PETSC_ERR_ARG_WRONG,"A must have a square diagonal portion, am: %d != an: %d",am,an); 664 665 if (*Aloc) scall = MAT_REUSE_MATRIX; 666 if (scall == MAT_INITIAL_MATRIX){ 667 ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr); 668 ci[0] = 0; 669 for (i=0; i<am; i++){ 670 ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]); 671 } 672 ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr); 673 ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr); 674 675 k = 0; 676 for (i=0; i<am; i++){ 677 /* diagonal portion of A */ 678 ncols = ai[i+1] - ai[i]; 679 for (j=0; j<ncols; j++) { 680 cj[k] = *aj++; 681 ca[k++] = *aa++; 682 } 683 /* off-diagonal portion of A */ 684 ncols = bi[i+1] - bi[i]; 685 for (j=0; j<ncols; j++) { 686 cj[k] = an + (*bj); bj++; 687 ca[k++] = *ba++; 688 } 689 } 690 if (k != ci[am]) SETERRQ2(PETSC_ERR_ARG_WRONG,"k: %d != ci[am]: %d",k,ci[am]); 691 692 /* put together the new matrix */ 693 an = mpimat->A->n+mpimat->B->n; 694 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,an,ci,cj,ca,Aloc);CHKERRQ(ierr); 695 696 /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */ 697 /* Since these are PETSc arrays, change flags to free them as necessary. */ 698 mat = (Mat_SeqAIJ*)(*Aloc)->data; 699 mat->freedata = PETSC_TRUE; 700 mat->nonew = 0; 701 } else if (scall == MAT_REUSE_MATRIX){ 702 mat=(Mat_SeqAIJ*)(*Aloc)->data; 703 ci = mat->i; cj = mat->j; ca = mat->a; 704 for (i=0; i<am; i++) { 705 /* diagonal portion of A */ 706 ncols = ai[i+1] - ai[i]; 707 for (j=0; j<ncols; j++) *ca++ = *aa++; 708 /* off-diagonal portion of A */ 709 ncols = bi[i+1] - bi[i]; 710 for (j=0; j<ncols; j++) *ca++ = *ba++; 711 } 712 } else { 713 SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall); 714 } 715 PetscFunctionReturn(0); 716 } 717 extern PetscErrorCode MatDestroy_Shell(Mat); 718 #undef __FUNCT__ 719 #define __FUNCT__ "MatDestroy_ML" 720 PetscErrorCode MatDestroy_ML(Mat A) 721 { 722 PetscErrorCode ierr; 723 Mat_MLShell *shell; 724 725 PetscFunctionBegin; 726 ierr = MatShellGetContext(A,(void *)&shell);CHKERRQ(ierr); 727 ierr = VecDestroy(shell->y);CHKERRQ(ierr); 728 ierr = PetscFree(shell);CHKERRQ(ierr); 729 ierr = MatDestroy_Shell(A);CHKERRQ(ierr); 730 PetscFunctionReturn(0); 731 } 732 733 extern PetscErrorCode PetscSortIntWithScalarArray(PetscInt,PetscInt [],PetscScalar []); 734 #undef __FUNCT__ 735 #define __FUNCT__ "MatConvert_ML_SeqAIJ" 736 PetscErrorCode MatConvert_ML_SeqAIJ(ML_Operator *mlmat,Mat *newmat) 737 { 738 struct ML_CSR_MSRdata *matdata = (struct ML_CSR_MSRdata *)mlmat->data; 739 PetscErrorCode ierr; 740 PetscInt m=mlmat->outvec_leng,n=mlmat->invec_leng,nnz[m],nz_max; 741 PetscInt *ml_cols=matdata->columns,*aj,i,j,k; 742 PetscScalar *ml_vals=matdata->values,*aa; 743 744 PetscFunctionBegin; 745 if ( mlmat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mlmat->getrow = NULL"); 746 if (m != n){ /* pass array pointers if ml->mlmat is Pmat or Rmat */ 747 ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,matdata->rowptr,ml_cols,ml_vals,newmat);CHKERRQ(ierr); 748 PetscFunctionReturn(0); 749 } 750 751 /* ML Amat is in MSR format. Copy its data into SeqAIJ matrix */ 752 ierr = MatCreate(PETSC_COMM_SELF,m,n,PETSC_DECIDE,PETSC_DECIDE,newmat);CHKERRQ(ierr); 753 ierr = MatSetType(*newmat,MATSEQAIJ);CHKERRQ(ierr); 754 755 nz_max = 0; 756 for (i=0; i<m; i++) { 757 nnz[i] = ml_cols[i+1] - ml_cols[i] + 1; 758 if (nnz[i] > nz_max) nz_max = nnz[i]; 759 } 760 ierr = MatSeqAIJSetPreallocation(*newmat,0,nnz);CHKERRQ(ierr); 761 762 nz_max++; 763 ierr = PetscMalloc(nz_max*(sizeof(int)+sizeof(PetscScalar)),&aj);CHKERRQ(ierr); 764 aa = (PetscScalar*)(aj + nz_max); 765 766 for (i=0; i<m; i++){ 767 k = 0; 768 /* diagonal entry */ 769 aj[k] = i; aa[k++] = ml_vals[i]; 770 /* off diagonal entries */ 771 for (j=ml_cols[i]; j<ml_cols[i+1]; j++){ 772 aj[k] = ml_cols[j]; aa[k++] = ml_vals[j]; 773 } 774 /* sort aj and aa ??? */ 775 ierr = PetscSortIntWithScalarArray(nnz[i],aj,aa);CHKERRQ(ierr); 776 ierr = MatSetValues(*newmat,1,&i,nnz[i],aj,aa,INSERT_VALUES);CHKERRQ(ierr); 777 } 778 ierr = MatAssemblyBegin(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 779 ierr = MatAssemblyEnd(*newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 780 ierr = PetscFree(aj);CHKERRQ(ierr); 781 PetscFunctionReturn(0); 782 } 783 784 #undef __FUNCT__ 785 #define __FUNCT__ "MatConvert_ML_SHELL" 786 PetscErrorCode MatConvert_ML_SHELL(ML_Operator *mlmat,Mat *newmat) 787 { 788 PetscErrorCode ierr; 789 PetscInt m,n; 790 ML_Comm *MLcomm; 791 Mat_MLShell *shellctx; 792 793 PetscFunctionBegin; 794 m = mlmat->outvec_leng; 795 n = mlmat->invec_leng; 796 if (!m || !n){ 797 newmat = PETSC_NULL; 798 } else { 799 MLcomm = mlmat->comm; 800 ierr = PetscNew(Mat_MLShell,&shellctx);CHKERRQ(ierr); 801 ierr = PetscMemzero(shellctx,sizeof(Mat_MLShell));CHKERRQ(ierr); 802 ierr = MatCreateShell(MLcomm->USR_comm,m,n,PETSC_DETERMINE,PETSC_DETERMINE,shellctx,newmat);CHKERRQ(ierr); 803 ierr = MatShellSetOperation(*newmat,MATOP_MULT,(void *)MatMult_ML);CHKERRQ(ierr); 804 ierr = MatShellSetOperation(*newmat,MATOP_MULT_ADD,(void *)MatMultAdd_ML);CHKERRQ(ierr); 805 shellctx->A = *newmat; 806 shellctx->mlmat = mlmat; 807 ierr = VecCreate(PETSC_COMM_WORLD,&shellctx->y);CHKERRQ(ierr); 808 ierr = VecSetSizes(shellctx->y,m,PETSC_DECIDE);CHKERRQ(ierr); 809 ierr = VecSetFromOptions(shellctx->y);CHKERRQ(ierr); 810 (*newmat)->ops->destroy = MatDestroy_ML; 811 } 812 PetscFunctionReturn(0); 813 } 814 815 #undef __FUNCT__ 816 #define __FUNCT__ "MatConvert_ML_MPIAIJ" 817 PetscErrorCode MatConvert_ML_MPIAIJ(FineGridCtx *ml,Mat *newmat) 818 { 819 PetscErrorCode ierr; 820 ML_Operator *mat=ml->mlmat; 821 PetscInt i,j,*cols=ml->cols,cstart,cend,rlen_max,*gordering; 822 PetscInt m=mat->outvec_leng,n,nnzA[m],nnzB[m],nnz[m],nz_max,row,col,*cols_tmp; 823 PetscScalar *vals=ml->vals; 824 Mat C; 825 Mat_MPIAIJ *c; 826 PetscMPIInt rank; 827 828 PetscFunctionBegin; 829 if ( mat->getrow == NULL) SETERRQ(PETSC_ERR_ARG_NULL,"mat->getrow = NULL"); 830 831 ML_build_global_numbering(mat,mat->comm,&gordering); 832 ierr = MPI_Comm_rank(ml->A->comm,&rank);CHKERRQ(ierr); 833 n = mat->invec_leng; 834 /* ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] m: %d, %d; n: %d,\n",rank,m,mat->getrow->Nrows,n);CHKERRQ(ierr);*/ 835 if (m != n) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"m %d must equal to n %d",m,n); 836 837 ierr = MatCreate(ml->A->comm,m,n,PETSC_DECIDE,PETSC_DECIDE,&C);CHKERRQ(ierr); 838 ierr = MatSetType(C,MATMPIAIJ);CHKERRQ(ierr); 839 c = (Mat_MPIAIJ*)C->data; 840 cstart = c->cstart; 841 cend = c->cend; 842 rlen_max = C->N; 843 /* ierr = PetscPrintf(PETSC_COMM_SELF,"[%d] cstart/end: %d %d, C->N: %d\n",rank,cstart,cend,C->N); */ 844 845 ierr = PetscMalloc(nz_max*sizeof(int),&cols_tmp);CHKERRQ(ierr); 846 847 nz_max = 0; 848 for (i=0; i<m; i++){ 849 ML_get_matrix_row(mat,1,&i,&rlen_max,&cols,&vals,&nnz[i],0); 850 if (nz_max < nnz[i]) nz_max = nnz[i]; 851 nnzA[i] = 0; 852 for (j=0; j<nnz[i]; j++){ 853 col = gordering[cols[j]]; 854 if (cstart <= col && col < cend ) nnzA[i]++; 855 } 856 nnzB[i] = nnz[i] - nnzA[i]; 857 } 858 ierr = MatMPIAIJSetPreallocation(C,0,nnzA,0,nnzB);CHKERRQ(ierr); 859 860 /* insert values -- remap row and column indices */ 861 for (i=0; i<m; i++){ 862 ML_get_matrix_row(mat,1,&i,&rlen_max,&cols_tmp,&vals,&nnz[i],0); 863 for (j=0; j<nnz[i]; j++){ 864 cols[j] = gordering[cols_tmp[j]]; 865 } 866 row = gordering[i]; 867 ierr = MatSetValues(C,1,&row,nnz[i],cols,ml->vals,INSERT_VALUES);CHKERRQ(ierr); 868 } 869 ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 870 ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 871 *newmat = C; 872 873 ierr = PetscFree(cols_tmp);CHKERRQ(ierr); 874 PetscFunctionReturn(0); 875 } 876 877 /* -----------------------------------------------------------------------*/ 878 #define SWAP2IntScalar(a,b,c,d,t,ts) {t=a;a=b;b=t;ts=c;c=d;d=ts;} 879 880 #undef __FUNCT__ 881 #define __FUNCT__ "PetscSortIntWithScalarArray_Private" 882 /* 883 A simple version of quicksort; taken from Kernighan and Ritchie, page 87. 884 Assumes 0 origin for v, number of elements = right+1 (right is index of 885 right-most member). 886 */ 887 static PetscErrorCode PetscSortIntWithScalarArray_Private(PetscInt *v,PetscScalar *V,PetscInt right) 888 { 889 PetscErrorCode ierr; 890 PetscInt i,vl,last,tmp; 891 PetscScalar stmp; 892 893 PetscFunctionBegin; 894 if (right <= 1) { 895 if (right == 1) { 896 if (v[0] > v[1]) SWAP2IntScalar(v[0],v[1],V[0],V[1],tmp,stmp); 897 } 898 PetscFunctionReturn(0); 899 } 900 SWAP2IntScalar(v[0],v[right/2],V[0],V[right/2],tmp,stmp); 901 vl = v[0]; 902 last = 0; 903 for (i=1; i<=right; i++) { 904 if (v[i] < vl) {last++; SWAP2IntScalar(v[last],v[i],V[last],V[i],tmp,stmp);} 905 } 906 SWAP2IntScalar(v[0],v[last],V[0],V[last],tmp,stmp); 907 ierr = PetscSortIntWithScalarArray_Private(v,V,last-1);CHKERRQ(ierr); 908 ierr = PetscSortIntWithScalarArray_Private(v+last+1,V+last+1,right-(last+1));CHKERRQ(ierr); 909 PetscFunctionReturn(0); 910 } 911 912 #undef __FUNCT__ 913 #define __FUNCT__ "PetscSortIntWithScalarArray" 914 /*@ 915 PetscSortIntWithScalarArray - Sorts an array of integers in place in increasing order; 916 changes a second array to match the sorted first array. 917 918 Not Collective 919 920 Input Parameters: 921 + n - number of values 922 . i - array of integers 923 - I - second array of integers 924 925 Level: intermediate 926 927 Concepts: sorting^ints with array 928 929 .seealso: PetscSortReal(), PetscSortIntPermutation(), PetscSortInt() 930 @*/ 931 PetscErrorCode PetscSortIntWithScalarArray(PetscInt n,PetscInt i[],PetscScalar I[]) 932 { 933 PetscErrorCode ierr; 934 PetscInt j,k,tmp,ik; 935 PetscScalar stmp; 936 937 PetscFunctionBegin; 938 if (n<8) { 939 for (k=0; k<n; k++) { 940 ik = i[k]; 941 for (j=k+1; j<n; j++) { 942 if (ik > i[j]) { 943 SWAP2IntScalar(i[k],i[j],I[k],I[j],tmp,stmp); 944 ik = i[k]; 945 } 946 } 947 } 948 } else { 949 ierr = PetscSortIntWithScalarArray_Private(i,I,n-1);CHKERRQ(ierr); 950 } 951 PetscFunctionReturn(0); 952 } 953