/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
 */
#include <../src/ksp/pc/impls/gamg/gamg.h>

PetscLogEvent gamg_setup_stages[NUM_SET];

/* Private context for the GAMG preconditioner */
typedef struct gamg_TAG {
  PetscInt  m_dim;
  PetscInt  m_Nlevels;
  PetscInt  m_data_sz;
  PetscInt  m_data_rows;
  PetscInt  m_data_cols;
  PetscBool m_useSA;
  PetscReal *m_data; /* blocked vector of vertex data on fine grid (coordinates) */
} PC_GAMG;
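/*
   Example (a sketch, not part of this file): a typical caller attaches this PC
   through the generic PETSc interfaces.  'ksp', 'dim', and the interlaced
   coordinate array 'coords' (x,y[,z] per local node) are assumed to exist;
   PCSetCoordinates() dispatches to PCSetCoordinates_GAMG() below through the
   "PCSetCoordinates_C" composed function registered in PCCreate_GAMG().

     PC pc;
     ierr = KSPGetPC( ksp, &pc );                CHKERRQ(ierr);
     ierr = PCSetType( pc, PCGAMG );             CHKERRQ(ierr);
     ierr = PCSetCoordinates( pc, dim, coords ); CHKERRQ(ierr);
*/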
/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_GAMG

   Input Parameter:
   .  pc - the preconditioner context
*/
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_GAMG"
PetscErrorCode PCSetCoordinates_GAMG( PC a_pc, PetscInt a_ndm, PetscReal *a_coords )
{
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,bs,my0,kk,ii,jj,nloc,Iend;
  Mat            Amat = a_pc->pmat;
  PetscBool      flag;
  char           str[64];

  PetscFunctionBegin;
  ierr = MatGetBlockSize( Amat, &bs );              CHKERRQ(ierr);
  ierr = MatGetOwnershipRange( Amat, &my0, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;
  if( (Iend-my0)%bs != 0 ) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG,"Bad local size %d.",nloc);

  ierr = PetscOptionsGetString(PETSC_NULL,"-pc_gamg_type",str,64,&flag); CHKERRQ(ierr);
  pc_gamg->m_useSA = (PetscBool)(flag && strcmp(str,"sa") == 0);

  pc_gamg->m_data_rows = 1;
  if( a_coords == 0 ) pc_gamg->m_useSA = PETSC_TRUE; /* use SA if no data */
  if( !pc_gamg->m_useSA ) pc_gamg->m_data_cols = a_ndm; /* coordinates */
  else { /* SA: null space vectors */
    if( a_coords != 0 && bs == 1 ) pc_gamg->m_data_cols = 1; /* scalar w/ coords and SA (not needed) */
    else if( a_coords != 0 ) pc_gamg->m_data_cols = (a_ndm==2 ? 3 : 6); /* elasticity */
    else pc_gamg->m_data_cols = bs; /* no data, force SA with constant null space vectors */
    pc_gamg->m_data_rows = bs;
  }
  arrsz = nloc*pc_gamg->m_data_rows*pc_gamg->m_data_cols;

  /* create data - syntactic sugar that should be refactored at some point */
  if (!pc_gamg->m_data || (pc_gamg->m_data_sz != arrsz)) {
    ierr = PetscFree( pc_gamg->m_data ); CHKERRQ(ierr);
    ierr = PetscMalloc((arrsz+1)*sizeof(double), &pc_gamg->m_data ); CHKERRQ(ierr);
  }
  for(kk=0;kk<arrsz;kk++) pc_gamg->m_data[kk] = -999.;
  pc_gamg->m_data[arrsz] = -99.; /* sentinel, checked by the assert below */
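  /*
     Explanatory note on the smoothed-aggregation (SA) near-null-space data
     built below: it is stored column-major, bs rows per node, with bs
     translational modes (an identity block) and, when coordinates are given,
     the rotational rigid body modes.  For a 3D node at (x,y,z) the three
     rotations are, as columns, (0,-z,y), (z,0,-x) and (-y,x,0); in 2D the
     single rotation is (-y,x).
  */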
  /* copy data in - column oriented */
  if( pc_gamg->m_useSA ) {
    const PetscInt M = Iend - my0;
    for(kk=0;kk<nloc;kk++){
      PetscReal *data = &pc_gamg->m_data[kk*bs];
      if( pc_gamg->m_data_cols==1 ) *data = 1.0;
      else {
        for(ii=0;ii<bs;ii++)
          for(jj=0;jj<bs;jj++)
            if(ii==jj) data[ii*M + jj] = 1.0; /* translational modes */
            else data[ii*M + jj] = 0.0;
        if( a_coords != 0 ) {
          if( a_ndm == 2 ){ /* rotational modes */
            data += 2*M;
            data[0] = -a_coords[2*kk+1];
            data[1] =  a_coords[2*kk];
          }
          else {
            data += 3*M;
            data[0] =  0.0;              data[M+0] =  a_coords[3*kk+2]; data[2*M+0] = -a_coords[3*kk+1];
            data[1] = -a_coords[3*kk+2]; data[M+1] =  0.0;              data[2*M+1] =  a_coords[3*kk];
            data[2] =  a_coords[3*kk+1]; data[M+2] = -a_coords[3*kk];   data[2*M+2] =  0.0;
          }
        }
      }
    }
  }
  else {
    for( kk = 0 ; kk < nloc ; kk++ ){
      for( ii = 0 ; ii < a_ndm ; ii++ ) {
        pc_gamg->m_data[ii*nloc + kk] = a_coords[kk*a_ndm + ii];
      }
    }
  }
  assert(pc_gamg->m_data[arrsz] == -99.);

  pc_gamg->m_data_sz = arrsz;
  pc_gamg->m_dim = a_ndm;

  PetscFunctionReturn(0);
}
EXTERN_C_END

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_GAMG"
PetscErrorCode PCReset_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscFree(pc_gamg->m_data);CHKERRQ(ierr);
  pc_gamg->m_data = 0; pc_gamg->m_data_sz = 0;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   partitionLevel

   Input Parameter:
   .  a_Amat_fine - matrix on this fine (k) level
   .  a_ndata_rows - size of data to move (coarse grid)
   .  a_ndata_cols - size of data to move (coarse grid)
   .  a_cbs - coarse grid block size
   In/Output Parameter:
   .  a_P_inout - prolongation operator to the next level (k-1)
   .  a_coarse_data - data that need to be moved
   .  a_nactive_proc - number of active procs
   Output Parameter:
   .  a_Amat_crs - coarse matrix that is created (k-1)
*/

#undef __FUNCT__
#define __FUNCT__ "partitionLevel"
PetscErrorCode partitionLevel( Mat a_Amat_fine,
                               PetscInt a_ndata_rows,
                               PetscInt a_ndata_cols,
                               PetscInt a_cbs,
                               Mat *a_P_inout,
                               PetscReal **a_coarse_data,
                               PetscMPIInt *a_nactive_proc,
                               Mat *a_Amat_crs
                               )
{
  PetscErrorCode ierr;
  Mat            Cmat,Pnew,Pold=*a_P_inout;
  IS             new_indices,isnum;
  MPI_Comm       wcomm = ((PetscObject)a_Amat_fine)->comm;
  PetscMPIInt    mype,npe;
  PetscInt       neq,NN,Istart,Iend,Istart0,Iend0,ncrs0,ncrs_new,fbs;
  PetscMPIInt    new_npe,targ_npe;
  PetscBool      flag = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank( wcomm, &mype ); CHKERRQ(ierr);
  ierr = MPI_Comm_size( wcomm, &npe );  CHKERRQ(ierr);
  ierr = MatGetBlockSize( a_Amat_fine, &fbs ); CHKERRQ(ierr);
  /* RAP */
  ierr = MatPtAP( a_Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat ); CHKERRQ(ierr);

  ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);

  ierr = MatGetOwnershipRange( Cmat, &Istart0, &Iend0 ); CHKERRQ(ierr);
  ncrs0 = (Iend0-Istart0)/a_cbs; assert((Iend0-Istart0)%a_cbs == 0);

  /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates accordingly */
  ierr = MatGetSize( Cmat, &neq, &NN );CHKERRQ(ierr);
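  /*
     Illustration of the process-count reduction heuristic below: with
     MIN_EQ_PROC=100, a coarse grid with neq=4000 equations targets
     targ_npe=40 processes.  If 48 are currently active, the factor loop
     settles on new_npe=24, the largest divisor of 48 that still leaves
     strictly more than 100 equations per process (4000/24 = 166).  Grids
     with fewer than TOP_GRID_LIM equations collapse to a single process.
  */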
#define MIN_EQ_PROC 100
  targ_npe = neq/MIN_EQ_PROC; /* hardwired minimum number of equations per proc */
#define TOP_GRID_LIM 1000
  if( targ_npe == 0 || neq < TOP_GRID_LIM ) {
    new_npe = 1; /* output for next level */
  }
  else if (targ_npe >= *a_nactive_proc ) new_npe = *a_nactive_proc; /* no change */
  else {
    PetscMPIInt factstart,fact,nactivepe=*a_nactive_proc;
    factstart = nactivepe;
    new_npe = -9999; /* sentinel: no factor found yet */
    for(fact=factstart;fact>0;fact--){ /* try to find a better number of procs */
      if( nactivepe%fact==0 && neq/(nactivepe/fact) > MIN_EQ_PROC ) {
        new_npe = nactivepe/fact;
      }
    }
    if( new_npe == -9999 ){
      new_npe = *a_nactive_proc; /* fall back to no change */
      PetscPrintf(PETSC_COMM_WORLD,"[%d]%s warning: failed to find reduced PE number, targ_npe=%d\n",mype,__FUNCT__,targ_npe);
    }
  }
  *a_nactive_proc = new_npe; /* this is a nominal value -- output */
  PetscPrintf(PETSC_COMM_WORLD,"[%d]%s new_npe=%d neq=%d\n",mype,__FUNCT__,new_npe,neq);
  ierr = PetscOptionsHasName(PETSC_NULL,"-pc_gamg_avoid_repartitioning",&flag);
  CHKERRQ(ierr);
  if( !flag ) { /* re-partition */
    MatPartitioning mpart;
    Mat             adj;
    const PetscInt  *is_idx;
    PetscInt        is_sz,kk,jj,ii,*isnewproc_idx;
    /* create sub communicator */
    MPI_Comm        cm,new_comm;
    IS              isnewproc;
    MPI_Group       wg, g2;
    PetscMPIInt     ranks[npe],counts[npe],nc=ncrs0;

    /* if( ncrs0 == 0 ) { */
    /*   if(mype==0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"zero local nodes on root -- increase min"); */
    /* } */
    ierr = MPI_Allgather( &nc, 1, MPI_INT, counts, 1, MPI_INT, wcomm ); CHKERRQ(ierr);
    assert(counts[mype]==ncrs0);
    /* count real active pes */
    for( new_npe = jj = 0 ; jj < npe ; jj++) {
      if( counts[jj] != 0 ) {
        ranks[new_npe++] = jj;
      }
    }
    ierr = MPI_Comm_group( wcomm, &wg ); CHKERRQ(ierr);
    ierr = MPI_Group_incl( wg, new_npe, ranks, &g2 ); CHKERRQ(ierr);
    ierr = MPI_Comm_create( wcomm, g2, &cm ); CHKERRQ(ierr);
    if( cm != MPI_COMM_NULL ) {
      ierr = PetscCommDuplicate( cm, &new_comm, PETSC_NULL ); CHKERRQ(ierr);
      ierr = MPI_Comm_free( &cm ); CHKERRQ(ierr);
    }
    ierr = MPI_Group_free( &wg ); CHKERRQ(ierr);
    ierr = MPI_Group_free( &g2 ); CHKERRQ(ierr);
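    /*
       Explanatory note on the block below: for a_cbs > 1 the partitioner is
       fed a compressed graph with one vertex per block (node) rather than per
       equation.  Every nonzero of Cmat contributes a unit entry at
       (row/a_cbs, col/a_cbs) of an auxiliary AIJ matrix, which is then
       converted to MATMPIADJ for MatPartitioning.
    */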
    /* MatPartitioningApply calls MatConvert, which is collective */
    ierr = PetscLogEventBegin(gamg_setup_stages[SET12],0,0,0,0);CHKERRQ(ierr);
    if( a_cbs == 1) {
      ierr = MatConvert( Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);
    }
    else {
      /* make a scalar matrix to partition */
      Mat       tMat;
      PetscInt  Ii,ncols; const PetscScalar *vals; const PetscInt *idx;
      MatInfo   info;
      ierr = MatGetInfo(Cmat,MAT_LOCAL,&info); CHKERRQ(ierr);
      ncols = (PetscInt)info.nz_used/((ncrs0+1)*a_cbs*a_cbs)+1;

      ierr = MatCreateMPIAIJ( wcomm, ncrs0, ncrs0,
                              PETSC_DETERMINE, PETSC_DETERMINE,
                              2*ncols, PETSC_NULL, ncols, PETSC_NULL,
                              &tMat );
      CHKERRQ(ierr);

      for ( Ii = Istart0; Ii < Iend0; Ii++ ) {
        PetscInt dest_row = Ii/a_cbs;
        ierr = MatGetRow(Cmat,Ii,&ncols,&idx,&vals); CHKERRQ(ierr);
        for( jj = 0 ; jj < ncols ; jj++ ){
          PetscInt    dest_col = idx[jj]/a_cbs;
          PetscScalar v = 1.0;
          ierr = MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES); CHKERRQ(ierr);
        }
        ierr = MatRestoreRow(Cmat,Ii,&ncols,&idx,&vals); CHKERRQ(ierr);
      }
      ierr = MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

      ierr = MatConvert( tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);

      ierr = MatDestroy( &tMat ); CHKERRQ(ierr);
    }
    if( ncrs0 != 0 ){
      if(new_comm==MPI_COMM_NULL) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"new_comm == MPI_COMM_NULL");
      /* if(ncrs0==0)SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"zero local nodes -- increase min"); */
      /* hack to fix global data that pmetis.c uses in 'adj' */
      for(new_npe=jj=0 ; jj<npe ; jj++) {
        if( counts[jj] != 0 ) {
          adj->rmap->range[new_npe++] = adj->rmap->range[jj];
        }
      }
      ierr = MatPartitioningCreate( new_comm, &mpart ); CHKERRQ(ierr);
      ierr = MatPartitioningSetAdjacency( mpart, adj ); CHKERRQ(ierr);
      ierr = MatPartitioningSetFromOptions( mpart ); CHKERRQ(ierr);
      ierr = MatPartitioningSetNParts( mpart, new_npe ); CHKERRQ(ierr);
      ierr = MatPartitioningApply( mpart, &isnewproc ); CHKERRQ(ierr);
      ierr = MatPartitioningDestroy( &mpart ); CHKERRQ(ierr);
      /* collect IS info */
      ierr = ISGetLocalSize( isnewproc, &is_sz ); CHKERRQ(ierr);
      ierr = PetscMalloc( a_cbs*is_sz*sizeof(PetscInt), &isnewproc_idx ); CHKERRQ(ierr);
      ierr = ISGetIndices( isnewproc, &is_idx ); CHKERRQ(ierr);
      /* spread partitioning across the machine - probably the right thing to do, but machine specific */
      for(kk=0,jj=0;kk<is_sz;kk++){
        for(ii=0 ; ii<a_cbs ; ii++, jj++ ) {
          isnewproc_idx[jj] = is_idx[kk]; /* distribution */
        }
      }
      ierr = ISRestoreIndices( isnewproc, &is_idx ); CHKERRQ(ierr);
      ierr = ISDestroy( &isnewproc ); CHKERRQ(ierr);
      is_sz *= a_cbs;

      ierr = MPI_Comm_free( &new_comm ); CHKERRQ(ierr);
    }
    else {
      isnewproc_idx = 0;
      is_sz = 0;
    }
    ierr = MatDestroy( &adj ); CHKERRQ(ierr);
    ierr = ISCreateGeneral( wcomm, is_sz, isnewproc_idx, PETSC_COPY_VALUES, &isnewproc );
    CHKERRQ(ierr);
    if( isnewproc_idx != 0 ) {
      ierr = PetscFree( isnewproc_idx ); CHKERRQ(ierr);
    }

    /*
      Create an index set from the isnewproc index set to indicate the mapping TO
    */
    ierr = ISPartitioningToNumbering( isnewproc, &isnum ); CHKERRQ(ierr);
    /*
      Determine how many elements are assigned to each processor
    */
    ierr = ISPartitioningCount( isnewproc, npe, counts ); CHKERRQ(ierr);
    ierr = ISDestroy( &isnewproc ); CHKERRQ(ierr);
    ncrs_new = counts[mype]/a_cbs;
    ierr = PetscLogEventEnd(gamg_setup_stages[SET12],0,0,0,0); CHKERRQ(ierr);
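    /*
       Explanatory note on the block below: each coarse node owns
       data_sz = a_ndata_rows*a_ndata_cols reals of vertex data.  The new
       global node id is recovered from the equation-space numbering isnum as
       idx[ii*a_cbs]/a_cbs, and datum kk of local node ii is scattered to
       global slot id*data_sz + kk, so the data travel with the repartitioned
       matrix.
    */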
    { /* Create a vector to contain the newly ordered element information */
      const PetscInt *idx, data_sz=a_ndata_rows*a_ndata_cols;
      const PetscInt stride0=ncrs0*a_ndata_rows,strideNew=ncrs_new*a_ndata_rows;
      PetscInt       ii,jj,kk;
      IS             isscat;
      PetscScalar    *array;
      Vec            src_crd, dest_crd;
      PetscReal      *data = *a_coarse_data;
      VecScatter     vecscat;
      PetscInt       tidx[ncrs0*data_sz];

      ierr = VecCreate( wcomm, &dest_crd ); CHKERRQ(ierr);
      ierr = VecSetSizes( dest_crd, data_sz*ncrs_new, PETSC_DECIDE ); CHKERRQ(ierr);
      ierr = VecSetFromOptions( dest_crd ); CHKERRQ(ierr); /* funny vector - get global options? */
      /*
        There are 'a_ndata_rows*a_ndata_cols' data items per node (one can think of the vectors as
        having a block size of that).  Note, ISs are expanded into equation space by 'a_cbs'.
      */
      ierr = ISGetIndices( isnum, &idx ); CHKERRQ(ierr);
      for(ii=0,jj=0; ii<ncrs0 ; ii++) {
        PetscInt id = idx[ii*a_cbs]/a_cbs; /* get node back */
        for( kk=0; kk<data_sz ; kk++, jj++) tidx[jj] = id*data_sz + kk;
      }
      ierr = ISRestoreIndices( isnum, &idx ); CHKERRQ(ierr);
      ierr = ISCreateGeneral( wcomm, data_sz*ncrs0, tidx, PETSC_COPY_VALUES, &isscat );
      CHKERRQ(ierr);
      /*
        Create a vector to contain the original vertex information for each element
      */
      ierr = VecCreateSeq( PETSC_COMM_SELF, data_sz*ncrs0, &src_crd ); CHKERRQ(ierr);
      for( jj=0; jj<a_ndata_cols ; jj++ ) {
        for( ii=0 ; ii<ncrs0 ; ii++) {
          for( kk=0; kk<a_ndata_rows ; kk++ ) {
            PetscInt ix = ii*a_ndata_rows + kk + jj*stride0, jx = ii*data_sz + kk*a_ndata_cols + jj;
            ierr = VecSetValues( src_crd, 1, &jx, &data[ix], INSERT_VALUES ); CHKERRQ(ierr);
          }
        }
      }
      ierr = VecAssemblyBegin(src_crd); CHKERRQ(ierr);
      ierr = VecAssemblyEnd(src_crd); CHKERRQ(ierr);
      /*
        Scatter the element vertex information (still in the original vertex ordering)
        to the correct processor
      */
      ierr = VecScatterCreate( src_crd, PETSC_NULL, dest_crd, isscat, &vecscat);
      CHKERRQ(ierr);
      ierr = ISDestroy( &isscat ); CHKERRQ(ierr);
      ierr = VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterDestroy( &vecscat ); CHKERRQ(ierr);
      ierr = VecDestroy( &src_crd ); CHKERRQ(ierr);
      /*
        Put the element vertex data into a new allocation of the gdata->ele
      */
      ierr = PetscFree( *a_coarse_data ); CHKERRQ(ierr);
      ierr = PetscMalloc( data_sz*ncrs_new*sizeof(PetscReal), a_coarse_data ); CHKERRQ(ierr);

      ierr = VecGetArray( dest_crd, &array ); CHKERRQ(ierr);
      data = *a_coarse_data;
      for( jj=0; jj<a_ndata_cols ; jj++ ) {
        for( ii=0 ; ii<ncrs_new ; ii++) {
          for( kk=0; kk<a_ndata_rows ; kk++ ) {
            PetscInt ix = ii*a_ndata_rows + kk + jj*strideNew, jx = ii*data_sz + kk*a_ndata_cols + jj;
            data[ix] = PetscRealPart(array[jx]);
            array[jx] = 1.e300; /* poison the consumed slot to catch stale reads */
          }
        }
      }
      ierr = VecRestoreArray( dest_crd, &array ); CHKERRQ(ierr);
      ierr = VecDestroy( &dest_crd ); CHKERRQ(ierr);
    }
    /*
      Invert for MatGetSubMatrix
    */
    ierr = ISInvertPermutation( isnum, ncrs_new*a_cbs, &new_indices ); CHKERRQ(ierr);
    ierr = ISSort( new_indices ); CHKERRQ(ierr); /* is this needed? */
    ierr = ISDestroy( &isnum ); CHKERRQ(ierr);
    /* A_crs output */
    ierr = MatGetSubMatrix( Cmat, new_indices, new_indices, MAT_INITIAL_MATRIX, a_Amat_crs );
    CHKERRQ(ierr);

    ierr = MatDestroy( &Cmat ); CHKERRQ(ierr);
    Cmat = *a_Amat_crs; /* output */
    ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);

    /* prolongator */
    ierr = MatGetOwnershipRange( Pold, &Istart, &Iend ); CHKERRQ(ierr);
    {
      IS findices;
      ierr = ISCreateStride(wcomm,Iend-Istart,Istart,1,&findices); CHKERRQ(ierr);
      ierr = MatGetSubMatrix( Pold, findices, new_indices, MAT_INITIAL_MATRIX, &Pnew );
      CHKERRQ(ierr);
      ierr = ISDestroy( &findices ); CHKERRQ(ierr);
    }
    ierr = MatDestroy( a_P_inout ); CHKERRQ(ierr);
    *a_P_inout = Pnew; /* output */
    ierr = ISDestroy( &new_indices ); CHKERRQ(ierr);

    ierr = MatGetOwnershipRange(Cmat, &neq, &NN ); CHKERRQ(ierr);
  }
  else {
    *a_Amat_crs = Cmat; /* output */
  }

  PetscFunctionReturn(0);
}

#define GAMG_MAXLEVELS 30
#if defined(PETSC_USE_LOG)
PetscLogStage gamg_stages[20];
#endif
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                  by setting data structures and options.

   Input Parameter:
   .  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
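/*
   Setup flow (summary of the code below): starting from Pmat, repeatedly
   (1) build a prolongator and coarse-grid data with createProlongation(),
   (2) form and repartition the coarse operator with partitionLevel(),
   until the grid drops below TOP_GRID_LIM equations or only one process
   remains active; then hand the hierarchy to PCMG (PCMGSetLevels, Chebyshev
   smoothers with the eigenvalue estimates gathered along the way, and the
   interpolation operators) and finish with PCSetUp_MG().
*/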
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_GAMG"
PetscErrorCode PCSetUp_GAMG( PC a_pc )
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  Mat            Amat = a_pc->mat, Pmat = a_pc->pmat;
  PetscInt       fine_level, level, level1, M, N, bs, nloc, lidx, Istart, Iend;
  MPI_Comm       wcomm = ((PetscObject)a_pc)->comm;
  PetscMPIInt    mype,npe,nactivepe;
  PetscBool      isOK;
  Mat            Aarr[GAMG_MAXLEVELS], Parr[GAMG_MAXLEVELS];
  PetscReal      *coarse_data = 0, *data, emaxs[GAMG_MAXLEVELS];
  MatInfo        info;

  PetscFunctionBegin;
  if( a_pc->setupcalled ) {
    /* no state data in GAMG to destroy */
    ierr = PCReset_MG( a_pc ); CHKERRQ(ierr);
  }
  ierr = MPI_Comm_rank(wcomm,&mype);CHKERRQ(ierr);
  ierr = MPI_Comm_size(wcomm,&npe);CHKERRQ(ierr);
  /* GAMG requires the fine-grid matrix as input; it determines the number of levels. */
  ierr = MatGetBlockSize( Amat, &bs ); CHKERRQ(ierr);
  ierr = MatGetSize( Amat, &M, &N );CHKERRQ(ierr);
  ierr = MatGetOwnershipRange( Amat, &Istart, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-Istart)/bs; assert((Iend-Istart)%bs == 0);

  /* get data if not around */
  if( pc_gamg->m_data == 0 && nloc > 0 ) {
    ierr = PCSetCoordinates_GAMG( a_pc, -1, 0 ); CHKERRQ(ierr);
  }
  data = pc_gamg->m_data;

  /* Get A_i and R_i */
  ierr = MatGetInfo(Amat,MAT_GLOBAL_SUM,&info); CHKERRQ(ierr);
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s level %d N=%d, n data rows=%d, n data cols=%d, nnz/row (ave)=%d, np=%d\n",
              mype,__FUNCT__,0,N,pc_gamg->m_data_rows,pc_gamg->m_data_cols,(PetscInt)(info.nz_used/(PetscReal)N),npe);
  for ( level=0, Aarr[0] = Pmat, nactivepe = npe; /* hard wired stopping logic */
        level < GAMG_MAXLEVELS-1 && (level==0 || M>TOP_GRID_LIM) && (npe==1 || nactivepe>1);
        level++ ){
    level1 = level + 1;
    ierr = PetscLogEventBegin(gamg_setup_stages[SET1],0,0,0,0);CHKERRQ(ierr);
    ierr = createProlongation(Aarr[level], data, pc_gamg->m_dim, pc_gamg->m_data_cols, pc_gamg->m_useSA,
                              level, &bs, &Parr[level1], &coarse_data, &isOK, &emaxs[level] );
    CHKERRQ(ierr);
    ierr = PetscFree( data ); CHKERRQ(ierr);
    ierr = PetscLogEventEnd(gamg_setup_stages[SET1],0,0,0,0);CHKERRQ(ierr);

    if(level==0) Aarr[0] = Amat; /* use Pmat for finest level setup, but use mat for solver */

    if( isOK ) {
      ierr = PetscLogEventBegin(gamg_setup_stages[SET2],0,0,0,0);CHKERRQ(ierr);
      ierr = partitionLevel( Aarr[level], pc_gamg->m_useSA ? bs : 1, pc_gamg->m_data_cols, bs,
                             &Parr[level1], &coarse_data, &nactivepe, &Aarr[level1] );
      CHKERRQ(ierr);
      ierr = PetscLogEventEnd(gamg_setup_stages[SET2],0,0,0,0);CHKERRQ(ierr);
      ierr = MatGetSize( Aarr[level1], &M, &N );CHKERRQ(ierr);
      ierr = MatGetInfo(Aarr[level1],MAT_GLOBAL_SUM,&info); CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"\t\t[%d]%s %d) N=%d, bs=%d, n data cols=%d, nnz/row (ave)=%d, %d active pes\n",
                  mype,__FUNCT__,level1,N,bs,pc_gamg->m_data_cols,(PetscInt)(info.nz_used/(PetscReal)N),nactivepe);
      /* coarse grids with SA can have zero row/cols from singleton aggregates */
      /* the aggregation method could probably guarantee this does not happen - be safe for now */
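      /*
         The guard below scans the diagonal of the new coarse operator and
         inserts a tiny value (1.e-10) wherever it is exactly zero, so that
         later LU/ILU smoother factorizations do not break down on rows left
         empty by singleton aggregates.
      */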
      if( PETSC_TRUE ){
        Vec diag; PetscScalar *data_arr,v; PetscInt Istart,Iend,kk,nloceq,id;
        v = 1.e-10; /* LU factor has hard wired numbers for small diags so this needs to match (yuk) */
        ierr = MatGetOwnershipRange(Aarr[level1], &Istart, &Iend); CHKERRQ(ierr);
        nloceq = Iend-Istart;
        ierr = MatGetVecs( Aarr[level1], &diag, 0 ); CHKERRQ(ierr);
        ierr = MatGetDiagonal( Aarr[level1], diag ); CHKERRQ(ierr);
        ierr = VecGetArray( diag, &data_arr ); CHKERRQ(ierr);
        for(kk=0;kk<nloceq;kk++){
          if(data_arr[kk]==0.0) {
            id = kk + Istart;
            ierr = MatSetValues(Aarr[level1],1,&id,1,&id,&v,INSERT_VALUES);
            CHKERRQ(ierr);
            PetscPrintf(PETSC_COMM_SELF,"\t[%d]%s warning: added diag to zero (%d) on level %d \n",mype,__FUNCT__,id,level);
          }
        }
        ierr = VecRestoreArray( diag, &data_arr ); CHKERRQ(ierr);
        ierr = VecDestroy( &diag ); CHKERRQ(ierr);
        ierr = MatAssemblyBegin(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        ierr = MatAssemblyEnd(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      }
    }
    else {
      coarse_data = 0;
      break;
    }
    data = coarse_data;
  }
  if( coarse_data ) {
    ierr = PetscFree( coarse_data ); CHKERRQ(ierr);
  }
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s %d levels\n",0,__FUNCT__,level + 1);
  pc_gamg->m_data = 0; /* destroyed coordinate data */
  pc_gamg->m_Nlevels = level + 1;
  fine_level = level;
  ierr = PCMGSetLevels(a_pc,pc_gamg->m_Nlevels,PETSC_NULL);CHKERRQ(ierr);

  /* set default smoothers */
  for ( lidx=1, level = pc_gamg->m_Nlevels-2;
        lidx <= fine_level;
        lidx++, level--) {
    PetscReal emax, emin;
    KSP smoother; PC subpc;
    ierr = PCMGGetSmoother( a_pc, lidx, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetType( smoother, KSPCHEBYCHEV );CHKERRQ(ierr);
    if( emaxs[level] > 0.0 ) emax=emaxs[level];
    else { /* eigen estimate 'emax' */
      KSP eksp; Mat Lmat = Aarr[level];
      Vec bb, xx; PC pc;

      ierr = MatGetVecs( Lmat, &bb, 0 ); CHKERRQ(ierr);
      ierr = MatGetVecs( Lmat, &xx, 0 ); CHKERRQ(ierr);
      {
        PetscRandom rctx;
        ierr = PetscRandomCreate(wcomm,&rctx);CHKERRQ(ierr);
        ierr = PetscRandomSetFromOptions(rctx);CHKERRQ(ierr);
        ierr = VecSetRandom(bb,rctx);CHKERRQ(ierr);
        ierr = PetscRandomDestroy( &rctx ); CHKERRQ(ierr);
      }
      ierr = KSPCreate(wcomm,&eksp);CHKERRQ(ierr);
      ierr = KSPSetType( eksp, KSPCG ); CHKERRQ(ierr);
      ierr = KSPSetInitialGuessNonzero( eksp, PETSC_FALSE ); CHKERRQ(ierr);
      ierr = KSPSetOperators( eksp, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN ); CHKERRQ(ierr);
      ierr = KSPGetPC( eksp, &pc );CHKERRQ(ierr);
      ierr = PCSetType( pc, PCPBJACOBI ); CHKERRQ(ierr); /* should be same as the smoother PC set below */
      ierr = KSPSetTolerances( eksp, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 10 );
      CHKERRQ(ierr);
      /* ierr = KSPSetConvergenceTest( eksp, KSPSkipConverged, 0, 0 ); CHKERRQ(ierr); */
      ierr = KSPSetNormType( eksp, KSP_NORM_NONE ); CHKERRQ(ierr);

      ierr = KSPSetComputeSingularValues( eksp,PETSC_TRUE ); CHKERRQ(ierr);
      ierr = KSPSolve( eksp, bb, xx ); CHKERRQ(ierr);
      ierr = KSPComputeExtremeSingularValues( eksp, &emax, &emin ); CHKERRQ(ierr);
      ierr = VecDestroy( &xx ); CHKERRQ(ierr);
      ierr = VecDestroy( &bb ); CHKERRQ(ierr);
      ierr = KSPDestroy( &eksp ); CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"\t\t\t%s max eigen=%e min=%e PC=%s\n",__FUNCT__,emax,emin,PETSC_GAMG_SMOOTHER);
    }
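    /*
       Explanatory note on the heuristic below: Chebyshev smoothing targets
       the interval [emin,emax].  emax is the largest-eigenvalue estimate
       (inflated by 5% for safety) and emin is taken as emax divided by the
       coarsening rate N_fine/N_coarse, a cheap proxy for the part of the
       spectrum the coarse grid does not handle.
    */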
    {
      PetscInt N1, N0, tt;
      ierr = MatGetSize( Aarr[level], &N1, &tt );   CHKERRQ(ierr);
      ierr = MatGetSize( Aarr[level+1], &N0, &tt ); CHKERRQ(ierr);
      emin = 1.*emax/((PetscReal)N1/(PetscReal)N0); /* this should be about the coarsening rate */
      emax *= 1.05;
    }

    ierr = KSPSetOperators( smoother, Aarr[level], Aarr[level], DIFFERENT_NONZERO_PATTERN );
    CHKERRQ(ierr);
    ierr = KSPChebychevSetEigenvalues( smoother, emax, emin );CHKERRQ(ierr);
    /* ierr = KSPSetTolerances(smoother,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,2); CHKERRQ(ierr); */
    ierr = KSPGetPC( smoother, &subpc ); CHKERRQ(ierr);
    ierr = PCSetType( subpc, PETSC_GAMG_SMOOTHER ); CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
  }
  {
    KSP smoother; /* coarse grid */
    Mat Lmat = Aarr[pc_gamg->m_Nlevels-1];
    ierr = PCMGGetSmoother( a_pc, 0, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetOperators( smoother, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN );
    CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
  }

  /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
  ierr = PCSetFromOptions_MG(a_pc); CHKERRQ(ierr);
  {
    PetscBool galerkin;
    ierr = PCMGGetGalerkin( a_pc, &galerkin); CHKERRQ(ierr);
    if(galerkin){
      SETERRQ(wcomm,PETSC_ERR_ARG_WRONG, "GAMG does Galerkin manually so it must not be used in PC_MG.");
    }
  }

  /* set interpolation between the levels, create timer stages, clean up */
  if( PETSC_FALSE ) { /* debug: register a log stage for the finest level */
    char str[32];
    sprintf(str,"MG Level %d (%d)",0,pc_gamg->m_Nlevels-1);
    PetscLogStageRegister(str, &gamg_stages[fine_level]);
  }
  for (lidx=0,level=pc_gamg->m_Nlevels-1;
       lidx<fine_level;
       lidx++, level--){
    ierr = PCMGSetInterpolation( a_pc, lidx+1, Parr[level] );CHKERRQ(ierr);
    if( PETSC_FALSE ) { /* debug: dump the operators in MATLAB format */
      PetscViewer viewer; char fname[32];
      sprintf(fname,"Pmat_%d.m",level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Parr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer ); CHKERRQ(ierr);
      sprintf(fname,"Amat_%d.m",level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Aarr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer ); CHKERRQ(ierr);
    }
    ierr = MatDestroy( &Parr[level] ); CHKERRQ(ierr);
    ierr = MatDestroy( &Aarr[level] ); CHKERRQ(ierr);
    if( PETSC_FALSE ) { /* debug: register a log stage per level */
      char str[32];
      sprintf(str,"MG Level %d (%d)",lidx+1,level-1);
      PetscLogStageRegister(str, &gamg_stages[level-1]);
    }
  }

  /* setupcalled is set to 0 so that MG is setup from scratch */
  a_pc->setupcalled = 0;
  ierr = PCSetUp_MG(a_pc);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* ------------------------------------------------------------------------- */
/*
   PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
                    that was created with PCCreate_GAMG().

   Input Parameter:
   .  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_GAMG"
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_GAMG(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_GAMG"
PetscErrorCode PCSetFromOptions_GAMG(PC pc)
{
  /* PetscErrorCode ierr; */
  /* PC_MG    *mg = (PC_MG*)pc->data; */
  /* PC_GAMG  *pc_gamg = (PC_GAMG*)mg->innerctx; */
  /* MPI_Comm comm = ((PetscObject)pc)->comm; */

  PetscFunctionBegin;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_GAMG - Creates a GAMG preconditioner context, PC_GAMG

   Input Parameter:
   .  pc - the preconditioner context

   Application Interface Routine: PCCreate()

*/
/*MC
     PCGAMG - Use algebraic multigrid preconditioning.  This preconditioner requires that you
       provide the fine-grid discretization matrix and coordinates on the fine grid.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kaskade
   GAMG options:
+  -pc_gamg_type <type>: "sa" selects smoothed aggregation (see PCSetCoordinates_GAMG())
-  -pc_gamg_avoid_repartitioning: do not repartition coarse grids (see partitionLevel())

   Level: intermediate

   Concepts: multigrid

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
          PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
          PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
          PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
          PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
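/*
   Typical command-line activation (a sketch; the application name is
   hypothetical, and only -pc_gamg_type and -pc_gamg_avoid_repartitioning are
   actually read by this file):

     mpiexec -n 8 ./app -ksp_type cg -pc_type gamg -pc_gamg_type sa
*/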
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_GAMG"
PetscErrorCode PCCreate_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_GAMG        *pc_gamg;
  PC_MG          *mg;
  PetscClassId   cookie;
  static int     count = 0; /* register the log events exactly once */

  PetscFunctionBegin;
  /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCGAMG);CHKERRQ(ierr);

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_GAMG,&pc_gamg);CHKERRQ(ierr);
  mg = (PC_MG*)pc->data;
  mg->innerctx = pc_gamg;

  pc_gamg->m_Nlevels = -1;

  /* overwrite the pointers of PCMG by the functions of PCGAMG */
  pc->ops->setfromoptions = PCSetFromOptions_GAMG;
  pc->ops->setup          = PCSetUp_GAMG;
  pc->ops->reset          = PCReset_GAMG;
  pc->ops->destroy        = PCDestroy_GAMG;

  ierr = PetscObjectComposeFunctionDynamic( (PetscObject)pc,
                                            "PCSetCoordinates_C",
                                            "PCSetCoordinates_GAMG",
                                            PCSetCoordinates_GAMG);CHKERRQ(ierr);
  if( count++ == 0 ) {
    PetscClassIdRegister("GAMG Setup",&cookie);
    PetscLogEventRegister("GAMG: createProl", cookie, &gamg_setup_stages[SET1]);
    PetscLogEventRegister("  make graph", cookie, &gamg_setup_stages[SET3]);
    PetscLogEventRegister("  MIS/Agg", cookie, &gamg_setup_stages[SET4]);
    PetscLogEventRegister("  geo: growSupp", cookie, &gamg_setup_stages[SET5]);
    PetscLogEventRegister("  geo: triangle", cookie, &gamg_setup_stages[SET6]);
    PetscLogEventRegister("    search & set", cookie, &gamg_setup_stages[FIND_V]);
    PetscLogEventRegister("  SA: init", cookie, &gamg_setup_stages[SET7]);
    /* PetscLogEventRegister("  SA: frmProl0", cookie, &gamg_setup_stages[SET8]); */
    PetscLogEventRegister("  SA: smooth", cookie, &gamg_setup_stages[SET9]);
    PetscLogEventRegister("GAMG: partLevel", cookie, &gamg_setup_stages[SET2]);
    PetscLogEventRegister("  PL repartition", cookie, &gamg_setup_stages[SET12]);
    /* PetscLogEventRegister("  PL move data", cookie, &gamg_setup_stages[SET13]); */
    /* PetscLogEventRegister("GAMG: fix", cookie, &gamg_setup_stages[SET10]); */
    /* PetscLogEventRegister("GAMG: set levels", cookie, &gamg_setup_stages[SET11]); */
  }

  PetscFunctionReturn(0);
}
EXTERN_C_END