/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
*/
#include <../src/ksp/pc/impls/gamg/gamg.h>

PetscLogEvent gamg_setup_stages[NUM_SET];

/* Private context for the GAMG preconditioner */
typedef struct gamg_TAG {
  PetscInt   m_dim;
  PetscInt   m_Nlevels;
  PetscInt   m_data_sz;
  PetscInt   m_data_rows;
  PetscInt   m_data_cols;
  PetscBool  m_useSA;
  PetscReal *m_data; /* blocked vector of vertex data on fine grid (coordinates) */
} PC_GAMG;

/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_GAMG

   Input Parameter:
.  pc - the preconditioner context
*/
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_GAMG"
PetscErrorCode PCSetCoordinates_GAMG( PC a_pc, PetscInt a_ndm, PetscReal *a_coords )
{
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,bs,my0,kk,ii,jj,nloc,Iend;
  Mat            Amat = a_pc->pmat;
  PetscBool      flag;
  char           str[16];

  PetscFunctionBegin;
  ierr = MatGetBlockSize( Amat, &bs ); CHKERRQ( ierr );
  ierr = MatGetOwnershipRange( Amat, &my0, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;
  if( (Iend-my0)%bs != 0 ) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG,"Bad local size %d.",nloc);

  ierr = PetscOptionsGetString(PETSC_NULL,"-pc_gamg_type",str,16,&flag); CHKERRQ( ierr );
  pc_gamg->m_useSA = (PetscBool)(flag && strcmp(str,"sa") == 0);

  pc_gamg->m_data_rows = 1;
  if( a_coords == 0 ) pc_gamg->m_useSA = PETSC_TRUE; /* use SA if no data */
  if( !pc_gamg->m_useSA ) pc_gamg->m_data_cols = a_ndm; /* coordinates */
  else { /* SA: null space vectors */
    if( a_coords != 0 && bs == 1 ) pc_gamg->m_data_cols = 1; /* scalar w/ coords and SA (not needed) */
    else if( a_coords != 0 ) pc_gamg->m_data_cols = (a_ndm==2 ? 3 : 6); /* elasticity */
    else pc_gamg->m_data_cols = bs; /* no data, force SA with constant null space vectors */
    pc_gamg->m_data_rows = bs;
  }
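  /* Note on the null-space sizing above (a reading note, not original
     commentary): for smoothed aggregation (SA) on elasticity the near-null
     space is the set of rigid body modes -- 2 translations + 1 rotation = 3
     vectors in 2D, 3 translations + 3 rotations = 6 vectors in 3D -- hence
     (a_ndm==2 ? 3 : 6).  With no coordinates only the 'bs' constant
     (translational) vectors can be formed. */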
  arrsz = nloc*pc_gamg->m_data_rows*pc_gamg->m_data_cols;

  /* create data - syntactic sugar that should be refactored at some point */
  if (!pc_gamg->m_data || (pc_gamg->m_data_sz != arrsz)) {
    ierr = PetscFree( pc_gamg->m_data ); CHKERRQ(ierr);
    ierr = PetscMalloc( (arrsz+1)*sizeof(PetscReal), &pc_gamg->m_data ); CHKERRQ(ierr);
  }
  for( kk=0 ; kk<arrsz ; kk++ ) pc_gamg->m_data[kk] = -999.;
  pc_gamg->m_data[arrsz] = -99.; /* sentinel checked by the asserts below */
  /* copy data in - column oriented */
  if( pc_gamg->m_useSA ) {
    const PetscInt M = Iend - my0; /* the extent (leading dimension) of each column */
    for( kk=0 ; kk<nloc ; kk++ ){
      PetscReal *data = &pc_gamg->m_data[kk*bs];
      if( pc_gamg->m_data_cols == 1 ) *data = 1.0;
      else {
        for( ii=0 ; ii<bs ; ii++ )
          for( jj=0 ; jj<bs ; jj++ )
            if( ii==jj ) data[ii*M + jj] = 1.0; /* translational modes */
            else         data[ii*M + jj] = 0.0;
        if( a_coords != 0 ) {
          if( a_ndm == 2 ){ /* rotational modes */
            data += 2*M;
            data[0] = -a_coords[2*kk+1];
            data[1] =  a_coords[2*kk];
          }
          else {
            data += 3*M;
            data[0] =  0.0;               data[M+0] =  a_coords[3*kk+2]; data[2*M+0] = -a_coords[3*kk+1];
            data[1] = -a_coords[3*kk+2];  data[M+1] =  0.0;              data[2*M+1] =  a_coords[3*kk];
            data[2] =  a_coords[3*kk+1];  data[M+2] = -a_coords[3*kk];   data[2*M+2] =  0.0;
          }
        }
      }
    }
  }
  else {
    for( kk = 0 ; kk < nloc ; kk++ ){
      for( ii = 0 ; ii < a_ndm ; ii++ ) {
        pc_gamg->m_data[ii*nloc + kk] = a_coords[kk*a_ndm + ii];
      }
    }
  }

  assert(pc_gamg->m_data[arrsz] == -99.);
  for( kk=0 ; kk<arrsz ; kk++ ) assert(pc_gamg->m_data[kk] != -999.); /* debug: every entry was written */

  pc_gamg->m_data_sz = arrsz;
  pc_gamg->m_dim = a_ndm;

  PetscFunctionReturn(0);
}
EXTERN_C_END

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_GAMG"
PetscErrorCode PCReset_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG   *mg = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscFree(pc_gamg->m_data);CHKERRQ(ierr);
  pc_gamg->m_data = 0; pc_gamg->m_data_sz = 0;
  PetscFunctionReturn(0);
}
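/* Illustrative sketch (not used by the code, kept out of compilation): the
   process-count selection inside partitionLevel() below is equivalent to the
   following, assuming the hardwired target of MIN_EQ_PROC equations per
   process.  Only divisors of the current active count are considered so that
   ranks can be dropped evenly across the machine. */
#if 0
static PetscMPIInt sketchChooseActiveProcs( PetscInt neq, PetscMPIInt nactive )
{
  PetscMPIInt fact, new_npe = nactive;
  for( fact = nactive ; fact > 0 ; fact-- ) { /* smaller 'fact' ==> more procs kept */
    if( nactive%fact == 0 && neq/(nactive/fact) > 200 /* MIN_EQ_PROC */ ) {
      /* last assignment wins: the largest proc count that still has enough eqs/proc */
      new_npe = nactive/fact;
    }
  }
  return new_npe;
}
#endif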
/* -------------------------------------------------------------------------- */
/*
   partitionLevel

   Input Parameter:
   . a_Amat_fine - matrix on this fine (k) level
   . a_ndata_rows - size of data to move (coarse grid)
   . a_ndata_cols - size of data to move (coarse grid)
   In/Output Parameter:
   . a_P_inout - prolongation operator to the next level (k-1)
   . a_coarse_data - data that need to be moved
   . a_active_proc - number of active procs
   Output Parameter:
   . a_Amat_crs - coarse matrix that is created (k-1)
*/

#undef __FUNCT__
#define __FUNCT__ "partitionLevel"
PetscErrorCode partitionLevel( Mat a_Amat_fine,
                               PetscInt a_ndata_rows,
                               PetscInt a_ndata_cols,
                               PetscInt a_cbs,
                               Mat *a_P_inout,
                               PetscReal **a_coarse_data,
                               PetscMPIInt *a_active_proc,
                               Mat *a_Amat_crs
                               )
{
  PetscErrorCode ierr;
  Mat            Cmat,Pnew,Pold=*a_P_inout;
  IS             new_indices,isnum;
  MPI_Comm       wcomm = ((PetscObject)a_Amat_fine)->comm;
  PetscMPIInt    nactive_procs,mype,npe;
  PetscInt       Istart,Iend,Istart0,Iend0,ncrs0,ncrs_new,fbs;
  PetscInt       neq,NN;
  PetscMPIInt    new_npe,targ_npe;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank( wcomm, &mype ); CHKERRQ(ierr);
  ierr = MPI_Comm_size( wcomm, &npe ); CHKERRQ(ierr);
  ierr = MatGetBlockSize( a_Amat_fine, &fbs ); CHKERRQ(ierr);
  /* RAP */
  ierr = MatPtAP( a_Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat ); CHKERRQ(ierr);
  ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);

  ierr = MatGetOwnershipRange( Cmat, &Istart0, &Iend0 ); CHKERRQ(ierr);
  ncrs0 = (Iend0-Istart0)/a_cbs; assert((Iend0-Istart0)%a_cbs == 0);

  /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates accordingly */
  ierr = MatGetSize( Cmat, &neq, &NN );CHKERRQ(ierr);
#define MIN_EQ_PROC 200
  nactive_procs = *a_active_proc;
  targ_npe = neq/MIN_EQ_PROC; /* hardwire min. number of eq/proc */
#define TOP_GRID_LIM 1000
  if( targ_npe == 0 || neq < TOP_GRID_LIM ) new_npe = 1; /* chop coarsest grid */
  else if( targ_npe >= nactive_procs ) new_npe = nactive_procs; /* no change */
  else {
    PetscMPIInt factstart,fact;
    new_npe = -9999;
    factstart = nactive_procs;
    for( fact = factstart ; fact > 0 ; fact-- ){ /* try to find a better number of procs */
      if( nactive_procs%fact == 0 && neq/(nactive_procs/fact) > MIN_EQ_PROC ) {
        new_npe = nactive_procs/fact;
      }
    }
    if( new_npe == -9999 ) new_npe = nactive_procs;
  }

  if( PETSC_TRUE ) { /* partition: get 'isnewproc' */
    MatPartitioning mpart;
    Mat             adj;
    const PetscInt  *is_idx;
    PetscInt        is_sz,kk,jj,ii,old_fact=(npe/nactive_procs), *isnewproc_idx;
    /* create sub communicator */
    MPI_Comm        cm,new_comm;
    PetscInt        membershipKey = mype%old_fact, new_fact=(npe/new_npe), counts[npe];
    IS              isnewproc;

    *a_active_proc = new_npe; /* output for next level */
    ierr = MPI_Comm_split(wcomm, membershipKey, mype, &cm); CHKERRQ(ierr);
    ierr = PetscCommDuplicate( cm, &new_comm, PETSC_NULL ); CHKERRQ(ierr);
    ierr = MPI_Comm_free( &cm ); CHKERRQ(ierr);

    /* MatPartitioningApply calls MatConvert, which is collective */
    if( a_cbs == 1 ) {
      ierr = MatConvert( Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);
    }
    else {
      /* make a scalar matrix to partition: collapse each a_cbs x a_cbs block
         to one graph vertex */
      Mat      tMat;
      PetscInt Ii,ncols; const PetscScalar *vals; const PetscInt *idx;
      ierr = MatCreateMPIAIJ( wcomm, ncrs0, ncrs0,
                              PETSC_DETERMINE, PETSC_DETERMINE,
                              25, PETSC_NULL, 10, PETSC_NULL,
                              &tMat ); CHKERRQ(ierr);

      for ( Ii = Istart0; Ii < Iend0; Ii++ ) {
        PetscInt dest_row = Ii/a_cbs;
        ierr = MatGetRow(Cmat,Ii,&ncols,&idx,&vals); CHKERRQ(ierr);
        for( jj = 0 ; jj < ncols ; jj++ ){
          PetscInt dest_col = idx[jj]/a_cbs;
          PetscScalar v = 1.0;
          ierr = MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES); CHKERRQ(ierr);
        }
        ierr = MatRestoreRow(Cmat,Ii,&ncols,&idx,&vals); CHKERRQ(ierr);
      }
      ierr = MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

      ierr = MatConvert( tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);

      ierr = MatDestroy( &tMat ); CHKERRQ(ierr);
    }
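    /* Reading note on what follows (assumptions mine): only one rank in each
       group of 'old_fact' ranks holds coarse rows (membershipKey == 0), so
       only those ranks run the partitioner, on the duplicated
       sub-communicator.  The rmap->range compaction below rewrites the global
       row layout of 'adj' so that ParMETIS (pmetis.c) sees the
       sub-communicator's layout rather than the world layout. */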
    if( membershipKey == 0 ) {
      if( ncrs0 == 0 ) SETERRQ(wcomm,PETSC_ERR_LIB,"zero local nodes -- increase min");
      /* hack to fix global data that pmetis.c uses in 'adj' */
      for( kk=0 , jj=0 ; kk<=npe ; jj++, kk += old_fact ) {
        adj->rmap->range[jj] = adj->rmap->range[kk];
      }
      ierr = MatPartitioningCreate( new_comm, &mpart ); CHKERRQ(ierr);
      ierr = MatPartitioningSetAdjacency( mpart, adj ); CHKERRQ(ierr);
      ierr = MatPartitioningSetFromOptions( mpart ); CHKERRQ(ierr);
      ierr = MatPartitioningSetNParts( mpart, new_npe ); CHKERRQ(ierr);
      ierr = MatPartitioningApply( mpart, &isnewproc ); CHKERRQ(ierr);
      ierr = MatPartitioningDestroy( &mpart ); CHKERRQ(ierr);
      /* collect IS info */
      ierr = ISGetLocalSize( isnewproc, &is_sz ); CHKERRQ(ierr);
      ierr = PetscMalloc( a_cbs*is_sz*sizeof(PetscInt), &isnewproc_idx ); CHKERRQ(ierr);
      ierr = ISGetIndices( isnewproc, &is_idx ); CHKERRQ(ierr);
      /* spread partitioning across machine - probably the right thing to do but machine spec. */
      for( kk=0, jj=0 ; kk<is_sz ; kk++ ){
        for( ii=0 ; ii<a_cbs ; ii++, jj++ ) { /* expand from node level to equation level by 'bs' */
          isnewproc_idx[jj] = is_idx[kk] * new_fact;
        }
      }
      ierr = ISRestoreIndices( isnewproc, &is_idx ); CHKERRQ(ierr);
      ierr = ISDestroy( &isnewproc ); CHKERRQ(ierr);
      is_sz *= a_cbs;
    }
    else {
      isnewproc_idx = 0;
      is_sz = 0;
    }
    ierr = MatDestroy( &adj ); CHKERRQ(ierr);
    ierr = MPI_Comm_free( &new_comm ); CHKERRQ(ierr);
    ierr = ISCreateGeneral( wcomm, is_sz, isnewproc_idx, PETSC_COPY_VALUES, &isnewproc ); CHKERRQ(ierr);
    if( membershipKey == 0 ) {
      ierr = PetscFree( isnewproc_idx ); CHKERRQ(ierr);
    }

    /*
      Create an index set from the isnewproc index set to indicate the mapping TO
    */
    ierr = ISPartitioningToNumbering( isnewproc, &isnum ); CHKERRQ(ierr);
    /*
      Determine how many elements are assigned to each processor
    */
    ierr = ISPartitioningCount( isnewproc, npe, counts ); CHKERRQ(ierr);
    ierr = ISDestroy( &isnewproc ); CHKERRQ(ierr);
    ncrs_new = counts[mype]/a_cbs;

    { /* Create a vector to contain the newly ordered element information */
      const PetscInt data_sz = a_ndata_rows*a_ndata_cols;
      const PetscInt *idx;
      const PetscInt stride0=ncrs0*a_ndata_rows, strideNew=ncrs_new*a_ndata_rows;
      PetscInt       ii,jj,kk;
      IS             isscat;
      PetscScalar    *array;
      Vec            src_crd, dest_crd;
      PetscReal      *data = *a_coarse_data;
      VecScatter     vecscat;
      PetscInt       tidx[ncrs0*data_sz]; /* C99 VLA */

      ierr = VecCreate( wcomm, &dest_crd ); CHKERRQ(ierr);
      ierr = VecSetSizes( dest_crd, data_sz*ncrs_new, PETSC_DECIDE ); CHKERRQ(ierr);
      ierr = VecSetFromOptions( dest_crd ); CHKERRQ(ierr); /* funny vector -- get global options? */
      /*
        There are 'a_ndata_rows*a_ndata_cols' data items per node (one can think of the
        vectors as having a block size of that).  Note, ISs are expanded into equation
        space by 'a_cbs'.
      */
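      /* Layout note (my reading, not original commentary): 'data' holds the
         vertex data column-major, with entry (row kk, col jj) of node ii at
         ii*a_ndata_rows + kk + jj*stride; the scatter vectors instead
         interleave all data_sz items of a node, jx = ii*data_sz +
         kk*a_ndata_cols + jj.  The loops below convert between the two
         orderings on either side of the scatter. */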
      ierr = ISGetIndices( isnum, &idx ); CHKERRQ(ierr);
      for( ii=0, jj=0 ; ii<ncrs0 ; ii++ ) {
        PetscInt id = idx[ii*a_cbs]/a_cbs; /* get node back from equation numbering */
        for( kk=0 ; kk<data_sz ; kk++, jj++ ) tidx[jj] = id*data_sz + kk;
      }
      ierr = ISRestoreIndices( isnum, &idx ); CHKERRQ(ierr);
      ierr = ISCreateGeneral( wcomm, data_sz*ncrs0, tidx, PETSC_COPY_VALUES, &isscat ); CHKERRQ(ierr);
      /*
        Create a vector to contain the original vertex information for each element
      */
      ierr = VecCreateSeq( PETSC_COMM_SELF, data_sz*ncrs0, &src_crd ); CHKERRQ(ierr);
      for( jj=0 ; jj<a_ndata_cols ; jj++ ) {
        for( ii=0 ; ii<ncrs0 ; ii++ ) {
          for( kk=0 ; kk<a_ndata_rows ; kk++ ) {
            PetscInt ix = ii*a_ndata_rows + kk + jj*stride0, jx = ii*data_sz + kk*a_ndata_cols + jj;
            ierr = VecSetValues( src_crd, 1, &jx, &data[ix], INSERT_VALUES ); CHKERRQ(ierr);
          }
        }
      }
      ierr = VecAssemblyBegin(src_crd); CHKERRQ(ierr);
      ierr = VecAssemblyEnd(src_crd); CHKERRQ(ierr);
      /*
        Scatter the element vertex information (still in the original vertex ordering)
        to the correct processor
      */
      ierr = VecScatterCreate( src_crd, PETSC_NULL, dest_crd, isscat, &vecscat ); CHKERRQ(ierr);
      ierr = ISDestroy( &isscat ); CHKERRQ(ierr);
      ierr = VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterDestroy( &vecscat ); CHKERRQ(ierr);
      ierr = VecDestroy( &src_crd ); CHKERRQ(ierr);
      /*
        Put the element vertex data into a new allocation of a_coarse_data
      */
      ierr = PetscFree( *a_coarse_data ); CHKERRQ(ierr);
      ierr = PetscMalloc( data_sz*ncrs_new*sizeof(PetscReal), a_coarse_data ); CHKERRQ(ierr);

      ierr = VecGetArray( dest_crd, &array ); CHKERRQ(ierr);
      data = *a_coarse_data;
      for( jj=0 ; jj<a_ndata_cols ; jj++ ) {
        for( ii=0 ; ii<ncrs_new ; ii++ ) {
          for( kk=0 ; kk<a_ndata_rows ; kk++ ) {
            PetscInt ix = ii*a_ndata_rows + kk + jj*strideNew, jx = ii*data_sz + kk*a_ndata_cols + jj;
            data[ix] = PetscRealPart(array[jx]);
            array[jx] = 1.e300; /* poison consumed entries (debug) */
          }
        }
      }
      ierr = VecRestoreArray( dest_crd, &array ); CHKERRQ(ierr);
      ierr = VecDestroy( &dest_crd ); CHKERRQ(ierr);
    }
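    /* Rationale for the inversion below (added note, my interpretation):
       ISPartitioningToNumbering() gave the NEW global index of each OLD local
       equation, while MatGetSubMatrix() wants, for each new local row, the
       old index it comes from -- hence ISInvertPermutation().  Whether the
       ISSort() is strictly required is left open by the original comment; it
       is kept as a safeguard since submatrix extraction prefers locally
       sorted index sets. */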
    /*
      Invert for MatGetSubMatrix
    */
    ierr = ISInvertPermutation( isnum, ncrs_new*a_cbs, &new_indices ); CHKERRQ(ierr);
    ierr = ISSort( new_indices ); CHKERRQ(ierr); /* is this needed? */
    ierr = ISDestroy( &isnum ); CHKERRQ(ierr);
    /* A_crs output */
    ierr = MatGetSubMatrix( Cmat, new_indices, new_indices, MAT_INITIAL_MATRIX, a_Amat_crs ); CHKERRQ(ierr);

    ierr = MatDestroy( &Cmat ); CHKERRQ(ierr);
    Cmat = *a_Amat_crs; /* output */
    ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);

    /* prolongator */
    ierr = MatGetOwnershipRange( Pold, &Istart, &Iend ); CHKERRQ(ierr);
    {
      IS findices;
      ierr = ISCreateStride(wcomm,Iend-Istart,Istart,1,&findices); CHKERRQ(ierr);
      ierr = MatGetSubMatrix( Pold, findices, new_indices, MAT_INITIAL_MATRIX, &Pnew ); CHKERRQ(ierr);
      ierr = ISDestroy( &findices ); CHKERRQ(ierr);
    }
    ierr = MatDestroy( a_P_inout ); CHKERRQ(ierr);
    *a_P_inout = Pnew; /* output */
    ierr = ISDestroy( &new_indices ); CHKERRQ(ierr);
  }
  else {
    *a_Amat_crs = Cmat; /* output */
  }

  PetscFunctionReturn(0);
}

#define GAMG_MAXLEVELS 30
#if defined(PETSC_USE_LOG)
PetscLogStage gamg_stages[20];
#endif
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                  by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_GAMG"
PetscErrorCode PCSetUp_GAMG( PC a_pc )
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  Mat            Amat = a_pc->mat, Pmat = a_pc->pmat;
  PetscInt       fine_level, level, level1, M, N, bs, nloc, lidx, Istart, Iend;
  MPI_Comm       wcomm = ((PetscObject)a_pc)->comm;
  PetscMPIInt    mype,npe,nactivepe;
  PetscBool      isOK;
  Mat            Aarr[GAMG_MAXLEVELS], Parr[GAMG_MAXLEVELS];
  PetscReal      *coarse_data = 0, *data, emaxs[GAMG_MAXLEVELS];

  PetscFunctionBegin;
  if( a_pc->setupcalled ) {
    /* no state data in GAMG to destroy */
    ierr = PCReset_MG( a_pc ); CHKERRQ(ierr);
  }
  ierr = MPI_Comm_rank(wcomm,&mype);CHKERRQ(ierr);
  ierr = MPI_Comm_size(wcomm,&npe);CHKERRQ(ierr);
  /* GAMG requires input of the fine-grid matrix; it determines the number of levels. */
  ierr = MatGetBlockSize( Amat, &bs ); CHKERRQ(ierr);
  ierr = MatGetSize( Amat, &M, &N );CHKERRQ(ierr);
  ierr = MatGetOwnershipRange( Amat, &Istart, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-Istart)/bs; assert((Iend-Istart)%bs == 0);

  /* get data if not already set */
  if( pc_gamg->m_data == 0 && nloc > 0 ) {
    ierr = PCSetCoordinates_GAMG( a_pc, -1, 0 ); CHKERRQ( ierr );
  }
  data = pc_gamg->m_data;

  /* Get A_i and R_i */
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s level %d N=%d, n data rows=%d, n data cols=%d, nloc=%d, np=%d\n",0,__FUNCT__,0,N,pc_gamg->m_data_rows,pc_gamg->m_data_cols,nloc,npe);
  for ( level=0, Aarr[0] = Pmat, nactivepe = npe; /* hard wired stopping logic */
        level < GAMG_MAXLEVELS-1 && (level==0 || M/bs>TOP_GRID_LIM) && (npe==1 || nactivepe>1);
        level++ ) {
    level1 = level + 1;
    ierr = PetscLogEventBegin(gamg_setup_stages[SET1],0,0,0,0);CHKERRQ(ierr);
    ierr = createProlongation(Aarr[level], data, pc_gamg->m_dim, pc_gamg->m_data_cols, pc_gamg->m_useSA,
                              &bs, &Parr[level1], &coarse_data, &isOK, &emaxs[level] );
    CHKERRQ(ierr);
    ierr = PetscFree( data ); CHKERRQ( ierr );
    ierr = PetscLogEventEnd(gamg_setup_stages[SET1],0,0,0,0);CHKERRQ(ierr);

    if( level==0 ) Aarr[0] = Amat; /* use Pmat for finest level setup, but use mat for solver */

    if( isOK ) {
      ierr = PetscLogEventBegin(gamg_setup_stages[SET2],0,0,0,0);CHKERRQ(ierr);
      ierr = partitionLevel( Aarr[level], pc_gamg->m_useSA ? bs : 1, pc_gamg->m_data_cols, bs,
                             &Parr[level1], &coarse_data, &nactivepe, &Aarr[level1] );
      CHKERRQ(ierr);
      ierr = PetscLogEventEnd(gamg_setup_stages[SET2],0,0,0,0);CHKERRQ(ierr);
      ierr = MatGetSize( Aarr[level1], &M, &N );CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"\t\t[%d]%s %d) N=%d, bs=%d, n data cols=%d, %d active pes\n",0,__FUNCT__,level1,N,bs,pc_gamg->m_data_cols,nactivepe);
      /* coarse grids with SA can have zero row/cols from singleton aggregates */
      /* the aggregation method can probably guarantee this does not happen! - be safe for now */
      if( PETSC_TRUE ){
        Vec diag; PetscScalar *data_arr,v; PetscInt Istart,Iend,kk,nloc,id;
        ierr = MatGetOwnershipRange(Aarr[level1], &Istart, &Iend); CHKERRQ(ierr);
        nloc = Iend-Istart;
        ierr = MatGetVecs( Aarr[level1], &diag, 0 ); CHKERRQ(ierr);
        ierr = MatGetDiagonal( Aarr[level1], diag ); CHKERRQ(ierr);
        ierr = VecGetArray( diag, &data_arr ); CHKERRQ(ierr);
        for( kk=0 ; kk<nloc ; kk++ ){
          if( data_arr[kk] == 0.0 ) {
            id = kk + Istart; v = 1.e-1;
            ierr = MatSetValues(Aarr[level1],1,&id,1,&id,&v,INSERT_VALUES); CHKERRQ(ierr);
            /* PetscPrintf(PETSC_COMM_SELF,"[%d]%s warning: added diag to zero (%d) on level %d \n",mype,__FUNCT__,id,level); */
          }
        }
        ierr = VecRestoreArray( diag, &data_arr ); CHKERRQ(ierr);
        ierr = VecDestroy( &diag ); CHKERRQ(ierr);
        ierr = MatAssemblyBegin(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        ierr = MatAssemblyEnd(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      }
    }
    else{
      break;
    }
    data = coarse_data;
  }
  ierr = PetscFree( coarse_data ); CHKERRQ( ierr );
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s %d levels\n",0,__FUNCT__,level + 1);
  pc_gamg->m_data = 0; /* coordinate data was consumed and freed above */
  pc_gamg->m_Nlevels = level + 1;
  fine_level = level;
  ierr = PCMGSetLevels(a_pc,pc_gamg->m_Nlevels,PETSC_NULL);CHKERRQ(ierr);

  /* set default smoothers */
  for ( lidx=1, level = pc_gamg->m_Nlevels-2;
        lidx <= fine_level;
        lidx++, level-- ) {
    PetscReal emax, emin;
    KSP smoother; PC subpc;
    ierr = PCMGGetSmoother( a_pc, lidx, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetType( smoother, KSPCHEBYCHEV );CHKERRQ(ierr);
    if( emaxs[level] > 0.0 ) emax = emaxs[level];
    else{ /* eigen estimate 'emax' */
      KSP eksp; Mat Lmat = Aarr[level];
      Vec bb, xx; PC pc;

      ierr = MatGetVecs( Lmat, &bb, 0 ); CHKERRQ(ierr);
      ierr = MatGetVecs( Lmat, &xx, 0 ); CHKERRQ(ierr);
      {
        PetscRandom rctx;
        ierr = PetscRandomCreate(wcomm,&rctx);CHKERRQ(ierr);
        ierr = PetscRandomSetFromOptions(rctx);CHKERRQ(ierr);
        ierr = VecSetRandom(bb,rctx);CHKERRQ(ierr);
        ierr = PetscRandomDestroy( &rctx ); CHKERRQ(ierr);
      }
      ierr = KSPCreate(wcomm,&eksp);CHKERRQ(ierr);
      ierr = KSPSetType( eksp, KSPCG ); CHKERRQ(ierr);
      ierr = KSPSetInitialGuessNonzero( eksp, PETSC_FALSE ); CHKERRQ(ierr);
      ierr = KSPSetOperators( eksp, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN ); CHKERRQ( ierr );
      ierr = KSPGetPC( eksp, &pc );CHKERRQ( ierr );
      ierr = PCSetType( pc, PETSC_GAMG_SMOOTHER ); CHKERRQ(ierr); /* should be same as above */
      ierr = KSPSetTolerances( eksp, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 10 ); CHKERRQ(ierr);
      /* ierr = KSPSetConvergenceTest( eksp, KSPSkipConverged, 0, 0 ); CHKERRQ(ierr); */
      ierr = KSPSetNormType( eksp, KSP_NORM_NONE ); CHKERRQ(ierr);

      ierr = KSPSetComputeSingularValues( eksp, PETSC_TRUE ); CHKERRQ(ierr);
      ierr = KSPSolve( eksp, bb, xx ); CHKERRQ(ierr);
      ierr = KSPComputeExtremeSingularValues( eksp, &emax, &emin ); CHKERRQ(ierr);
      ierr = VecDestroy( &xx ); CHKERRQ(ierr);
      ierr = VecDestroy( &bb ); CHKERRQ(ierr);
      ierr = KSPDestroy( &eksp ); CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"%s max eigen = %e min = %e\n",__FUNCT__,emax,emin);
    }
    {
      PetscInt N1, N0, tt;
      ierr = MatGetSize( Aarr[level], &N1, &tt ); CHKERRQ(ierr);
      ierr = MatGetSize( Aarr[level+1], &N0, &tt ); CHKERRQ(ierr);
      emin = .5*emax/((PetscReal)N1/(PetscReal)N0); /* this should be about the coarsening rate */
      emax *= 1.05;
    }
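    /* Rationale (added note, my interpretation): the Chebyshev smoother damps
       eigenmodes in [emin, emax].  emax is inflated by 5% as a safety margin
       on the estimate, and emin is set from emax divided by the observed
       coarsening ratio N1/N0, so the smoother concentrates on the upper part
       of the spectrum that coarse-grid correction cannot reduce. */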
    ierr = KSPSetOperators( smoother, Aarr[level], Aarr[level], DIFFERENT_NONZERO_PATTERN ); CHKERRQ(ierr);
    ierr = KSPChebychevSetEigenvalues( smoother, emax, emin );CHKERRQ(ierr);
    ierr = KSPSetTolerances(smoother,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,2); CHKERRQ(ierr);
    ierr = KSPGetPC( smoother, &subpc ); CHKERRQ(ierr);
    ierr = PCSetType( subpc, PETSC_GAMG_SMOOTHER ); CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
  }
  {
    KSP smoother; /* coarse grid */
    Mat Lmat = Aarr[pc_gamg->m_Nlevels-1];
    ierr = PCMGGetSmoother( a_pc, 0, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetOperators( smoother, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN ); CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
  }
  /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
  ierr = PCSetFromOptions_MG(a_pc); CHKERRQ(ierr);
  {
    PetscBool galerkin;
    ierr = PCMGGetGalerkin( a_pc, &galerkin ); CHKERRQ(ierr);
    if( galerkin ){
      SETERRQ(wcomm,PETSC_ERR_ARG_WRONG,"GAMG does Galerkin coarsening manually, so it must not be turned on in PC_MG.");
    }
  }

  /* set interpolation between the levels, create timer stages, clean up */
  if( PETSC_FALSE ) {
    char str[32];
    sprintf(str,"MG Level %d (%d)",0,pc_gamg->m_Nlevels-1);
    PetscLogStageRegister(str, &gamg_stages[fine_level]);
  }
  for ( lidx=0, level=pc_gamg->m_Nlevels-1;
        lidx<fine_level;
        lidx++, level-- ){
    ierr = PCMGSetInterpolation( a_pc, lidx+1, Parr[level] );CHKERRQ(ierr);
    if( !PETSC_TRUE ) { /* debug output, disabled */
      PetscViewer viewer; char fname[32];
      sprintf(fname,"Pmat_%d.m",level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Parr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer ); CHKERRQ(ierr);
      sprintf(fname,"Amat_%d.m",level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Aarr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer ); CHKERRQ(ierr);
    }
    ierr = MatDestroy( &Parr[level] ); CHKERRQ(ierr);
    ierr = MatDestroy( &Aarr[level] ); CHKERRQ(ierr);
    if( PETSC_FALSE ) {
      char str[32];
      sprintf(str,"MG Level %d (%d)",lidx+1,level-1);
      PetscLogStageRegister(str, &gamg_stages[level-1]);
    }
  }

  /* setupcalled is set to 0 so that MG is setup from scratch */
  a_pc->setupcalled = 0;
  ierr = PCSetUp_MG(a_pc);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* ------------------------------------------------------------------------- */
/*
   PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
   that was created with PCCreate_GAMG().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_GAMG"
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG   *mg = (PC_MG*)pc->data;
  PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_GAMG(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_GAMG"
PetscErrorCode PCSetFromOptions_GAMG(PC pc)
{
  /* PetscErrorCode ierr; */
  /* PC_MG   *mg = (PC_MG*)pc->data; */
  /* PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx; */
  /* MPI_Comm comm = ((PetscObject)pc)->comm; */

  PetscFunctionBegin;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_GAMG - Creates a GAMG preconditioner context, PC_GAMG

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/
/*MC
     PCGAMG - Use algebraic multigrid preconditioning. This preconditioner requires
     that you provide the fine-grid discretization matrix and coordinates on the fine grid.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (MGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (MGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (MGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
   GAMG options:

   Level: intermediate

   Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
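/* Example usage (a sketch, not from the original source; it assumes the
   generic PCSetCoordinates() interface that dispatches to
   PCSetCoordinates_GAMG above, and a 2D problem with interleaved vertex
   coordinates {x0,y0,x1,y1,...}):

      KSP ksp;  PC pc;
      KSPCreate(comm,&ksp);
      KSPSetOperators(ksp,Amat,Amat,SAME_NONZERO_PATTERN);
      KSPGetPC(ksp,&pc);
      PCSetType(pc,PCGAMG);
      PCSetCoordinates(pc,2,coords);
      KSPSolve(ksp,b,x);
*/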
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_GAMG"
PetscErrorCode PCCreate_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_GAMG        *pc_gamg;
  PC_MG          *mg;
  PetscClassId   cookie;

  PetscFunctionBegin;
  /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCGAMG);CHKERRQ(ierr);

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_GAMG,&pc_gamg);CHKERRQ(ierr);
  mg = (PC_MG*)pc->data;
  mg->innerctx = pc_gamg;

  pc_gamg->m_Nlevels = -1;

  /* overwrite the pointers of PCMG by the functions of PCGAMG */
  pc->ops->setfromoptions = PCSetFromOptions_GAMG;
  pc->ops->setup          = PCSetUp_GAMG;
  pc->ops->reset          = PCReset_GAMG;
  pc->ops->destroy        = PCDestroy_GAMG;

  ierr = PetscObjectComposeFunctionDynamic( (PetscObject)pc,
                                            "PCSetCoordinates_C",
                                            "PCSetCoordinates_GAMG",
                                            PCSetCoordinates_GAMG);CHKERRQ(ierr);

  ierr = PetscClassIdRegister("GAMG Setup",&cookie);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-createProl",  cookie, &gamg_setup_stages[SET1]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-partLevel",   cookie, &gamg_setup_stages[SET2]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-MIS Graph",   cookie, &gamg_setup_stages[SET3]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-MIS-Agg",     cookie, &gamg_setup_stages[SET4]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-growSupp",    cookie, &gamg_setup_stages[SET5]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-tri-Prol",    cookie, &gamg_setup_stages[SET6]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-sa-init",     cookie, &gamg_setup_stages[SET7]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-sa-frmProl0", cookie, &gamg_setup_stages[SET8]);CHKERRQ(ierr);
  ierr = PetscLogEventRegister("GAMG-sa-smooth",   cookie, &gamg_setup_stages[SET9]);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
EXTERN_C_END