/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
*/
#include <../src/ksp/pc/impls/gamg/gamg.h>

PetscLogEvent gamg_setup_stages[NUM_SET];

/* Private context for the GAMG preconditioner */
typedef struct gamg_TAG {
  PetscInt  m_dim;
  PetscInt  m_Nlevels;
  PetscInt  m_data_sz;
  PetscInt  m_data_rows;
  PetscInt  m_data_cols;
  PetscBool m_useSA;
  PetscReal *m_data; /* blocked vector of vertex data on fine grid (coordinates) */
} PC_GAMG;

/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_GAMG

   Input Parameter:
   .  pc - the preconditioner context
*/
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_GAMG"
PetscErrorCode PCSetCoordinates_GAMG( PC a_pc, PetscInt a_ndm, PetscReal *a_coords )
{
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,bs,my0,kk,ii,jj,nloc,Iend;
  Mat            Amat = a_pc->pmat;
  PetscBool      flag;
  char           str[64];

  PetscFunctionBegin;
  ierr = MatGetBlockSize( Amat, &bs ); CHKERRQ( ierr );
  ierr = MatGetOwnershipRange( Amat, &my0, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;
  if((Iend-my0)%bs != 0) SETERRQ1(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Local size %d not divisible by block size.",nloc);

  ierr = PetscOptionsGetString(PETSC_NULL,"-pc_gamg_type",str,64,&flag); CHKERRQ( ierr );
  pc_gamg->m_useSA = (PetscBool)(flag && strcmp(str,"sa") == 0);

  pc_gamg->m_data_rows = 1;
  if(a_coords == 0) pc_gamg->m_useSA = PETSC_TRUE; /* use SA if no data */
  if( !pc_gamg->m_useSA ) pc_gamg->m_data_cols = a_ndm; /* coordinates */
  else { /* SA: null space vectors */
    if(a_coords != 0 && bs==1 ) pc_gamg->m_data_cols = 1; /* scalar w/ coords and SA (not needed) */
    else if(a_coords != 0) pc_gamg->m_data_cols = (a_ndm==2 ? 3 : 6); /* elasticity */
    else pc_gamg->m_data_cols = bs; /* no data, force SA with constant null space vectors */
    pc_gamg->m_data_rows = bs;
  }
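
  /*
     Layout note (added for clarity): with SA, m_data holds the near-null-space
     (rigid body) modes of elasticity, stored column-major per level, which the
     loop below fills in.  For a vertex at (x,y,z) with bs=3, the local 3x6
     block is, up to scaling,

          | 1 0 0   0   z  -y |
          | 0 1 0  -z   0   x |
          | 0 0 1   y  -x   0 |

     i.e. three translational plus three rotational modes; in 2D it is two
     translations plus one rotation, hence the 3 and 6 column counts above.
  */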
  arrsz = nloc*pc_gamg->m_data_rows*pc_gamg->m_data_cols;

  /* create data - syntactic sugar that should be refactored at some point */
  if (!pc_gamg->m_data || (pc_gamg->m_data_sz != arrsz)) {
    ierr = PetscFree( pc_gamg->m_data ); CHKERRQ(ierr);
    ierr = PetscMalloc((arrsz+1)*sizeof(PetscReal), &pc_gamg->m_data ); CHKERRQ(ierr);
  }
  for(kk=0;kk<arrsz;kk++) pc_gamg->m_data[kk] = -999.;
  pc_gamg->m_data[arrsz] = -99.; /* sentinel to catch overwrites */
  /* copy data in - column oriented */
  if( pc_gamg->m_useSA ) {
    const PetscInt M = Iend - my0; /* number of local equations */
    for(kk=0;kk<nloc;kk++){
      PetscReal *data = &pc_gamg->m_data[kk*bs];
      if( pc_gamg->m_data_cols==1 ) *data = 1.0;
      else {
        for(ii=0;ii<bs;ii++)
          for(jj=0;jj<bs;jj++)
            if(ii==jj) data[ii*M + jj] = 1.0; /* translational modes */
            else data[ii*M + jj] = 0.0;
        if( a_coords != 0 ) {
          if( a_ndm == 2 ){ /* rotational modes */
            data += 2*M;
            data[0] = -a_coords[2*kk+1];
            data[1] =  a_coords[2*kk];
          }
          else {
            data += 3*M;
            data[0] =  0.0;              data[M+0] =  a_coords[3*kk+2]; data[2*M+0] = -a_coords[3*kk+1];
            data[1] = -a_coords[3*kk+2]; data[M+1] =  0.0;              data[2*M+1] =  a_coords[3*kk];
            data[2] =  a_coords[3*kk+1]; data[M+2] = -a_coords[3*kk];   data[2*M+2] =  0.0;
          }
        }
      }
    }
  }
  else {
    for( kk = 0 ; kk < nloc ; kk++ ){
      for( ii = 0 ; ii < a_ndm ; ii++ ) {
        pc_gamg->m_data[ii*nloc + kk] = a_coords[kk*a_ndm + ii];
      }
    }
  }
  assert(pc_gamg->m_data[arrsz] == -99.);

  pc_gamg->m_data_sz = arrsz;
  pc_gamg->m_dim = a_ndm;

  PetscFunctionReturn(0);
}
EXTERN_C_END

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_GAMG"
PetscErrorCode PCReset_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscFree(pc_gamg->m_data);CHKERRQ(ierr);
  pc_gamg->m_data = 0; pc_gamg->m_data_sz = 0;
  PetscFunctionReturn(0);
}
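
/*
   Usage sketch (added; not part of the build): an application hands coordinates
   to this routine through the "PCSetCoordinates_C" method composed in
   PCCreate_GAMG() below, typically via a PCSetCoordinates() wrapper.  A minimal
   sketch, assuming a 2D scalar problem (bs=1) with nloc local vertices:

     KSP ksp;  PC pc;  PetscReal *coords;   // coords[2*nloc], interleaved x,y
     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,Amat,Amat,DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCGAMG);
     PCSetCoordinates(pc,2,coords);  // dim=2; dispatches to PCSetCoordinates_GAMG()
*/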
/* -------------------------------------------------------------------------- */
/*
   partitionLevel

   Input Parameter:
   .  a_Amat_fine - matrix on this fine (k) level
   .  a_ndata_rows - size of data to move (coarse grid)
   .  a_ndata_cols - size of data to move (coarse grid)
   In/Output Parameter:
   .  a_P_inout - prolongation operator to the next level (k-1)
   .  a_coarse_data - data that need to be moved
   .  a_nactive_proc - number of active procs
   Output Parameter:
   .  a_Amat_crs - coarse matrix that is created (k-1)
*/

#define MIN_EQ_PROC 300
#define TOP_GRID_LIM 1000

#undef __FUNCT__
#define __FUNCT__ "partitionLevel"
PetscErrorCode partitionLevel( Mat a_Amat_fine,
                               PetscInt a_ndata_rows,
                               PetscInt a_ndata_cols,
                               PetscInt a_cbs,
                               Mat *a_P_inout,
                               PetscReal **a_coarse_data,
                               PetscMPIInt *a_nactive_proc,
                               Mat *a_Amat_crs
                               )
{
  PetscErrorCode ierr;
  Mat            Cmat,Pnew,Pold=*a_P_inout;
  IS             new_indices,isnum;
  MPI_Comm       wcomm = ((PetscObject)a_Amat_fine)->comm;
  PetscMPIInt    mype,npe;
  PetscInt       neq,NN,Istart,Iend,Istart0,Iend0,ncrs_new;
  PetscMPIInt    new_npe,nactive,ncrs0;
  PetscBool      flag = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank( wcomm, &mype ); CHKERRQ(ierr);
  ierr = MPI_Comm_size( wcomm, &npe ); CHKERRQ(ierr);
  /* RAP */
  ierr = MatPtAP( a_Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat ); CHKERRQ(ierr);

  ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);
  ierr = MatGetOwnershipRange( Cmat, &Istart0, &Iend0 ); CHKERRQ(ierr);
  ncrs0 = (Iend0-Istart0)/a_cbs; assert((Iend0-Istart0)%a_cbs == 0);

  ierr = PetscOptionsHasName(PETSC_NULL,"-pc_gamg_avoid_repartitioning",&flag);
  CHKERRQ( ierr );
  if( flag ) {
    *a_Amat_crs = Cmat; /* output */
  }
  else {
    /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates accordingly */
    MatPartitioning mpart;
    Mat             adj;
    const PetscInt  *idx,data_sz=a_ndata_rows*a_ndata_cols;
    const PetscInt  stride0=ncrs0*a_ndata_rows,*is_idx;
    PetscInt        is_sz,*isnewproc_idx,ii,jj,kk,strideNew,*tidx;
    /* create sub communicator */
    MPI_Comm        cm,new_comm;
    IS              isnewproc;
    MPI_Group       wg, g2;
    PetscMPIInt     *ranks,*counts;
    IS              isscat;
    PetscScalar     *array;
    Vec             src_crd, dest_crd;
    PetscReal       *data = *a_coarse_data;
    VecScatter      vecscat;

    /* get number of PEs to make active, reduce */
    ierr = MatGetSize( Cmat, &neq, &NN );CHKERRQ(ierr);
    new_npe = neq/MIN_EQ_PROC; /* hardwired minimum number of equations per processor */
    if( new_npe == 0 || neq < TOP_GRID_LIM ) new_npe = 1;
    else if (new_npe >= *a_nactive_proc ) new_npe = *a_nactive_proc; /* no change, rare */

    ierr = PetscMalloc( npe*sizeof(PetscMPIInt), &ranks ); CHKERRQ(ierr);
    ierr = PetscMalloc( npe*sizeof(PetscMPIInt), &counts ); CHKERRQ(ierr);

    ierr = MPI_Allgather( &ncrs0, 1, MPI_INT, counts, 1, MPI_INT, wcomm ); CHKERRQ(ierr);
    assert(counts[mype]==ncrs0);
    /* count real active pes */
    for( nactive = jj = 0 ; jj < npe ; jj++) {
      if( counts[jj] != 0 ) {
        ranks[nactive++] = jj;
      }
    }
    assert(nactive>=new_npe);
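
    /*
       Process-reduction heuristic (added note): new_npe = neq/MIN_EQ_PROC keeps
       at least MIN_EQ_PROC equations on each active process.  For example, a
       coarse grid with neq = 10000 equations gives new_npe = 10000/300 = 33
       active processes, regardless of how many ranks the fine grid used; grids
       below TOP_GRID_LIM equations collapse onto a single process.
    */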
    PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s npe (active): %d --> %d. new npe = %d, neq = %d\n",mype,__FUNCT__,*a_nactive_proc,nactive,new_npe,neq);
    *a_nactive_proc = new_npe; /* output */

    ierr = MPI_Comm_group( wcomm, &wg ); CHKERRQ(ierr);
    ierr = MPI_Group_incl( wg, nactive, ranks, &g2 ); CHKERRQ(ierr);
    ierr = MPI_Comm_create( wcomm, g2, &cm ); CHKERRQ(ierr);
    if( cm != MPI_COMM_NULL ) {
      ierr = PetscCommDuplicate( cm, &new_comm, PETSC_NULL ); CHKERRQ(ierr);
      ierr = MPI_Comm_free( &cm ); CHKERRQ(ierr);
    }
    ierr = MPI_Group_free( &wg ); CHKERRQ(ierr);
    ierr = MPI_Group_free( &g2 ); CHKERRQ(ierr);

    /* MatPartitioningApply calls MatConvert, which is collective */
    ierr = PetscLogEventBegin(gamg_setup_stages[SET12],0,0,0,0);CHKERRQ(ierr);
    if( a_cbs == 1) {
      ierr = MatConvert( Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);
    }
    else {
      /* make a scalar matrix to partition */
      Mat tMat;
      PetscInt ncols; const PetscScalar *vals; const PetscInt *idx;
      MatInfo info;
      ierr = MatGetInfo(Cmat,MAT_LOCAL,&info); CHKERRQ(ierr);
      ncols = (PetscInt)info.nz_used/((ncrs0+1)*a_cbs*a_cbs)+1;

      ierr = MatCreateMPIAIJ( wcomm, ncrs0, ncrs0,
                              PETSC_DETERMINE, PETSC_DETERMINE,
                              2*ncols, PETSC_NULL, ncols, PETSC_NULL,
                              &tMat );
      CHKERRQ(ierr);

      for ( ii = Istart0; ii < Iend0; ii++ ) {
        PetscInt dest_row = ii/a_cbs;
        ierr = MatGetRow(Cmat,ii,&ncols,&idx,&vals); CHKERRQ(ierr);
        for( jj = 0 ; jj < ncols ; jj++ ){
          PetscInt dest_col = idx[jj]/a_cbs;
          PetscScalar v = 1.0;
          ierr = MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES); CHKERRQ(ierr);
        }
        ierr = MatRestoreRow(Cmat,ii,&ncols,&idx,&vals); CHKERRQ(ierr);
      }
      ierr = MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

      ierr = MatConvert( tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);

      ierr = MatDestroy( &tMat ); CHKERRQ(ierr);
    }

    if( ncrs0 != 0 ){
      /* hack to fix global data that pmetis.c uses in 'adj' */
      for( nactive = jj = 0 ; jj < npe ; jj++ ) {
        if( counts[jj] != 0 ) {
          adj->rmap->range[nactive++] = adj->rmap->range[jj];
        }
      }
      adj->rmap->range[nactive] = adj->rmap->range[npe];

      ierr = MatPartitioningCreate( new_comm, &mpart ); CHKERRQ(ierr);
      ierr = MatPartitioningSetAdjacency( mpart, adj ); CHKERRQ(ierr);
      ierr = MatPartitioningSetFromOptions( mpart ); CHKERRQ(ierr);
      ierr = MatPartitioningSetNParts( mpart, new_npe );CHKERRQ(ierr);
      ierr = MatPartitioningApply( mpart, &isnewproc ); CHKERRQ(ierr);
      ierr = MatPartitioningDestroy( &mpart ); CHKERRQ(ierr);

      /* collect IS info */
      ierr = ISGetLocalSize( isnewproc, &is_sz ); CHKERRQ(ierr);
      ierr = PetscMalloc( a_cbs*is_sz*sizeof(PetscInt), &isnewproc_idx ); CHKERRQ(ierr);
      ierr = ISGetIndices( isnewproc, &is_idx ); CHKERRQ(ierr);
      /* spread the new partitioning across the machine - probably the right thing to do, but machine specific */
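      /*
         Spreading note (added): each partition index is mapped to rank
         part*(npe/new_npe), so the new_npe active owners are placed evenly
         across the whole machine rather than packed onto ranks 0..new_npe-1.
         E.g. npe = 8, new_npe = 2: partitions {0,1} land on ranks {0,4}.
      */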
      NN = npe/new_npe;
      for(kk=0,jj=0;kk<is_sz;kk++){
        for(ii=0 ; ii<a_cbs ; ii++, jj++ ) {
          isnewproc_idx[jj] = is_idx[kk] * NN; /* distribution */
        }
      }
      ierr = ISRestoreIndices( isnewproc, &is_idx ); CHKERRQ(ierr);
      ierr = ISDestroy( &isnewproc ); CHKERRQ(ierr);
      is_sz *= a_cbs;

      ierr = PetscCommDestroy( &new_comm ); CHKERRQ(ierr);
    }
    else {
      isnewproc_idx = 0;
      is_sz = 0;
    }
    ierr = MatDestroy( &adj ); CHKERRQ(ierr);
    ierr = ISCreateGeneral( wcomm, is_sz, isnewproc_idx, PETSC_COPY_VALUES, &isnewproc ); CHKERRQ(ierr);
    if( isnewproc_idx != 0 ) {
      ierr = PetscFree( isnewproc_idx ); CHKERRQ(ierr);
    }

    /*
      Create an index set from the isnewproc index set to indicate the mapping TO
    */
    ierr = ISPartitioningToNumbering( isnewproc, &isnum ); CHKERRQ(ierr);
    /*
      Determine how many elements are assigned to each processor
    */
    ierr = ISPartitioningCount( isnewproc, npe, counts ); CHKERRQ(ierr);
    ierr = ISDestroy( &isnewproc ); CHKERRQ(ierr);
    ncrs_new = counts[mype]/a_cbs;
    strideNew = ncrs_new*a_ndata_rows;
    ierr = PetscLogEventEnd(gamg_setup_stages[SET12],0,0,0,0); CHKERRQ(ierr);

    /* Create a vector to contain the newly ordered element information */
    ierr = VecCreate( wcomm, &dest_crd ); CHKERRQ(ierr);
    ierr = VecSetSizes( dest_crd, data_sz*ncrs_new, PETSC_DECIDE ); CHKERRQ(ierr);
    ierr = VecSetFromOptions( dest_crd ); CHKERRQ(ierr); /* funny vector - get global options? */
    /*
      There are 'a_ndata_rows*a_ndata_cols' data items per node; one can think of
      the vectors as having a block size of data_sz.  Note, ISs are expanded into
      equation space by 'a_cbs'.
    */
    ierr = PetscMalloc( (ncrs0*data_sz)*sizeof(PetscInt), &tidx ); CHKERRQ(ierr);
    ierr = ISGetIndices( isnum, &idx ); CHKERRQ(ierr);
    for(ii=0,jj=0; ii<ncrs0 ; ii++) {
      PetscInt id = idx[ii*a_cbs]/a_cbs; /* get node back */
      for( kk=0; kk<data_sz ; kk++, jj++) tidx[jj] = id*data_sz + kk;
    }
    ierr = ISRestoreIndices( isnum, &idx ); CHKERRQ(ierr);
    ierr = ISCreateGeneral( wcomm, data_sz*ncrs0, tidx, PETSC_COPY_VALUES, &isscat );
    CHKERRQ(ierr);
    ierr = PetscFree( tidx ); CHKERRQ(ierr);
    /*
      Create a vector to contain the original vertex information for each element
    */
    ierr = VecCreateSeq( PETSC_COMM_SELF, data_sz*ncrs0, &src_crd ); CHKERRQ(ierr);
    for( jj=0; jj<a_ndata_cols ; jj++ ) {
      for( ii=0 ; ii<ncrs0 ; ii++) {
        for( kk=0; kk<a_ndata_rows ; kk++ ) {
          PetscInt ix = ii*a_ndata_rows + kk + jj*stride0, jx = ii*data_sz + kk*a_ndata_cols + jj;
          ierr = VecSetValues( src_crd, 1, &jx, &data[ix], INSERT_VALUES ); CHKERRQ(ierr);
        }
      }
    }
    ierr = VecAssemblyBegin(src_crd); CHKERRQ(ierr);
    ierr = VecAssemblyEnd(src_crd); CHKERRQ(ierr);
    /*
      Scatter the element vertex information (still in the original vertex ordering)
      to the correct processor
    */
    ierr = VecScatterCreate( src_crd, PETSC_NULL, dest_crd, isscat, &vecscat);
    CHKERRQ(ierr);
    ierr = ISDestroy( &isscat ); CHKERRQ(ierr);
    ierr = VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterDestroy( &vecscat ); CHKERRQ(ierr);
    ierr = VecDestroy( &src_crd ); CHKERRQ(ierr);
    /*
      Put the element vertex data into a new allocation of the gdata->ele
    */
    ierr = PetscFree( *a_coarse_data ); CHKERRQ(ierr);
    ierr = PetscMalloc( data_sz*ncrs_new*sizeof(PetscReal), a_coarse_data ); CHKERRQ(ierr);
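
    /*
       Index-math note (added): the scatter vector interleaves the data_sz items
       of each node (jx = ii*data_sz + kk*a_ndata_cols + jj), while the level
       data array is column-major (ix = ii*a_ndata_rows + kk + jj*stride).
       E.g. with a_ndata_rows = 1, a_ndata_cols = 2 (2D coordinates), node ii's
       x lives at ix = ii and its y at ix = ii + stride, but both travel
       adjacently in the scatter at jx = 2*ii and 2*ii + 1.
    */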
    ierr = VecGetArray( dest_crd, &array ); CHKERRQ(ierr);
    data = *a_coarse_data;
    for( jj=0; jj<a_ndata_cols ; jj++ ) {
      for( ii=0 ; ii<ncrs_new ; ii++) {
        for( kk=0; kk<a_ndata_rows ; kk++ ) {
          PetscInt ix = ii*a_ndata_rows + kk + jj*strideNew, jx = ii*data_sz + kk*a_ndata_cols + jj;
          data[ix] = PetscRealPart(array[jx]);
          array[jx] = 1.e300; /* poison consumed entries (debugging aid) */
        }
      }
    }
    ierr = VecRestoreArray( dest_crd, &array ); CHKERRQ(ierr);
    ierr = VecDestroy( &dest_crd ); CHKERRQ(ierr);

    /*
      Invert for MatGetSubMatrix
    */
    ierr = ISInvertPermutation( isnum, ncrs_new*a_cbs, &new_indices ); CHKERRQ(ierr);
    ierr = ISSort( new_indices ); CHKERRQ(ierr); /* is this needed? */
    ierr = ISDestroy( &isnum ); CHKERRQ(ierr);
    /* A_crs output */
    ierr = MatGetSubMatrix( Cmat, new_indices, new_indices, MAT_INITIAL_MATRIX, a_Amat_crs );
    CHKERRQ(ierr);

    ierr = MatDestroy( &Cmat ); CHKERRQ(ierr);
    Cmat = *a_Amat_crs; /* output */
    ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);

    /* prolongator */
    ierr = MatGetOwnershipRange( Pold, &Istart, &Iend ); CHKERRQ(ierr);
    {
      IS findices;
      ierr = ISCreateStride(wcomm,Iend-Istart,Istart,1,&findices); CHKERRQ(ierr);
      ierr = MatGetSubMatrix( Pold, findices, new_indices, MAT_INITIAL_MATRIX, &Pnew );
      CHKERRQ(ierr);
      ierr = ISDestroy( &findices ); CHKERRQ(ierr);
    }
    ierr = MatDestroy( a_P_inout ); CHKERRQ(ierr);
    *a_P_inout = Pnew; /* output */

    ierr = ISDestroy( &new_indices ); CHKERRQ(ierr);
    ierr = PetscFree( counts ); CHKERRQ(ierr);
    ierr = PetscFree( ranks ); CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}

#define GAMG_MAXLEVELS 30
#if defined(PETSC_USE_LOG)
PetscLogStage gamg_stages[20];
#endif
/* -------------------------------------------------------------------------- */
/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                  by setting data structures and options.

   Input Parameter:
   .  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_GAMG"
PetscErrorCode PCSetUp_GAMG( PC a_pc )
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  Mat            Amat = a_pc->mat, Pmat = a_pc->pmat;
  PetscInt       fine_level, level, level1, M, N, bs, nloc, lidx, Istart, Iend;
  MPI_Comm       wcomm = ((PetscObject)a_pc)->comm;
  PetscMPIInt    mype,npe,nactivepe;
  PetscBool      isOK;
  Mat            Aarr[GAMG_MAXLEVELS], Parr[GAMG_MAXLEVELS];
  PetscReal      *coarse_data = 0, *data, emaxs[GAMG_MAXLEVELS];
  MatInfo        info;

  PetscFunctionBegin;
  if( a_pc->setupcalled ) {
    /* no state data in GAMG to destroy */
    ierr = PCReset_MG( a_pc ); CHKERRQ(ierr);
  }
  ierr = MPI_Comm_rank(wcomm,&mype);CHKERRQ(ierr);
  ierr = MPI_Comm_size(wcomm,&npe);CHKERRQ(ierr);
  /* GAMG requires input of the fine-grid matrix; it determines nlevels. */
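  /*
     Setup flow (added note): starting from the fine-grid matrix, each pass of
     the loop below (i) builds a prolongator P_k with createProlongation(),
     (ii) forms the Galerkin coarse operator A_{k+1} = P_k' A_k P_k and
     repartitions it in partitionLevel(), and (iii) repeats until the grid is
     smaller than TOP_GRID_LIM or only one process remains active.  Levels are
     then handed to the PCMG infrastructure in reverse order (coarsest = 0).
  */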
  ierr = MatGetBlockSize( Amat, &bs ); CHKERRQ(ierr);
  ierr = MatGetSize( Amat, &M, &N );CHKERRQ(ierr);
  ierr = MatGetOwnershipRange( Amat, &Istart, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-Istart)/bs; assert((Iend-Istart)%bs == 0);

  /* create default (SA) data if the user did not provide any */
  if( pc_gamg->m_data == 0 && nloc > 0 ) {
    ierr = PCSetCoordinates_GAMG( a_pc, -1, 0 ); CHKERRQ( ierr );
  }
  data = pc_gamg->m_data;

  /* Get A_i and R_i */
  ierr = MatGetInfo(Amat,MAT_GLOBAL_SUM,&info); CHKERRQ(ierr);
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s level %d N=%d, n data rows=%d, n data cols=%d, nnz/row (ave)=%d, np=%d\n",
              mype,__FUNCT__,0,N,pc_gamg->m_data_rows,pc_gamg->m_data_cols,(PetscInt)(info.nz_used/(PetscReal)N),npe);
  for ( level=0, Aarr[0] = Pmat, nactivepe = npe; /* hardwired stopping logic */
        level < GAMG_MAXLEVELS-1 && (level==0 || M>TOP_GRID_LIM) && (npe==1 || nactivepe>1);
        level++ ){
    level1 = level + 1;
    ierr = PetscLogEventBegin(gamg_setup_stages[SET1],0,0,0,0);CHKERRQ(ierr);
    ierr = createProlongation(Aarr[level], data, pc_gamg->m_dim, pc_gamg->m_data_cols, pc_gamg->m_useSA,
                              level, &bs, &Parr[level1], &coarse_data, &isOK, &emaxs[level] );
    CHKERRQ(ierr);
    ierr = PetscFree( data ); CHKERRQ( ierr );
    ierr = PetscLogEventEnd(gamg_setup_stages[SET1],0,0,0,0);CHKERRQ(ierr);

    if(level==0) Aarr[0] = Amat; /* use Pmat for finest level setup, but use mat for solver */

    if( isOK ) {
      ierr = PetscLogEventBegin(gamg_setup_stages[SET2],0,0,0,0);CHKERRQ(ierr);
      ierr = partitionLevel( Aarr[level], pc_gamg->m_useSA ? bs : 1, pc_gamg->m_data_cols, bs,
                             &Parr[level1], &coarse_data, &nactivepe, &Aarr[level1] );
      CHKERRQ(ierr);
      ierr = PetscLogEventEnd(gamg_setup_stages[SET2],0,0,0,0);CHKERRQ(ierr);
      ierr = MatGetSize( Aarr[level1], &M, &N );CHKERRQ(ierr);
      ierr = MatGetInfo(Aarr[level1],MAT_GLOBAL_SUM,&info); CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"\t\t[%d]%s %d) N=%d, bs=%d, n data cols=%d, nnz/row (ave)=%d, %d active pes\n",
                  mype,__FUNCT__,level1,N,bs,pc_gamg->m_data_cols,(PetscInt)(info.nz_used/(PetscReal)N),nactivepe);
      /* coarse grids with SA can have zero row/cols from singleton aggregates */
      /* the aggregation method could probably guarantee this does not happen - be safe for now */
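      /*
         Why the shift (added note): an exactly zero diagonal entry makes the
         LU/ILU factorizations used by the smoothers break down on a zero
         pivot, so such entries are replaced with a tiny nonzero below.  The
         1.e-10 value must stay consistent with the factorization's hardwired
         small-pivot treatment.
      */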
      if( PETSC_TRUE ){
        Vec diag; PetscScalar *data_arr,v; PetscInt Istart,Iend,kk,nloceq,id;
        v = 1.e-10; /* LU factor has hardwired numbers for small diags so this needs to match (yuk) */
        ierr = MatGetOwnershipRange(Aarr[level1], &Istart, &Iend); CHKERRQ(ierr);
        nloceq = Iend-Istart;
        ierr = MatGetVecs( Aarr[level1], &diag, 0 ); CHKERRQ(ierr);
        ierr = MatGetDiagonal( Aarr[level1], diag ); CHKERRQ(ierr);
        ierr = VecGetArray( diag, &data_arr ); CHKERRQ(ierr);
        for(kk=0;kk<nloceq;kk++){
          if(data_arr[kk]==0.0) {
            id = kk + Istart;
            ierr = MatSetValues(Aarr[level1],1,&id,1,&id,&v,INSERT_VALUES);
            CHKERRQ(ierr);
            PetscPrintf(PETSC_COMM_SELF,"\t[%d]%s warning: added diag to zero (%d) on level %d \n",mype,__FUNCT__,id,level);
          }
        }
        ierr = VecRestoreArray( diag, &data_arr ); CHKERRQ(ierr);
        ierr = VecDestroy( &diag ); CHKERRQ(ierr);
        ierr = MatAssemblyBegin(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        ierr = MatAssemblyEnd(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      }
    }
    else {
      coarse_data = 0;
      break;
    }
    data = coarse_data;
  }
  if( coarse_data ) {
    ierr = PetscFree( coarse_data ); CHKERRQ( ierr );
  }
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s %d levels\n",0,__FUNCT__,level + 1);
  pc_gamg->m_data = 0; /* destroyed coordinate data */
  pc_gamg->m_Nlevels = level + 1;
  fine_level = level;
  ierr = PCMGSetLevels(a_pc,pc_gamg->m_Nlevels,PETSC_NULL);CHKERRQ(ierr);

  /* set default smoothers */
  for ( lidx=1, level = pc_gamg->m_Nlevels-2;
        lidx <= fine_level;
        lidx++, level--) {
    PetscReal emax, emin;
    KSP smoother; PC subpc;
    ierr = PCMGGetSmoother( a_pc, lidx, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetType( smoother, KSPCHEBYCHEV );CHKERRQ(ierr);
    if( emaxs[level] > 0.0 ) emax=emaxs[level];
    else { /* eigen estimate 'emax' */
      KSP eksp; Mat Lmat = Aarr[level];
      Vec bb, xx; PC pc;

      ierr = MatGetVecs( Lmat, &bb, 0 ); CHKERRQ(ierr);
      ierr = MatGetVecs( Lmat, &xx, 0 ); CHKERRQ(ierr);
      {
        PetscRandom rctx;
        ierr = PetscRandomCreate(wcomm,&rctx);CHKERRQ(ierr);
        ierr = PetscRandomSetFromOptions(rctx);CHKERRQ(ierr);
        ierr = VecSetRandom(bb,rctx);CHKERRQ(ierr);
        ierr = PetscRandomDestroy( &rctx ); CHKERRQ(ierr);
      }
      ierr = KSPCreate(wcomm,&eksp);CHKERRQ(ierr);
      ierr = KSPSetType( eksp, KSPCG ); CHKERRQ(ierr);
      ierr = KSPSetInitialGuessNonzero( eksp, PETSC_FALSE ); CHKERRQ(ierr);
      ierr = KSPSetOperators( eksp, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN ); CHKERRQ( ierr );
      ierr = KSPGetPC( eksp, &pc );CHKERRQ( ierr );
      ierr = PCSetType( pc, PCPBJACOBI ); CHKERRQ(ierr); /* should match the smoother's PC */
      ierr = KSPSetTolerances( eksp, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 10 );
      CHKERRQ(ierr);
      /* ierr = KSPSetConvergenceTest( eksp, KSPSkipConverged, 0, 0 ); CHKERRQ(ierr); */
      ierr = KSPSetNormType( eksp, KSP_NORM_NONE ); CHKERRQ(ierr);

      ierr = KSPSetComputeSingularValues( eksp,PETSC_TRUE ); CHKERRQ(ierr);
      ierr = KSPSolve( eksp, bb, xx ); CHKERRQ(ierr);
      ierr = KSPComputeExtremeSingularValues( eksp, &emax, &emin ); CHKERRQ(ierr);
      ierr = VecDestroy( &xx ); CHKERRQ(ierr);
      ierr = VecDestroy( &bb ); CHKERRQ(ierr);
      ierr = KSPDestroy( &eksp ); CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"\t\t\t%s max eigen=%e min=%e PC=%s\n",__FUNCT__,emax,emin,PETSC_GAMG_SMOOTHER);
    }
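    /*
       Chebychev bounds (added note): the smoother only needs to damp the high
       end of the spectrum, so the lower bound is set heuristically to
       emin = emax / (N_fine/N_coarse), i.e. emax divided by the observed
       coarsening ratio, and emax is padded by 5% since the eigenvalue
       estimate is itself only approximate.
    */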
    {
      PetscInt N1, N0, tt;
      ierr = MatGetSize( Aarr[level], &N1, &tt ); CHKERRQ(ierr);
      ierr = MatGetSize( Aarr[level+1], &N0, &tt ); CHKERRQ(ierr);
      emin = 1.*emax/((PetscReal)N1/(PetscReal)N0); /* this should be about the coarsening rate */
      emax *= 1.05;
    }

    ierr = KSPSetOperators( smoother, Aarr[level], Aarr[level], DIFFERENT_NONZERO_PATTERN ); CHKERRQ(ierr);
    ierr = KSPChebychevSetEigenvalues( smoother, emax, emin );CHKERRQ(ierr);
    /* ierr = KSPSetTolerances(smoother,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,2); CHKERRQ(ierr); */
    ierr = KSPGetPC( smoother, &subpc ); CHKERRQ(ierr);
    ierr = PCSetType( subpc, PETSC_GAMG_SMOOTHER ); CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
  }
  {
    KSP smoother; /* coarse grid */
    Mat Lmat = Aarr[pc_gamg->m_Nlevels-1];
    ierr = PCMGGetSmoother( a_pc, 0, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetOperators( smoother, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN );
    CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
  }

  /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
  ierr = PCSetFromOptions_MG(a_pc); CHKERRQ(ierr);
  {
    PetscBool galerkin;
    ierr = PCMGGetGalerkin( a_pc, &galerkin); CHKERRQ(ierr);
    if(galerkin){
      SETERRQ(wcomm,PETSC_ERR_ARG_WRONG, "GAMG does Galerkin coarsening internally, so the PCMG Galerkin option must not be used.");
    }
  }

  /* set interpolation between the levels, create timer stages, clean up */
  if( PETSC_FALSE ) { /* debug: register a log stage per level */
    char str[32];
    sprintf(str,"MG Level %d (%d)",0,pc_gamg->m_Nlevels-1);
    PetscLogStageRegister(str, &gamg_stages[fine_level]);
  }
  for (lidx=0,level=pc_gamg->m_Nlevels-1;
       lidx<fine_level;
       lidx++, level--){
    ierr = PCMGSetInterpolation( a_pc, lidx+1, Parr[level] );CHKERRQ(ierr);
    if( PETSC_FALSE ) { /* debug: dump operators in MATLAB format */
      PetscViewer viewer; char fname[32];
      sprintf(fname,"Pmat_%d.m",level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Parr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer ); CHKERRQ(ierr);
      sprintf(fname,"Amat_%d.m",level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Aarr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer ); CHKERRQ(ierr);
    }
    ierr = MatDestroy( &Parr[level] ); CHKERRQ(ierr);
    ierr = MatDestroy( &Aarr[level] ); CHKERRQ(ierr);
    if( PETSC_FALSE ) { /* debug: register a log stage per level */
      char str[32];
      sprintf(str,"MG Level %d (%d)",lidx+1,level-1);
      PetscLogStageRegister(str, &gamg_stages[level-1]);
    }
  }

  /* setupcalled is set to 0 so that MG is set up from scratch */
  a_pc->setupcalled = 0;
  ierr = PCSetUp_MG(a_pc);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* ------------------------------------------------------------------------- */
/*
   PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
   that was created with PCCreate_GAMG().

   Input Parameter:
   .  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_GAMG"
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_GAMG(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_GAMG"
PetscErrorCode PCSetFromOptions_GAMG(PC pc)
{
  /* PetscErrorCode ierr; */
  /* PC_MG *mg = (PC_MG*)pc->data; */
  /* PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx; */
  /* MPI_Comm comm = ((PetscObject)pc)->comm; */

  PetscFunctionBegin;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_GAMG - Creates a GAMG preconditioner context, PC_GAMG

   Input Parameter:
   .  pc - the preconditioner context

   Application Interface Routine: PCCreate()

*/
/*MC
     PCGAMG - Use algebraic multigrid preconditioning. This preconditioner requires that you provide
       the fine-grid discretization matrix and the coordinates on the fine grid.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
   GAMG options:
+  -pc_gamg_type <sa>: use smoothed aggregation (otherwise geometric, using coordinates)
-  -pc_gamg_avoid_repartitioning: do not repartition coarse grids

   Level: intermediate

  Concepts: multigrid

.seealso:  PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
           PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
           PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
           PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
           PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
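
/*
   Example (added, hedged): a typical command line exercising the options above,
   assuming a hypothetical application "myapp" that calls PCSetCoordinates() as
   sketched earlier:

     ./myapp -ksp_type cg -pc_type gamg -pc_gamg_type sa \
             -pc_mg_cycles 1 -pc_mg_smoothdown 1 -pc_mg_smoothup 1

   Smoothers default to Chebychev with eigenvalue bounds estimated during
   PCSetUp_GAMG(); the coarsest grid is solved by the level-0 KSP set there.
*/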
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_GAMG"
PetscErrorCode PCCreate_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_GAMG        *pc_gamg;
  PC_MG          *mg;
  PetscClassId   cookie;
  static int     count = 0; /* register log events only once */

  PetscFunctionBegin;
  /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCGAMG);CHKERRQ(ierr);

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_GAMG,&pc_gamg);CHKERRQ(ierr);
  mg = (PC_MG*)pc->data;
  mg->innerctx = pc_gamg;

  pc_gamg->m_Nlevels = -1;

  /* overwrite the pointers of PCMG by the functions of PCGAMG */
  pc->ops->setfromoptions = PCSetFromOptions_GAMG;
  pc->ops->setup          = PCSetUp_GAMG;
  pc->ops->reset          = PCReset_GAMG;
  pc->ops->destroy        = PCDestroy_GAMG;

  ierr = PetscObjectComposeFunctionDynamic( (PetscObject)pc,
                                            "PCSetCoordinates_C",
                                            "PCSetCoordinates_GAMG",
                                            PCSetCoordinates_GAMG);CHKERRQ(ierr);
  if( count++ == 0 ) {
    PetscClassIdRegister("GAMG Setup",&cookie);
    PetscLogEventRegister("GAMG: createProl", cookie, &gamg_setup_stages[SET1]);
    PetscLogEventRegister("  make graph", cookie, &gamg_setup_stages[SET3]);
    PetscLogEventRegister("  MIS/Agg", cookie, &gamg_setup_stages[SET4]);
    PetscLogEventRegister("  geo: growSupp", cookie, &gamg_setup_stages[SET5]);
    PetscLogEventRegister("  geo: triangle", cookie, &gamg_setup_stages[SET6]);
    PetscLogEventRegister("    search & set", cookie, &gamg_setup_stages[FIND_V]);
    PetscLogEventRegister("  SA: init", cookie, &gamg_setup_stages[SET7]);
    /* PetscLogEventRegister("  SA: frmProl0", cookie, &gamg_setup_stages[SET8]); */
    PetscLogEventRegister("  SA: smooth", cookie, &gamg_setup_stages[SET9]);
    PetscLogEventRegister("GAMG: partLevel", cookie, &gamg_setup_stages[SET2]);
    PetscLogEventRegister("  PL repartition", cookie, &gamg_setup_stages[SET12]);
    /* PetscLogEventRegister("  PL move data", cookie, &gamg_setup_stages[SET13]); */
    /* PetscLogEventRegister("GAMG: fix", cookie, &gamg_setup_stages[SET10]); */
    /* PetscLogEventRegister("GAMG: set levels", cookie, &gamg_setup_stages[SET11]); */
  }

  PetscFunctionReturn(0);
}
EXTERN_C_END