/*
 GAMG geometric-algebraic multigrid PC - Mark Adams 2011
 */
#include <../src/ksp/pc/impls/gamg/gamg.h>

#if defined PETSC_USE_LOG
PetscLogEvent gamg_setup_events[NUM_SET];
#endif

#define GAMG_MAXLEVELS 30

/*#define GAMG_STAGES*/
#if (defined PETSC_USE_LOG && defined GAMG_STAGES)
static PetscLogStage gamg_stages[GAMG_MAXLEVELS];
#endif

/* Private context for the GAMG preconditioner */
typedef struct gamg_TAG {
  PetscInt   m_dim;
  PetscInt   m_Nlevels;
  PetscInt   m_data_sz;
  PetscInt   m_data_rows;
  PetscInt   m_data_cols;
  PetscBool  m_useSA;
  PetscReal *m_data; /* blocked vector of vertex data on fine grid (coordinates) */
} PC_GAMG;

/* -------------------------------------------------------------------------- */
/*
   PCSetCoordinates_GAMG

   Input Parameter:
   . pc - the preconditioner context
*/
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCSetCoordinates_GAMG"
PetscErrorCode PCSetCoordinates_GAMG( PC a_pc, PetscInt a_ndm, PetscReal *a_coords )
{
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  PetscErrorCode ierr;
  PetscInt       arrsz,bs,my0,kk,ii,jj,nloc,Iend;
  Mat            Amat = a_pc->pmat;
  PetscBool      flag;
  char           str[64];

  PetscFunctionBegin;
  ierr = MatGetBlockSize( Amat, &bs );              CHKERRQ( ierr );
  ierr = MatGetOwnershipRange( Amat, &my0, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-my0)/bs;
  if((Iend-my0)%bs!=0) SETERRQ2(((PetscObject)Amat)->comm,PETSC_ERR_ARG_WRONG, "Bad local size %d: not divisible by block size %d.",Iend-my0,bs);

  ierr = PetscOptionsGetString(PETSC_NULL,"-pc_gamg_type",str,64,&flag); CHKERRQ( ierr );
  pc_gamg->m_useSA = (PetscBool)(flag && strcmp(str,"sa") == 0);

  pc_gamg->m_data_rows = 1;
  if(a_coords == 0) pc_gamg->m_useSA = PETSC_TRUE; /* use SA if no data */
  if( !pc_gamg->m_useSA ) pc_gamg->m_data_cols = a_ndm; /* coordinates */
  else { /* SA: null space vectors */
    if(a_coords != 0 && bs==1 ) pc_gamg->m_data_cols = 1; /* scalar w/ coords and SA (not needed) */
    else if(a_coords != 0) pc_gamg->m_data_cols = (a_ndm==2 ? 3 : 6); /* elasticity */
    else pc_gamg->m_data_cols = bs; /* no data, force SA with constant null space vectors */
    pc_gamg->m_data_rows = bs;
  }
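  /*
     Null-space dimension arithmetic behind the branch above (for reference):
     for elasticity the near-null space is spanned by the rigid body modes,
     so in 2D there are 2 translations + 1 rotation = 3 columns, and in 3D
     there are 3 translations + 3 rotations = 6 columns - hence
     (a_ndm==2 ? 3 : 6). Without coordinates only the bs constant
     (translational) vectors are available.
  */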
  arrsz = nloc*pc_gamg->m_data_rows*pc_gamg->m_data_cols;

  /* create data - syntactic sugar that should be refactored at some point */
  if (!pc_gamg->m_data || (pc_gamg->m_data_sz != arrsz)) {
    ierr = PetscFree( pc_gamg->m_data ); CHKERRQ(ierr);
    ierr = PetscMalloc((arrsz+1)*sizeof(PetscReal), &pc_gamg->m_data ); CHKERRQ(ierr);
  }
  for(kk=0;kk<arrsz;kk++) pc_gamg->m_data[kk] = -999.;
  pc_gamg->m_data[arrsz] = -99.; /* sentinel, checked below */
  /* copy data in - column oriented */
  if( pc_gamg->m_useSA ) {
    const PetscInt M = Iend - my0;
    for(kk=0;kk<nloc;kk++){
      PetscReal *data = &pc_gamg->m_data[kk*bs];
      if( pc_gamg->m_data_cols==1 ) *data = 1.0;
      else {
        for(ii=0;ii<bs;ii++)
          for(jj=0;jj<bs;jj++)
            if(ii==jj) data[ii*M + jj] = 1.0; /* translational modes */
            else data[ii*M + jj] = 0.0;
        if( a_coords != 0 ) {
          if( a_ndm == 2 ){ /* rotational modes */
            data += 2*M;
            data[0] = -a_coords[2*kk+1];
            data[1] =  a_coords[2*kk];
          }
          else {
            data += 3*M;
            data[0] = 0.0;               data[M+0] =  a_coords[3*kk+2]; data[2*M+0] = -a_coords[3*kk+1];
            data[1] = -a_coords[3*kk+2]; data[M+1] = 0.0;               data[2*M+1] =  a_coords[3*kk];
            data[2] =  a_coords[3*kk+1]; data[M+2] = -a_coords[3*kk];   data[2*M+2] = 0.0;
          }
        }
      }
    }
  }
  else {
    for( kk = 0 ; kk < nloc ; kk++ ){
      for( ii = 0 ; ii < a_ndm ; ii++ ) {
        pc_gamg->m_data[ii*nloc + kk] = a_coords[kk*a_ndm + ii];
      }
    }
  }
  assert(pc_gamg->m_data[arrsz] == -99.);

  pc_gamg->m_data_sz = arrsz;
  pc_gamg->m_dim = a_ndm;

  PetscFunctionReturn(0);
}
EXTERN_C_END
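/*
   Usage sketch (hypothetical application code, not part of this file): for a
   3D elasticity problem with bs==3 and node-major interleaved coordinates,
   something like

     PetscReal *coords;   - length 3*nloc, coords[3*k+0..2] = (x,y,z) of node k
     ierr = PCSetType( pc, PCGAMG );           CHKERRQ(ierr);
     ierr = PCSetCoordinates( pc, 3, coords ); CHKERRQ(ierr);

   would land here through the "PCSetCoordinates_C" composed function and
   build the 6 rigid-body modes per node as above.
*/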

/* -----------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "PCReset_GAMG"
PetscErrorCode PCReset_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscFree(pc_gamg->m_data);CHKERRQ(ierr);
  pc_gamg->m_data = 0; pc_gamg->m_data_sz = 0;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   partitionLevel

   Input Parameter:
   . a_Amat_fine - matrix on this fine (k) level
   . a_ndata_rows - size of data to move (coarse grid)
   . a_ndata_cols - size of data to move (coarse grid)
   In/Output Parameter:
   . a_P_inout - prolongation operator to the next level (k-1)
   . a_coarse_data - data that need to be moved
   . a_nactive_proc - number of active procs
   Output Parameter:
   . a_Amat_crs - coarse matrix that is created (k-1)
*/

#define MIN_EQ_PROC 800
#define TOP_GRID_LIM 1000

#undef __FUNCT__
#define __FUNCT__ "partitionLevel"
PetscErrorCode partitionLevel( Mat a_Amat_fine,
                               PetscInt a_ndata_rows,
                               PetscInt a_ndata_cols,
                               PetscInt a_cbs,
                               Mat *a_P_inout,
                               PetscReal **a_coarse_data,
                               PetscMPIInt *a_nactive_proc,
                               Mat *a_Amat_crs
                               )
{
  PetscErrorCode ierr;
  Mat            Cmat,Pnew,Pold=*a_P_inout;
  IS             new_indices,isnum;
  MPI_Comm       wcomm = ((PetscObject)a_Amat_fine)->comm;
  PetscMPIInt    mype,npe,new_npe,nactive;
  PetscInt       neq,NN,Istart,Iend,Istart0,Iend0,ncrs_new,ncrs0;
  PetscBool      flag = PETSC_FALSE;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank( wcomm, &mype ); CHKERRQ(ierr);
  ierr = MPI_Comm_size( wcomm, &npe );  CHKERRQ(ierr);
  /* RAP */
  ierr = MatPtAP( a_Amat_fine, Pold, MAT_INITIAL_MATRIX, 2.0, &Cmat ); CHKERRQ(ierr);

  ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);
  ierr = MatGetOwnershipRange( Cmat, &Istart0, &Iend0 ); CHKERRQ(ierr);
  ncrs0 = (Iend0-Istart0)/a_cbs; assert((Iend0-Istart0)%a_cbs == 0);

  ierr = PetscOptionsHasName(PETSC_NULL,"-pc_gamg_avoid_repartitioning",&flag); CHKERRQ( ierr );
  if( flag ) {
    *a_Amat_crs = Cmat; /* output */
  }
  else {
    /* Repartition Cmat_{k} and move columns of P^{k}_{k-1} and coordinates accordingly */
    Mat             adj;
    const PetscInt *idx,data_sz=a_ndata_rows*a_ndata_cols;
    const PetscInt  stride0=ncrs0*a_ndata_rows;
    PetscInt        is_sz,*isnewproc_idx,ii,jj,kk,strideNew,*tidx;
    /* create sub communicator */
    MPI_Comm        cm,new_comm;
    MPI_Group       wg, g2;
    PetscInt       *counts,inpe;
    PetscMPIInt    *ranks;
    IS              isscat;
    PetscScalar    *array;
    Vec             src_crd, dest_crd;
    PetscReal      *data = *a_coarse_data;
    VecScatter      vecscat;
    IS              isnewproc;

    /* get number of PEs to make active, reduce */
    ierr = MatGetSize( Cmat, &neq, &NN );CHKERRQ(ierr);
    new_npe = neq/MIN_EQ_PROC; /* hardwire min. number of eq/proc */
    if( new_npe == 0 || neq < TOP_GRID_LIM ) new_npe = 1;
    else if (new_npe >= *a_nactive_proc ) new_npe = *a_nactive_proc; /* no change, rare */
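    /*
       Worked example of this sizing heuristic: with neq = 24000 coarse
       equations and MIN_EQ_PROC = 800 the target is 24000/800 = 30 active
       processes; with neq = 600 (< TOP_GRID_LIM) everything collapses onto
       one process. The count never grows beyond the current active set.
    */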

    ierr = PetscMalloc( npe*sizeof(PetscMPIInt), &ranks ); CHKERRQ(ierr);
    ierr = PetscMalloc( npe*sizeof(PetscInt), &counts );   CHKERRQ(ierr);

    ierr = MPI_Allgather( &ncrs0, 1, MPIU_INT, counts, 1, MPIU_INT, wcomm ); CHKERRQ(ierr);
    assert(counts[mype]==ncrs0);
    /* count the real number of active pes */
    for( nactive = jj = 0 ; jj < npe ; jj++) {
      if( counts[jj] != 0 ) {
        ranks[nactive++] = jj;
      }
    }
    assert(nactive>=new_npe);

    PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s npe (active): %d --> %d. new npe = %d, neq = %d\n",mype,__FUNCT__,*a_nactive_proc,nactive,new_npe,neq);
    *a_nactive_proc = new_npe; /* output */

    ierr = MPI_Comm_group( wcomm, &wg );              CHKERRQ(ierr);
    ierr = MPI_Group_incl( wg, nactive, ranks, &g2 ); CHKERRQ(ierr);
    ierr = MPI_Comm_create( wcomm, g2, &cm );         CHKERRQ(ierr);

    if( cm != MPI_COMM_NULL ) {
      assert(ncrs0 != 0);
      ierr = PetscCommDuplicate( cm, &new_comm, PETSC_NULL ); CHKERRQ(ierr);
      ierr = MPI_Comm_free( &cm );                            CHKERRQ(ierr);
    }
    else assert(ncrs0 == 0);

    ierr = MPI_Group_free( &wg ); CHKERRQ(ierr);
    ierr = MPI_Group_free( &g2 ); CHKERRQ(ierr);

    /* MatPartitioningApply calls MatConvert, which is collective */
#if defined PETSC_USE_LOG
    ierr = PetscLogEventBegin(gamg_setup_events[SET12],0,0,0,0);CHKERRQ(ierr);
#endif
    if( a_cbs == 1) {
      ierr = MatConvert( Cmat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);
    }
    else {
      /* make a scalar matrix to partition */
      Mat                tMat;
      PetscInt           ncols;
      const PetscScalar *vals;
      const PetscInt    *idx;
      MatInfo            info;

      ierr = MatGetInfo(Cmat,MAT_LOCAL,&info); CHKERRQ(ierr);
      ncols = (PetscInt)info.nz_used/((ncrs0+1)*a_cbs*a_cbs)+1; /* average nonzero blocks per block row */

      ierr = MatCreateMPIAIJ( wcomm, ncrs0, ncrs0,
                              PETSC_DETERMINE, PETSC_DETERMINE,
                              2*ncols, PETSC_NULL, ncols, PETSC_NULL,
                              &tMat );
      CHKERRQ(ierr);

      for ( ii = Istart0; ii < Iend0; ii++ ) {
        PetscInt dest_row = ii/a_cbs;
        ierr = MatGetRow(Cmat,ii,&ncols,&idx,&vals); CHKERRQ(ierr);
        for( jj = 0 ; jj < ncols ; jj++ ){
          PetscInt dest_col = idx[jj]/a_cbs;
          PetscScalar v = 1.0;
          ierr = MatSetValues(tMat,1,&dest_row,1,&dest_col,&v,ADD_VALUES); CHKERRQ(ierr);
        }
        ierr = MatRestoreRow(Cmat,ii,&ncols,&idx,&vals); CHKERRQ(ierr);
      }
      ierr = MatAssemblyBegin(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      ierr = MatAssemblyEnd(tMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

      static int llev = 0; /* debugging: change the test below to dump the partitioning matrix */
      if( llev++ == -1 ) {
        PetscViewer viewer; char fname[32];
        sprintf(fname,"part_mat_%d.mat",llev);
        PetscViewerBinaryOpen(wcomm,fname,FILE_MODE_WRITE,&viewer);
        ierr = MatView( tMat, viewer );       CHKERRQ(ierr);
        ierr = PetscViewerDestroy( &viewer ); CHKERRQ(ierr);
      }

      ierr = MatConvert( tMat, MATMPIADJ, MAT_INITIAL_MATRIX, &adj ); CHKERRQ(ierr);

      ierr = MatDestroy( &tMat ); CHKERRQ(ierr);
    }
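    /*
       The loop above compresses the blocked matrix to its quotient graph:
       with a_cbs == 3, scalar rows 0,1,2 all map to graph vertex 0, and a
       nonzero 3x3 block (i,j) becomes a single accumulated edge (i/3, j/3),
       which is what the partitioner should see.
    */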
    if( ncrs0 != 0 ){
      const PetscInt *is_idx;
      MatPartitioning mpart;
      /* hack to fix global data that pmetis.c uses in 'adj' */
      for( nactive = jj = 0 ; jj < npe ; jj++ ) {
        if( counts[jj] != 0 ) {
          adj->rmap->range[nactive++] = adj->rmap->range[jj];
        }
      }
      adj->rmap->range[nactive] = adj->rmap->range[npe];

      ierr = MatPartitioningCreate( new_comm, &mpart );  CHKERRQ(ierr);
      ierr = MatPartitioningSetAdjacency( mpart, adj );  CHKERRQ(ierr);
      ierr = MatPartitioningSetFromOptions( mpart );     CHKERRQ(ierr);
      ierr = MatPartitioningSetNParts( mpart, new_npe ); CHKERRQ(ierr);
      ierr = MatPartitioningApply( mpart, &isnewproc );  CHKERRQ(ierr);
      ierr = MatPartitioningDestroy( &mpart );           CHKERRQ(ierr);

      /* collect IS info */
      ierr = ISGetLocalSize( isnewproc, &is_sz ); CHKERRQ(ierr);
      ierr = PetscMalloc( a_cbs*is_sz*sizeof(PetscInt), &isnewproc_idx ); CHKERRQ(ierr);
      ierr = ISGetIndices( isnewproc, &is_idx );  CHKERRQ(ierr);
      /* spread the partitioning across the machine - best way? */
      NN = 1; /*npe/new_npe;*/
      for( kk = jj = 0 ; kk < is_sz ; kk++ ){
        for( ii = 0 ; ii < a_cbs ; ii++, jj++ ) {
          isnewproc_idx[jj] = is_idx[kk] * NN; /* distribution */
        }
      }
      ierr = ISRestoreIndices( isnewproc, &is_idx ); CHKERRQ(ierr);
      ierr = ISDestroy( &isnewproc );                CHKERRQ(ierr);
      ierr = PetscCommDestroy( &new_comm );          CHKERRQ(ierr);

      is_sz *= a_cbs;
    }
    else {
      isnewproc_idx = 0;
      is_sz = 0;
    }
    ierr = MatDestroy( &adj ); CHKERRQ(ierr);
    ierr = ISCreateGeneral( wcomm, is_sz, isnewproc_idx, PETSC_COPY_VALUES, &isnewproc ); CHKERRQ(ierr);
    if( isnewproc_idx != 0 ) {
      ierr = PetscFree( isnewproc_idx ); CHKERRQ(ierr);
    }

    /*
      Create an index set from the isnewproc index set to indicate the mapping TO
    */
    ierr = ISPartitioningToNumbering( isnewproc, &isnum ); CHKERRQ(ierr);
    /*
      Determine how many elements are assigned to each processor
    */
    inpe = npe;
    ierr = ISPartitioningCount( isnewproc, inpe, counts ); CHKERRQ(ierr);
    ierr = ISDestroy( &isnewproc );                        CHKERRQ(ierr);
    ncrs_new = counts[mype]/a_cbs;
    strideNew = ncrs_new*a_ndata_rows;
#if defined PETSC_USE_LOG
    ierr = PetscLogEventEnd(gamg_setup_events[SET12],0,0,0,0); CHKERRQ(ierr);
#endif
    /* Create a vector to contain the newly ordered element information */
    ierr = VecCreate( wcomm, &dest_crd ); CHKERRQ(ierr);
    ierr = VecSetSizes( dest_crd, data_sz*ncrs_new, PETSC_DECIDE ); CHKERRQ(ierr);
    ierr = VecSetFromOptions( dest_crd ); CHKERRQ(ierr); /* funny vector - get global options? */
    /*
      There are 'a_ndata_rows*a_ndata_cols' data items per node (one can think of
      the vectors as having a block size of data_sz). Note, ISs are expanded into
      equation space by 'a_cbs'.
    */
    ierr = PetscMalloc( (ncrs0*data_sz)*sizeof(PetscInt), &tidx ); CHKERRQ(ierr);
    ierr = ISGetIndices( isnum, &idx ); CHKERRQ(ierr);
    for(ii=0,jj=0; ii<ncrs0 ; ii++) {
      PetscInt id = idx[ii*a_cbs]/a_cbs; /* get node back */
      for( kk=0; kk<data_sz ; kk++, jj++) tidx[jj] = id*data_sz + kk;
    }
    ierr = ISRestoreIndices( isnum, &idx ); CHKERRQ(ierr);
    ierr = ISCreateGeneral( wcomm, data_sz*ncrs0, tidx, PETSC_COPY_VALUES, &isscat ); CHKERRQ(ierr);
    ierr = PetscFree( tidx ); CHKERRQ(ierr);
    /*
      Create a vector to contain the original vertex information for each element
    */
    ierr = VecCreateSeq( PETSC_COMM_SELF, data_sz*ncrs0, &src_crd ); CHKERRQ(ierr);
    for( jj=0; jj<a_ndata_cols ; jj++ ) {
      for( ii=0 ; ii<ncrs0 ; ii++) {
        for( kk=0; kk<a_ndata_rows ; kk++ ) {
          PetscInt ix = ii*a_ndata_rows + kk + jj*stride0, jx = ii*data_sz + kk*a_ndata_cols + jj;
          ierr = VecSetValues( src_crd, 1, &jx, &data[ix], INSERT_VALUES ); CHKERRQ(ierr);
        }
      }
    }
    ierr = VecAssemblyBegin(src_crd); CHKERRQ(ierr);
    ierr = VecAssemblyEnd(src_crd);   CHKERRQ(ierr);
    /*
      Scatter the element vertex information (still in the original vertex ordering)
      to the correct processor
    */
    ierr = VecScatterCreate( src_crd, PETSC_NULL, dest_crd, isscat, &vecscat); CHKERRQ(ierr);
    ierr = ISDestroy( &isscat ); CHKERRQ(ierr);
    ierr = VecScatterBegin(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(vecscat,src_crd,dest_crd,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterDestroy( &vecscat ); CHKERRQ(ierr);
    ierr = VecDestroy( &src_crd );        CHKERRQ(ierr);
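    /*
      Index-mapping example for the ix/jx transposition used above (and when
      unpacking below): with a_ndata_rows = 1, a_ndata_cols = 2 (2D coordinates)
      and ncrs0 = 4, the source data is stored column-major as
      x0 x1 x2 x3 y0 y1 y2 y3, while the scatter vector interleaves it per
      node as x0 y0 x1 y1 ... so that whole nodes move together.
    */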
    /*
      Put the element vertex data into a new allocation of the gdata->ele
    */
    ierr = PetscFree( *a_coarse_data ); CHKERRQ(ierr);
    ierr = PetscMalloc( data_sz*ncrs_new*sizeof(PetscReal), a_coarse_data ); CHKERRQ(ierr);

    ierr = VecGetArray( dest_crd, &array ); CHKERRQ(ierr);
    data = *a_coarse_data;
    for( jj=0; jj<a_ndata_cols ; jj++ ) {
      for( ii=0 ; ii<ncrs_new ; ii++) {
        for( kk=0; kk<a_ndata_rows ; kk++ ) {
          PetscInt ix = ii*a_ndata_rows + kk + jj*strideNew, jx = ii*data_sz + kk*a_ndata_cols + jj;
          data[ix] = PetscRealPart(array[jx]);
          array[jx] = 1.e300; /* poison consumed entries (debugging aid) */
        }
      }
    }
    ierr = VecRestoreArray( dest_crd, &array ); CHKERRQ(ierr);
    ierr = VecDestroy( &dest_crd );             CHKERRQ(ierr);
    /*
      Invert for MatGetSubMatrix
    */
    ierr = ISInvertPermutation( isnum, ncrs_new*a_cbs, &new_indices ); CHKERRQ(ierr);
    ierr = ISSort( new_indices ); CHKERRQ(ierr); /* is this needed? */
    ierr = ISDestroy( &isnum );   CHKERRQ(ierr);
    /* A_crs output */
    ierr = MatGetSubMatrix( Cmat, new_indices, new_indices, MAT_INITIAL_MATRIX, a_Amat_crs ); CHKERRQ(ierr);

    ierr = MatDestroy( &Cmat ); CHKERRQ(ierr);
    Cmat = *a_Amat_crs; /* output */
    ierr = MatSetBlockSize( Cmat, a_cbs ); CHKERRQ(ierr);

    /* prolongator */
    ierr = MatGetOwnershipRange( Pold, &Istart, &Iend ); CHKERRQ(ierr);
    {
      IS findices;
      ierr = ISCreateStride(wcomm,Iend-Istart,Istart,1,&findices); CHKERRQ(ierr);

      ierr = MatGetSubMatrix( Pold, findices, new_indices, MAT_INITIAL_MATRIX, &Pnew ); CHKERRQ(ierr);

      ierr = ISDestroy( &findices ); CHKERRQ(ierr);
    }

    ierr = MatDestroy( a_P_inout ); CHKERRQ(ierr);
    *a_P_inout = Pnew; /* output */

    ierr = ISDestroy( &new_indices ); CHKERRQ(ierr);
    ierr = PetscFree( counts );       CHKERRQ(ierr);
    ierr = PetscFree( ranks );        CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}
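/*
   For reference, the repartitioning pipeline implemented above is:
   RAP (MatPtAP) -> choose a reduced active process set -> partition the
   block quotient graph -> renumber (ISPartitioningToNumbering) -> scatter
   the per-node vertex data -> extract the permuted coarse operator and
   prolongator with MatGetSubMatrix.
*/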

/* -------------------------------------------------------------------------- */
/*
   PCSetUp_GAMG - Prepares for the use of the GAMG preconditioner
                  by setting data structures and options.

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCSetUp()

   Notes:
   The interface routine PCSetUp() is not usually called directly by
   the user, but instead is called by PCApply() if necessary.
*/
#undef __FUNCT__
#define __FUNCT__ "PCSetUp_GAMG"
PetscErrorCode PCSetUp_GAMG( PC a_pc )
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)a_pc->data;
  PC_GAMG        *pc_gamg = (PC_GAMG*)mg->innerctx;
  Mat             Amat = a_pc->mat, Pmat = a_pc->pmat;
  PetscInt        fine_level, level, level1, M, N, bs, nloc, lidx, Istart, Iend;
  MPI_Comm        wcomm = ((PetscObject)a_pc)->comm;
  PetscMPIInt     mype,npe,nactivepe;
  PetscBool       isOK;
  Mat             Aarr[GAMG_MAXLEVELS], Parr[GAMG_MAXLEVELS];
  PetscReal      *coarse_data = 0, *data, emaxs[GAMG_MAXLEVELS];
  MatInfo         info;

  PetscFunctionBegin;
  if( a_pc->setupcalled ) {
    /* no state data in GAMG to destroy */
    ierr = PCReset_MG( a_pc ); CHKERRQ(ierr);
  }
  ierr = MPI_Comm_rank(wcomm,&mype);CHKERRQ(ierr);
  ierr = MPI_Comm_size(wcomm,&npe);CHKERRQ(ierr);
  /* GAMG requires input of the fine-grid matrix. It determines nlevels. */
  ierr = MatGetBlockSize( Amat, &bs ); CHKERRQ(ierr);
  ierr = MatGetSize( Amat, &M, &N );CHKERRQ(ierr);
  ierr = MatGetOwnershipRange( Amat, &Istart, &Iend ); CHKERRQ(ierr);
  nloc = (Iend-Istart)/bs; assert((Iend-Istart)%bs == 0);

  /* create default data if coordinates were not provided */
  if( pc_gamg->m_data == 0 && nloc > 0 ) {
    ierr = PCSetCoordinates_GAMG( a_pc, -1, 0 ); CHKERRQ( ierr );
  }
  data = pc_gamg->m_data;

  /* Get A_i and R_i */
  ierr = MatGetInfo(Amat,MAT_GLOBAL_SUM,&info); CHKERRQ(ierr);
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s level %d N=%ld, n data rows=%d, n data cols=%d, nnz/row (ave)=%d, np=%d\n",
              mype,__FUNCT__,0,(long)N,(int)pc_gamg->m_data_rows,(int)pc_gamg->m_data_cols,
              (int)(info.nz_used/(PetscReal)N),(int)npe);
  for ( level=0, Aarr[0] = Pmat, nactivepe = npe; /* hard wired stopping logic */
        level < GAMG_MAXLEVELS-1 && (level==0 || M>TOP_GRID_LIM) && (npe==1 || nactivepe>1);
        level++ ){
    level1 = level + 1;
#if (defined PETSC_USE_LOG && defined GAMG_STAGES)
    ierr = PetscLogStagePush(gamg_stages[level]); CHKERRQ( ierr );
#endif
#if defined PETSC_USE_LOG
    ierr = PetscLogEventBegin(gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
#endif
    ierr = createProlongation(Aarr[level], data, pc_gamg->m_dim, pc_gamg->m_data_cols, pc_gamg->m_useSA,
                              level, &bs, &Parr[level1], &coarse_data, &isOK, &emaxs[level] );
    CHKERRQ(ierr);
    ierr = PetscFree( data ); CHKERRQ( ierr );
#if defined PETSC_USE_LOG
    ierr = PetscLogEventEnd(gamg_setup_events[SET1],0,0,0,0);CHKERRQ(ierr);
#endif
    if(level==0) Aarr[0] = Amat; /* use Pmat for finest level setup, but use mat for solver */

    if( isOK ) {
#if defined PETSC_USE_LOG
      ierr = PetscLogEventBegin(gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
#endif
      ierr = partitionLevel( Aarr[level], pc_gamg->m_useSA ? bs : 1, pc_gamg->m_data_cols, bs,
                             &Parr[level1], &coarse_data, &nactivepe, &Aarr[level1] );
      CHKERRQ(ierr);
#if defined PETSC_USE_LOG
      ierr = PetscLogEventEnd(gamg_setup_events[SET2],0,0,0,0);CHKERRQ(ierr);
#endif
      ierr = MatGetSize( Aarr[level1], &M, &N );CHKERRQ(ierr);
      ierr = MatGetInfo(Aarr[level1],MAT_GLOBAL_SUM,&info); CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"\t\t[%d]%s %d) N=%ld, bs=%d, n data cols=%d, nnz/row (ave)=%d, %d active pes\n",
                  mype,__FUNCT__,(int)level1,(long)N,(int)bs,(int)pc_gamg->m_data_cols,
                  (int)(info.nz_used/(PetscReal)N),(int)nactivepe);
      /* coarse grids with SA can have zero row/cols from singleton aggregates */
      /* the aggregation method can probably guarantee this does not happen! - be safe for now */
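      /*
        One way such a zero diagonal can arise (per the comment above): a
        vertex with no strong neighbors becomes a singleton aggregate, and
        its coarse basis function can end up vanishing, leaving an exactly
        zero row/column in the Galerkin coarse operator.
      */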

      if( PETSC_TRUE ){ /* always on for now */
        Vec diag; PetscScalar *data_arr,v; PetscInt Istart,Iend,kk,nloceq,id;
        v = 1.e-10; /* LU factor has hard wired numbers for small diags so this needs to match (yuk) */
        ierr = MatGetOwnershipRange(Aarr[level1], &Istart, &Iend); CHKERRQ(ierr);
        nloceq = Iend-Istart;
        ierr = MatGetVecs( Aarr[level1], &diag, 0 ); CHKERRQ(ierr);
        ierr = MatGetDiagonal( Aarr[level1], diag ); CHKERRQ(ierr);
        ierr = VecGetArray( diag, &data_arr );       CHKERRQ(ierr);
        for(kk=0;kk<nloceq;kk++){
          if(data_arr[kk]==0.0) {
            id = kk + Istart;
            ierr = MatSetValues(Aarr[level1],1,&id,1,&id,&v,INSERT_VALUES); CHKERRQ(ierr);
            PetscPrintf(PETSC_COMM_SELF,"\t[%d]%s warning: added zero to diag (%d) on level %d \n",mype,__FUNCT__,id,level1);
          }
        }
        ierr = VecRestoreArray( diag, &data_arr ); CHKERRQ(ierr);
        ierr = VecDestroy( &diag );                CHKERRQ(ierr);
        ierr = MatAssemblyBegin(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        ierr = MatAssemblyEnd(Aarr[level1],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
      }
    }
    else {
      coarse_data = 0;
      break;
    }
    data = coarse_data;

#if (defined PETSC_USE_LOG && defined GAMG_STAGES)
    ierr = PetscLogStagePop(); CHKERRQ( ierr );
#endif
  }
  if( coarse_data ) {
    ierr = PetscFree( coarse_data ); CHKERRQ( ierr );
  }
  PetscPrintf(PETSC_COMM_WORLD,"\t[%d]%s %d levels\n",0,__FUNCT__,level + 1);
  pc_gamg->m_data = 0; /* destroyed coordinate data */
  pc_gamg->m_Nlevels = level + 1;
  fine_level = level;
  ierr = PCMGSetLevels(a_pc,pc_gamg->m_Nlevels,PETSC_NULL);CHKERRQ(ierr);

  /* set default smoothers */
  for ( lidx=1, level = pc_gamg->m_Nlevels-2;
        lidx <= fine_level;
        lidx++, level--) {
    PetscReal emax, emin;
    KSP smoother; PC subpc;
    ierr = PCMGGetSmoother( a_pc, lidx, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetType( smoother, KSPCHEBYCHEV );     CHKERRQ(ierr);
    if( emaxs[level] > 0.0 ) emax=emaxs[level];
    else { /* eigen estimate 'emax' */
      KSP eksp; Mat Lmat = Aarr[level];
      Vec bb, xx; PC pc;

      ierr = MatGetVecs( Lmat, &bb, 0 ); CHKERRQ(ierr);
      ierr = MatGetVecs( Lmat, &xx, 0 ); CHKERRQ(ierr);
      {
        PetscRandom rctx;
        ierr = PetscRandomCreate(wcomm,&rctx);CHKERRQ(ierr);
        ierr = PetscRandomSetFromOptions(rctx);CHKERRQ(ierr);
        ierr = VecSetRandom(bb,rctx);CHKERRQ(ierr);
        ierr = PetscRandomDestroy( &rctx ); CHKERRQ(ierr);
      }
      ierr = KSPCreate(wcomm,&eksp);CHKERRQ(ierr);
      ierr = KSPSetType( eksp, KSPCG );                      CHKERRQ(ierr);
      ierr = KSPSetInitialGuessNonzero( eksp, PETSC_FALSE ); CHKERRQ(ierr);
      ierr = KSPSetOperators( eksp, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN ); CHKERRQ( ierr );
      ierr = KSPGetPC( eksp, &pc );                          CHKERRQ( ierr );
      ierr = PCSetType( pc, PCPBJACOBI ); CHKERRQ(ierr); /* should match the smoother PC type set below */
      ierr = KSPSetTolerances( eksp, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT, 10 ); CHKERRQ(ierr);
      /* ierr = KSPSetConvergenceTest( eksp, KSPSkipConverged, 0, 0 ); CHKERRQ(ierr); */
      ierr = KSPSetNormType( eksp, KSP_NORM_NONE ); CHKERRQ(ierr);

      ierr = KSPSetComputeSingularValues( eksp,PETSC_TRUE ); CHKERRQ(ierr);
      ierr = KSPSolve( eksp, bb, xx );                       CHKERRQ(ierr);
      ierr = KSPComputeExtremeSingularValues( eksp, &emax, &emin ); CHKERRQ(ierr);
      ierr = VecDestroy( &xx );   CHKERRQ(ierr);
      ierr = VecDestroy( &bb );   CHKERRQ(ierr);
      ierr = KSPDestroy( &eksp ); CHKERRQ(ierr);
      PetscPrintf(PETSC_COMM_WORLD,"\t\t\t%s max eigen=%e min=%e PC=%s\n",__FUNCT__,emax,emin,PETSC_GAMG_SMOOTHER);
    }
    {
      PetscInt N1, N0, tt;
      ierr = MatGetSize( Aarr[level], &N1, &tt );   CHKERRQ(ierr);
      ierr = MatGetSize( Aarr[level+1], &N0, &tt ); CHKERRQ(ierr);
      emin = 1.*emax/((PetscReal)N1/(PetscReal)N0); /* this should be about the coarsening rate */
      emax *= 1.05;
    }
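    /*
       Rationale for these Chebyshev bounds (heuristic): with a coarsening
       ratio N1/N0 of about 8 in 3D, emin = emax/8, so the smoother targets
       the band [emax/8, 1.05*emax] - the upper part of the spectrum that the
       coarse-grid correction does not handle.
    */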

    ierr = KSPSetOperators( smoother, Aarr[level], Aarr[level], DIFFERENT_NONZERO_PATTERN ); CHKERRQ(ierr);
    ierr = KSPChebychevSetEigenvalues( smoother, emax, emin );CHKERRQ(ierr);
    /* ierr = KSPSetTolerances(smoother,PETSC_DEFAULT,PETSC_DEFAULT,PETSC_DEFAULT,2); CHKERRQ(ierr); */
    ierr = KSPGetPC( smoother, &subpc );              CHKERRQ(ierr);
    ierr = PCSetType( subpc, PETSC_GAMG_SMOOTHER );   CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
  }
  {
    /* coarse grid */
    KSP smoother,*k2; PC subpc,pc2; PetscInt ii,first;
    Mat Lmat = Aarr[pc_gamg->m_Nlevels-1];
    ierr = PCMGGetSmoother( a_pc, 0, &smoother ); CHKERRQ(ierr);
    ierr = KSPSetOperators( smoother, Lmat, Lmat, DIFFERENT_NONZERO_PATTERN ); CHKERRQ(ierr);
    ierr = KSPSetNormType( smoother, KSP_NORM_NONE ); CHKERRQ(ierr);
    ierr = KSPGetPC( smoother, &subpc );  CHKERRQ(ierr);
    ierr = PCSetType( subpc, PCBJACOBI ); CHKERRQ(ierr);
    ierr = PCSetUp( subpc );              CHKERRQ(ierr);
    ierr = PCBJacobiGetSubKSP(subpc,&ii,&first,&k2);CHKERRQ(ierr);
    assert(ii==1);
    ierr = KSPGetPC(k2[0],&pc2);   CHKERRQ(ierr);
    ierr = PCSetType( pc2, PCLU ); CHKERRQ(ierr);
  }

  /* should be called in PCSetFromOptions_GAMG(), but cannot be called prior to PCMGSetLevels() */
  ierr = PCSetFromOptions_MG(a_pc); CHKERRQ(ierr);
  {
    PetscBool galerkin;
    ierr = PCMGGetGalerkin( a_pc, &galerkin); CHKERRQ(ierr);
    if(galerkin){
      SETERRQ(wcomm,PETSC_ERR_ARG_WRONG, "GAMG does Galerkin manually so it must not be used in PC_MG.");
    }
  }

  /* set interpolation between the levels, clean up */
  for (lidx=0,level=pc_gamg->m_Nlevels-1;
       lidx<fine_level;
       lidx++, level--){
    ierr = PCMGSetInterpolation( a_pc, lidx+1, Parr[level] );CHKERRQ(ierr);
    if( !PETSC_TRUE ) { /* debugging: flip to dump P and A in MATLAB format */
      PetscViewer viewer; char fname[32];
      sprintf(fname,"Pmat_%d.m",(int)level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Parr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer );  CHKERRQ(ierr);
      sprintf(fname,"Amat_%d.m",(int)level);
      ierr = PetscViewerASCIIOpen( wcomm, fname, &viewer ); CHKERRQ(ierr);
      ierr = PetscViewerSetFormat( viewer, PETSC_VIEWER_ASCII_MATLAB); CHKERRQ(ierr);
      ierr = MatView( Aarr[level], viewer ); CHKERRQ(ierr);
      ierr = PetscViewerDestroy( &viewer );  CHKERRQ(ierr);
    }
    ierr = MatDestroy( &Parr[level] ); CHKERRQ(ierr);
    ierr = MatDestroy( &Aarr[level] ); CHKERRQ(ierr);
  }

  /* setupcalled is set to 0 so that MG is setup from scratch */
  a_pc->setupcalled = 0;
  ierr = PCSetUp_MG(a_pc);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

/* ------------------------------------------------------------------------- */
/*
   PCDestroy_GAMG - Destroys the private context for the GAMG preconditioner
   that was created with PCCreate_GAMG().

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCDestroy()
*/
#undef __FUNCT__
#define __FUNCT__ "PCDestroy_GAMG"
PetscErrorCode PCDestroy_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg = (PC_MG*)pc->data;
  PC_GAMG        *pc_gamg= (PC_GAMG*)mg->innerctx;

  PetscFunctionBegin;
  ierr = PCReset_GAMG(pc);CHKERRQ(ierr);
  ierr = PetscFree(pc_gamg);CHKERRQ(ierr);
  ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_GAMG"
PetscErrorCode PCSetFromOptions_GAMG(PC pc)
{
  /* PetscErrorCode ierr; */
  /* PC_MG *mg = (PC_MG*)pc->data; */
  /* PC_GAMG *pc_gamg = (PC_GAMG*)mg->innerctx; */
  /* MPI_Comm comm = ((PetscObject)pc)->comm; */

  PetscFunctionBegin;
  PetscFunctionReturn(0);
}

/* -------------------------------------------------------------------------- */
/*
   PCCreate_GAMG - Creates a GAMG preconditioner context, PC_GAMG

   Input Parameter:
.  pc - the preconditioner context

   Application Interface Routine: PCCreate()
*/
/*MC
     PCGAMG - Use algebraic multigrid preconditioning. This preconditioner requires
     that you provide the fine-grid discretization matrix and, optionally,
     coordinates on the fine grid.

   Options Database Keys:
   Multigrid options (inherited):
+  -pc_mg_cycles <1>: 1 for V-cycle, 2 for W-cycle (PCMGSetCycles)
.  -pc_mg_smoothup <1>: Number of post-smoothing steps (PCMGSetNumberSmoothUp)
.  -pc_mg_smoothdown <1>: Number of pre-smoothing steps (PCMGSetNumberSmoothDown)
-  -pc_mg_type <multiplicative>: (one of) additive multiplicative full cascade kascade
   GAMG options:

   Level: intermediate

   Concepts: multigrid

.seealso: PCCreate(), PCSetType(), PCType (for list of available types), PC, PCMGType,
          PCMGSetLevels(), PCMGGetLevels(), PCMGSetType(), PCMGSetCycles(), PCMGSetNumberSmoothDown(),
          PCMGSetNumberSmoothUp(), PCMGGetCoarseSolve(), PCMGSetResidual(), PCMGSetInterpolation(),
          PCMGSetRestriction(), PCMGGetSmoother(), PCMGGetSmootherUp(), PCMGGetSmootherDown(),
          PCMGSetCyclesOnLevel(), PCMGSetRhs(), PCMGSetX(), PCMGSetR()
M*/
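/*
   Command-line sketch (hypothetical driver, assumed to call PCSetCoordinates):

     mpiexec -n 8 ./my_app -pc_type gamg -pc_gamg_type sa -pc_mg_cycles 1

   -pc_gamg_type sa selects smoothed aggregation (read in
   PCSetCoordinates_GAMG above); otherwise the geometric path is used,
   unless no coordinates are given, which forces SA.
*/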

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "PCCreate_GAMG"
PetscErrorCode PCCreate_GAMG(PC pc)
{
  PetscErrorCode ierr;
  PC_GAMG        *pc_gamg;
  PC_MG          *mg;
  PetscClassId    cookie;

  PetscFunctionBegin;
  /* PCGAMG is an inherited class of PCMG. Initialize pc as PCMG */
  ierr = PCSetType(pc,PCMG);CHKERRQ(ierr); /* calls PCCreate_MG() and MGCreate_Private() */
  ierr = PetscObjectChangeTypeName((PetscObject)pc,PCGAMG);CHKERRQ(ierr);

  /* create a supporting struct and attach it to pc */
  ierr = PetscNewLog(pc,PC_GAMG,&pc_gamg);CHKERRQ(ierr);
  mg = (PC_MG*)pc->data;
  mg->innerctx = pc_gamg;

  pc_gamg->m_Nlevels = -1;

  /* overwrite the pointers of PCMG by the functions of PCGAMG */
  pc->ops->setfromoptions = PCSetFromOptions_GAMG;
  pc->ops->setup          = PCSetUp_GAMG;
  pc->ops->reset          = PCReset_GAMG;
  pc->ops->destroy        = PCDestroy_GAMG;

  ierr = PetscObjectComposeFunctionDynamic( (PetscObject)pc,
                                            "PCSetCoordinates_C",
                                            "PCSetCoordinates_GAMG",
                                            PCSetCoordinates_GAMG);CHKERRQ(ierr);
#if defined PETSC_USE_LOG
  static int count = 0;
  if( count++ == 0 ) {
    PetscClassIdRegister("GAMG Setup",&cookie);
    PetscLogEventRegister("GAMG: createProl", cookie, &gamg_setup_events[SET1]);
    PetscLogEventRegister("  make graph", cookie, &gamg_setup_events[SET3]);
    PetscLogEventRegister("  MIS/Agg", cookie, &gamg_setup_events[SET4]);
    PetscLogEventRegister("  geo: growSupp", cookie, &gamg_setup_events[SET5]);
    PetscLogEventRegister("  geo: triangle", cookie, &gamg_setup_events[SET6]);
    PetscLogEventRegister("    search & set", cookie, &gamg_setup_events[FIND_V]);
    PetscLogEventRegister("  SA: init", cookie, &gamg_setup_events[SET7]);
    /* PetscLogEventRegister("  SA: frmProl0", cookie, &gamg_setup_events[SET8]); */
    PetscLogEventRegister("  SA: smooth", cookie, &gamg_setup_events[SET9]);
    PetscLogEventRegister("GAMG: partLevel", cookie, &gamg_setup_events[SET2]);
    PetscLogEventRegister("  PL repartition", cookie, &gamg_setup_events[SET12]);
    /* PetscLogEventRegister("  PL move data", cookie, &gamg_setup_events[SET13]); */
    /* PetscLogEventRegister("GAMG: fix", cookie, &gamg_setup_events[SET10]); */
    /* PetscLogEventRegister("GAMG: set levels", cookie, &gamg_setup_events[SET11]); */

    /* create timer stages */
#if defined GAMG_STAGES
    {
      char str[32];
      sprintf(str,"MG Level %d (finest)",0);
      PetscLogStageRegister(str, &gamg_stages[0]);
      PetscInt lidx;
      for (lidx=1;lidx<9;lidx++){
        sprintf(str,"MG Level %d",lidx);
        PetscLogStageRegister(str, &gamg_stages[lidx]);
      }
    }
#endif
  }
#endif
  PetscFunctionReturn(0);
}
EXTERN_C_END