#include <../src/mat/impls/aij/mpi/mpiaij.h> /*I "petscmat.h" I*/
#include <../src/mat/impls/sell/mpi/mpisell.h> /*I "petscmat.h" I*/
#include <petsc/private/vecimpl.h>
#include <petsc/private/isimpl.h>
#include <petscblaslapack.h>
#include <petscsf.h>

/*MC
   MATSELL - MATSELL = "sell" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQSELL when constructed with a single process communicator,
   and MATMPISELL otherwise. As a result, for single process communicators,
   MatSeqSELLSetPreallocation is supported, and similarly MatMPISELLSetPreallocation is supported
   for communicators controlling multiple processes. It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
. -mat_type sell - sets the matrix type to "sell" during a call to MatSetFromOptions()

   Developer Notes:
   Subclasses include MATSELLCUSP, MATSELLCUSPARSE, MATSELLPERM, and MATSELLCRL; the type also
   automatically switches over to use inodes when enough exist.

   Level: beginner

.seealso: MatCreateSELL(), MatCreateSeqSELL(), MATSEQSELL, MATMPISELL
M*/

PetscErrorCode MatDiagonalSet_MPISELL(Mat Y,Vec D,InsertMode is)
{
  PetscErrorCode ierr;
  Mat_MPISELL    *sell=(Mat_MPISELL*)Y->data;

  PetscFunctionBegin;
  if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
    ierr = MatDiagonalSet(sell->A,D,is);CHKERRQ(ierr);
  } else {
    ierr = MatDiagonalSet_Default(Y,D,is);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

/*
  Local utility routine that creates a mapping from the global column
  number to the local number in the off-diagonal part of the local
  storage of the matrix. When PETSC_USE_CTABLE is used this is scalable at
  a slightly higher hash table cost; without it, it is not scalable (each processor
  has an order N integer array but is fast to access).
49 */ 50 PetscErrorCode MatCreateColmap_MPISELL_Private(Mat mat) 51 { 52 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 53 PetscErrorCode ierr; 54 PetscInt n=sell->B->cmap->n,i; 55 56 PetscFunctionBegin; 57 if (!sell->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPISELL Matrix was assembled but is missing garray"); 58 #if defined(PETSC_USE_CTABLE) 59 ierr = PetscTableCreate(n,mat->cmap->N+1,&sell->colmap);CHKERRQ(ierr); 60 for (i=0; i<n; i++) { 61 ierr = PetscTableAdd(sell->colmap,sell->garray[i]+1,i+1,INSERT_VALUES);CHKERRQ(ierr); 62 } 63 #else 64 ierr = PetscCalloc1(mat->cmap->N+1,&sell->colmap);CHKERRQ(ierr); 65 ierr = PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));CHKERRQ(ierr); 66 for (i=0; i<n; i++) sell->colmap[sell->garray[i]] = i+1; 67 #endif 68 PetscFunctionReturn(0); 69 } 70 71 #define MatSetValues_SeqSELL_A_Private(row,col,value,addv,orow,ocol) \ 72 { \ 73 if (col <= lastcol1) low1 = 0; \ 74 else high1 = nrow1; \ 75 lastcol1 = col; \ 76 while (high1-low1 > 5) { \ 77 t = (low1+high1)/2; \ 78 if (*(cp1+8*t) > col) high1 = t; \ 79 else low1 = t; \ 80 } \ 81 for (_i=low1; _i<high1; _i++) { \ 82 if (*(cp1+8*_i) > col) break; \ 83 if (*(cp1+8*_i) == col) { \ 84 if (addv == ADD_VALUES) *(vp1+8*_i) += value; \ 85 else *(vp1+8*_i) = value; \ 86 goto a_noinsert; \ 87 } \ 88 } \ 89 if (value == 0.0 && ignorezeroentries) {low1 = 0; high1 = nrow1;goto a_noinsert;} \ 90 if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;} \ 91 if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \ 92 MatSeqXSELLReallocateSELL(A,am,1,nrow1,a->sliidx,row/8,row,col,a->colidx,a->val,cp1,vp1,nonew,MatScalar); \ 93 /* shift up all the later entries in this row */ \ 94 for (ii=nrow1-1; ii>=_i; ii--) { \ 95 *(cp1+8*(ii+1)) = *(cp1+8*ii); \ 96 *(vp1+8*(ii+1)) = *(vp1+8*ii); \ 97 } \ 98 *(cp1+8*_i) = col; \ 99 *(vp1+8*_i) = value; \ 100 a->nz++; nrow1++; A->nonzerostate++; \ 101 a_noinsert: ; \ 102 a->rlen[row] = nrow1; \ 103 } 104 105 #define MatSetValues_SeqSELL_B_Private(row,col,value,addv,orow,ocol) \ 106 { \ 107 if (col <= lastcol2) low2 = 0; \ 108 else high2 = nrow2; \ 109 lastcol2 = col; \ 110 while (high2-low2 > 5) { \ 111 t = (low2+high2)/2; \ 112 if (*(cp2+8*t) > col) high2 = t; \ 113 else low2 = t; \ 114 } \ 115 for (_i=low2; _i<high2; _i++) { \ 116 if (*(cp2+8*_i) > col) break; \ 117 if (*(cp2+8*_i) == col) { \ 118 if (addv == ADD_VALUES) *(vp2+8*_i) += value; \ 119 else *(vp2+8*_i) = value; \ 120 goto b_noinsert; \ 121 } \ 122 } \ 123 if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \ 124 if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;} \ 125 if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \ 126 MatSeqXSELLReallocateSELL(B,bm,1,nrow2,b->sliidx,row/8,row,col,b->colidx,b->val,cp2,vp2,nonew,MatScalar); \ 127 /* shift up all the later entries in this row */ \ 128 for (ii=nrow2-1; ii>=_i; ii--) { \ 129 *(cp2+8*(ii+1)) = *(cp2+8*ii); \ 130 *(vp2+8*(ii+1)) = *(vp2+8*ii); \ 131 } \ 132 *(cp2+8*_i) = col; \ 133 *(vp2+8*_i) = value; \ 134 b->nz++; nrow2++; B->nonzerostate++; \ 135 b_noinsert: ; \ 136 b->rlen[row] = nrow2; \ 137 } 138 139 PetscErrorCode MatSetValues_MPISELL(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv) 140 { 141 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 142 
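  /* Row-by-row insertion strategy: rows owned by this process (rstart <= im[i] < rend) are
     handled locally; within such a row, columns inside the local ownership range [cstart,cend)
     go into the diagonal block sell->A, all other columns go into the off-diagonal block
     sell->B (translated through colmap/garray once the matrix has been assembled). Rows owned
     by other processes are stashed for communication during assembly unless donotstash is set. */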
PetscScalar value; 143 PetscErrorCode ierr; 144 PetscInt i,j,rstart=mat->rmap->rstart,rend=mat->rmap->rend,shift1,shift2; 145 PetscInt cstart=mat->cmap->rstart,cend=mat->cmap->rend,row,col; 146 PetscBool roworiented=sell->roworiented; 147 148 /* Some Variables required in the macro */ 149 Mat A=sell->A; 150 Mat_SeqSELL *a=(Mat_SeqSELL*)A->data; 151 PetscBool ignorezeroentries=a->ignorezeroentries,found; 152 Mat B=sell->B; 153 Mat_SeqSELL *b=(Mat_SeqSELL*)B->data; 154 PetscInt *cp1,*cp2,ii,_i,nrow1,nrow2,low1,high1,low2,high2,t,lastcol1,lastcol2; 155 MatScalar *vp1,*vp2; 156 157 PetscFunctionBegin; 158 for (i=0; i<m; i++) { 159 if (im[i] < 0) continue; 160 #if defined(PETSC_USE_DEBUG) 161 if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1); 162 #endif 163 if (im[i] >= rstart && im[i] < rend) { 164 row = im[i] - rstart; 165 lastcol1 = -1; 166 shift1 = a->sliidx[row>>3]+(row&0x07); /* starting index of the row */ 167 cp1 = a->colidx+shift1; 168 vp1 = a->val+shift1; 169 nrow1 = a->rlen[row]; 170 low1 = 0; 171 high1 = nrow1; 172 lastcol2 = -1; 173 shift2 = b->sliidx[row>>3]+(row&0x07); /* starting index of the row */ 174 cp2 = b->colidx+shift2; 175 vp2 = b->val+shift2; 176 nrow2 = b->rlen[row]; 177 low2 = 0; 178 high2 = nrow2; 179 180 for (j=0; j<n; j++) { 181 if (roworiented) value = v[i*n+j]; 182 else value = v[i+j*m]; 183 if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue; 184 if (in[j] >= cstart && in[j] < cend) { 185 col = in[j] - cstart; 186 MatSetValue_SeqSELL_Private(A,row,col,value,addv,im[i],in[j],cp1,vp1,lastcol1,low1,high1); /* set one value */ 187 } else if (in[j] < 0) continue; 188 #if defined(PETSC_USE_DEBUG) 189 else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1); 190 #endif 191 else { 192 if (mat->was_assembled) { 193 if (!sell->colmap) { 194 ierr = MatCreateColmap_MPISELL_Private(mat);CHKERRQ(ierr); 195 } 196 #if defined(PETSC_USE_CTABLE) 197 ierr = PetscTableFind(sell->colmap,in[j]+1,&col);CHKERRQ(ierr); 198 col--; 199 #else 200 col = sell->colmap[in[j]] - 1; 201 #endif 202 if (col < 0 && !((Mat_SeqSELL*)(sell->B->data))->nonew) { 203 ierr = MatDisAssemble_MPISELL(mat);CHKERRQ(ierr); 204 col = in[j]; 205 /* Reinitialize the variables required by MatSetValues_SeqSELL_B_Private() */ 206 B = sell->B; 207 b = (Mat_SeqSELL*)B->data; 208 shift2 = b->sliidx[row>>3]+(row&0x07); /* starting index of the row */ 209 cp2 = b->colidx+shift2; 210 vp2 = b->val+shift2; 211 nrow2 = b->rlen[row]; 212 low2 = 0; 213 high2 = nrow2; 214 } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]); 215 } else col = in[j]; 216 MatSetValue_SeqSELL_Private(B,row,col,value,addv,im[i],in[j],cp2,vp2,lastcol2,low2,high2); /* set one value */ 217 } 218 } 219 } else { 220 if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]); 221 if (!sell->donotstash) { 222 mat->assembled = PETSC_FALSE; 223 if (roworiented) { 224 ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr); 225 } else { 226 ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr); 227 } 228 } 229 } 230 
} 231 PetscFunctionReturn(0); 232 } 233 234 PetscErrorCode MatGetValues_MPISELL(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[]) 235 { 236 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 237 PetscErrorCode ierr; 238 PetscInt i,j,rstart=mat->rmap->rstart,rend=mat->rmap->rend; 239 PetscInt cstart=mat->cmap->rstart,cend=mat->cmap->rend,row,col; 240 241 PetscFunctionBegin; 242 for (i=0; i<m; i++) { 243 if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/ 244 if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1); 245 if (idxm[i] >= rstart && idxm[i] < rend) { 246 row = idxm[i] - rstart; 247 for (j=0; j<n; j++) { 248 if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */ 249 if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1); 250 if (idxn[j] >= cstart && idxn[j] < cend) { 251 col = idxn[j] - cstart; 252 ierr = MatGetValues(sell->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr); 253 } else { 254 if (!sell->colmap) { 255 ierr = MatCreateColmap_MPISELL_Private(mat);CHKERRQ(ierr); 256 } 257 #if defined(PETSC_USE_CTABLE) 258 ierr = PetscTableFind(sell->colmap,idxn[j]+1,&col);CHKERRQ(ierr); 259 col--; 260 #else 261 col = sell->colmap[idxn[j]] - 1; 262 #endif 263 if ((col < 0) || (sell->garray[col] != idxn[j])) *(v+i*n+j) = 0.0; 264 else { 265 ierr = MatGetValues(sell->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr); 266 } 267 } 268 } 269 } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported"); 270 } 271 PetscFunctionReturn(0); 272 } 273 274 extern PetscErrorCode MatMultDiagonalBlock_MPISELL(Mat,Vec,Vec); 275 276 PetscErrorCode MatAssemblyBegin_MPISELL(Mat mat,MatAssemblyType mode) 277 { 278 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 279 PetscErrorCode ierr; 280 PetscInt nstash,reallocs; 281 282 PetscFunctionBegin; 283 if (sell->donotstash || mat->nooffprocentries) PetscFunctionReturn(0); 284 285 ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);CHKERRQ(ierr); 286 ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr); 287 ierr = PetscInfo2(sell->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr); 288 PetscFunctionReturn(0); 289 } 290 291 PetscErrorCode MatAssemblyEnd_MPISELL(Mat mat,MatAssemblyType mode) 292 { 293 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 294 PetscErrorCode ierr; 295 PetscMPIInt n; 296 PetscInt i,flg; 297 PetscInt *row,*col; 298 PetscScalar *val; 299 PetscBool other_disassembled; 300 /* do not use 'b = (Mat_SeqSELL*)sell->B->data' as B can be reset in disassembly */ 301 PetscFunctionBegin; 302 if (!sell->donotstash && !mat->nooffprocentries) { 303 while (1) { 304 ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr); 305 if (!flg) break; 306 307 for (i=0; i<n; i++) { /* assemble one by one */ 308 ierr = MatSetValues_MPISELL(mat,1,row+i,1,col+i,val+i,mat->insertmode);CHKERRQ(ierr); 309 } 310 } 311 ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr); 312 } 313 ierr = MatAssemblyBegin(sell->A,mode);CHKERRQ(ierr); 314 ierr = MatAssemblyEnd(sell->A,mode);CHKERRQ(ierr); 315 316 /* 317 determine if any processor has disassembled, if so we must 318 also disassemble ourselfs, in order that we may reassemble. 
319 */ 320 /* 321 if nonzero structure of submatrix B cannot change then we know that 322 no processor disassembled thus we can skip this stuff 323 */ 324 if (!((Mat_SeqSELL*)sell->B->data)->nonew) { 325 ierr = MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr); 326 if (mat->was_assembled && !other_disassembled) { 327 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatDisAssemble not implemented yet\n"); 328 ierr = MatDisAssemble_MPISELL(mat);CHKERRQ(ierr); 329 } 330 } 331 if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) { 332 ierr = MatSetUpMultiply_MPISELL(mat);CHKERRQ(ierr); 333 } 334 /* 335 ierr = MatSetOption(sell->B,MAT_USE_INODES,PETSC_FALSE);CHKERRQ(ierr); 336 */ 337 ierr = MatAssemblyBegin(sell->B,mode);CHKERRQ(ierr); 338 ierr = MatAssemblyEnd(sell->B,mode);CHKERRQ(ierr); 339 ierr = PetscFree2(sell->rowvalues,sell->rowindices);CHKERRQ(ierr); 340 sell->rowvalues = 0; 341 ierr = VecDestroy(&sell->diag);CHKERRQ(ierr); 342 343 /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */ 344 if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqSELL*)(sell->A->data))->nonew) { 345 PetscObjectState state = sell->A->nonzerostate + sell->B->nonzerostate; 346 ierr = MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr); 347 } 348 PetscFunctionReturn(0); 349 } 350 351 PetscErrorCode MatZeroEntries_MPISELL(Mat A) 352 { 353 Mat_MPISELL *l=(Mat_MPISELL*)A->data; 354 PetscErrorCode ierr; 355 356 PetscFunctionBegin; 357 ierr = MatZeroEntries(l->A);CHKERRQ(ierr); 358 ierr = MatZeroEntries(l->B);CHKERRQ(ierr); 359 PetscFunctionReturn(0); 360 } 361 362 PetscErrorCode MatMult_MPISELL(Mat A,Vec xx,Vec yy) 363 { 364 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 365 PetscErrorCode ierr; 366 PetscInt nt; 367 368 PetscFunctionBegin; 369 ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr); 370 if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt); 371 ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 372 ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr); 373 ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 374 ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr); 375 PetscFunctionReturn(0); 376 } 377 378 PetscErrorCode MatMultDiagonalBlock_MPISELL(Mat A,Vec bb,Vec xx) 379 { 380 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 381 PetscErrorCode ierr; 382 383 PetscFunctionBegin; 384 ierr = MatMultDiagonalBlock(a->A,bb,xx);CHKERRQ(ierr); 385 PetscFunctionReturn(0); 386 } 387 388 PetscErrorCode MatMultAdd_MPISELL(Mat A,Vec xx,Vec yy,Vec zz) 389 { 390 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 391 PetscErrorCode ierr; 392 393 PetscFunctionBegin; 394 ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 395 ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr); 396 ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 397 ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr); 398 PetscFunctionReturn(0); 399 } 400 401 PetscErrorCode MatMultTranspose_MPISELL(Mat A,Vec xx,Vec yy) 402 { 403 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 404 PetscErrorCode ierr; 405 PetscBool merged; 406 407 PetscFunctionBegin; 408 ierr = VecScatterGetMerged(a->Mvctx,&merged);CHKERRQ(ierr); 409 /* do nondiagonal part */ 410 ierr = 
(*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr); 411 if (!merged) { 412 /* send it on its way */ 413 ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); 414 /* do local part */ 415 ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr); 416 /* receive remote parts: note this assumes the values are not actually */ 417 /* added in yy until the next line, */ 418 ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); 419 } else { 420 /* do local part */ 421 ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr); 422 /* send it on its way */ 423 ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); 424 /* values actually were received in the Begin() but we need to call this nop */ 425 ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); 426 } 427 PetscFunctionReturn(0); 428 } 429 430 PetscErrorCode MatIsTranspose_MPISELL(Mat Amat,Mat Bmat,PetscReal tol,PetscBool *f) 431 { 432 MPI_Comm comm; 433 Mat_MPISELL *Asell=(Mat_MPISELL*)Amat->data,*Bsell; 434 Mat Adia=Asell->A,Bdia,Aoff,Boff,*Aoffs,*Boffs; 435 IS Me,Notme; 436 PetscErrorCode ierr; 437 PetscInt M,N,first,last,*notme,i; 438 PetscMPIInt size; 439 440 PetscFunctionBegin; 441 /* Easy test: symmetric diagonal block */ 442 Bsell = (Mat_MPISELL*)Bmat->data; Bdia = Bsell->A; 443 ierr = MatIsTranspose(Adia,Bdia,tol,f);CHKERRQ(ierr); 444 if (!*f) PetscFunctionReturn(0); 445 ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr); 446 ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr); 447 if (size == 1) PetscFunctionReturn(0); 448 449 /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */ 450 ierr = MatGetSize(Amat,&M,&N);CHKERRQ(ierr); 451 ierr = MatGetOwnershipRange(Amat,&first,&last);CHKERRQ(ierr); 452 ierr = PetscMalloc1(N-last+first,¬me);CHKERRQ(ierr); 453 for (i=0; i<first; i++) notme[i] = i; 454 for (i=last; i<M; i++) notme[i-last+first] = i; 455 ierr = ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);CHKERRQ(ierr); 456 ierr = ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);CHKERRQ(ierr); 457 ierr = MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);CHKERRQ(ierr); 458 Aoff = Aoffs[0]; 459 ierr = MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);CHKERRQ(ierr); 460 Boff = Boffs[0]; 461 ierr = MatIsTranspose(Aoff,Boff,tol,f);CHKERRQ(ierr); 462 ierr = MatDestroyMatrices(1,&Aoffs);CHKERRQ(ierr); 463 ierr = MatDestroyMatrices(1,&Boffs);CHKERRQ(ierr); 464 ierr = ISDestroy(&Me);CHKERRQ(ierr); 465 ierr = ISDestroy(&Notme);CHKERRQ(ierr); 466 ierr = PetscFree(notme);CHKERRQ(ierr); 467 PetscFunctionReturn(0); 468 } 469 470 PetscErrorCode MatMultTransposeAdd_MPISELL(Mat A,Vec xx,Vec yy,Vec zz) 471 { 472 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 473 PetscErrorCode ierr; 474 475 PetscFunctionBegin; 476 /* do nondiagonal part */ 477 ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr); 478 /* send it on its way */ 479 ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); 480 /* do local part */ 481 ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr); 482 /* receive remote parts */ 483 ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr); 484 PetscFunctionReturn(0); 485 } 486 487 /* 488 This only works correctly for square matrices where the subblock A->A is the 489 diagonal block 490 */ 491 PetscErrorCode MatGetDiagonal_MPISELL(Mat A,Vec v) 492 { 
493 PetscErrorCode ierr; 494 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 495 496 PetscFunctionBegin; 497 if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); 498 if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition"); 499 ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr); 500 PetscFunctionReturn(0); 501 } 502 503 PetscErrorCode MatScale_MPISELL(Mat A,PetscScalar aa) 504 { 505 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 506 PetscErrorCode ierr; 507 508 PetscFunctionBegin; 509 ierr = MatScale(a->A,aa);CHKERRQ(ierr); 510 ierr = MatScale(a->B,aa);CHKERRQ(ierr); 511 PetscFunctionReturn(0); 512 } 513 514 PetscErrorCode MatDestroy_MPISELL(Mat mat) 515 { 516 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 517 PetscErrorCode ierr; 518 519 PetscFunctionBegin; 520 #if defined(PETSC_USE_LOG) 521 PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N); 522 #endif 523 ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr); 524 ierr = VecDestroy(&sell->diag);CHKERRQ(ierr); 525 ierr = MatDestroy(&sell->A);CHKERRQ(ierr); 526 ierr = MatDestroy(&sell->B);CHKERRQ(ierr); 527 #if defined(PETSC_USE_CTABLE) 528 ierr = PetscTableDestroy(&sell->colmap);CHKERRQ(ierr); 529 #else 530 ierr = PetscFree(sell->colmap);CHKERRQ(ierr); 531 #endif 532 ierr = PetscFree(sell->garray);CHKERRQ(ierr); 533 ierr = VecDestroy(&sell->lvec);CHKERRQ(ierr); 534 ierr = VecScatterDestroy(&sell->Mvctx);CHKERRQ(ierr); 535 ierr = PetscFree2(sell->rowvalues,sell->rowindices);CHKERRQ(ierr); 536 ierr = PetscFree(sell->ld);CHKERRQ(ierr); 537 ierr = PetscFree(mat->data);CHKERRQ(ierr); 538 539 ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr); 540 ierr = PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);CHKERRQ(ierr); 541 ierr = PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);CHKERRQ(ierr); 542 ierr = PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);CHKERRQ(ierr); 543 ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPISELLSetPreallocation_C",NULL);CHKERRQ(ierr); 544 ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpisell_mpiaij_C",NULL);CHKERRQ(ierr); 545 ierr = PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);CHKERRQ(ierr); 546 PetscFunctionReturn(0); 547 } 548 549 #include <petscdraw.h> 550 PetscErrorCode MatView_MPISELL_ASCIIorDraworSocket(Mat mat,PetscViewer viewer) 551 { 552 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 553 PetscErrorCode ierr; 554 PetscMPIInt rank=sell->rank,size=sell->size; 555 PetscBool isdraw,iascii,isbinary; 556 PetscViewer sviewer; 557 PetscViewerFormat format; 558 559 PetscFunctionBegin; 560 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr); 561 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr); 562 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr); 563 if (iascii) { 564 ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr); 565 if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) { 566 MatInfo info; 567 PetscBool inodes; 568 569 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr); 570 ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr); 571 ierr = MatInodeGetInodeSizes(sell->A,NULL,(PetscInt**)&inodes,NULL);CHKERRQ(ierr); 572 ierr = 
PetscViewerASCIIPushSynchronized(viewer);CHKERRQ(ierr); 573 if (!inodes) { 574 ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n", 575 rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr); 576 } else { 577 ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n", 578 rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr); 579 } 580 ierr = MatGetInfo(sell->A,MAT_LOCAL,&info);CHKERRQ(ierr); 581 ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr); 582 ierr = MatGetInfo(sell->B,MAT_LOCAL,&info);CHKERRQ(ierr); 583 ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr); 584 ierr = PetscViewerFlush(viewer);CHKERRQ(ierr); 585 ierr = PetscViewerASCIIPopSynchronized(viewer);CHKERRQ(ierr); 586 ierr = PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");CHKERRQ(ierr); 587 ierr = VecScatterView(sell->Mvctx,viewer);CHKERRQ(ierr); 588 PetscFunctionReturn(0); 589 } else if (format == PETSC_VIEWER_ASCII_INFO) { 590 PetscInt inodecount,inodelimit,*inodes; 591 ierr = MatInodeGetInodeSizes(sell->A,&inodecount,&inodes,&inodelimit);CHKERRQ(ierr); 592 if (inodes) { 593 ierr = PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);CHKERRQ(ierr); 594 } else { 595 ierr = PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");CHKERRQ(ierr); 596 } 597 PetscFunctionReturn(0); 598 } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) { 599 PetscFunctionReturn(0); 600 } 601 } else if (isbinary) { 602 if (size == 1) { 603 ierr = PetscObjectSetName((PetscObject)sell->A,((PetscObject)mat)->name);CHKERRQ(ierr); 604 ierr = MatView(sell->A,viewer);CHKERRQ(ierr); 605 } else { 606 /* ierr = MatView_MPISELL_Binary(mat,viewer);CHKERRQ(ierr); */ 607 } 608 PetscFunctionReturn(0); 609 } else if (isdraw) { 610 PetscDraw draw; 611 PetscBool isnull; 612 ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr); 613 ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); 614 if (isnull) PetscFunctionReturn(0); 615 } 616 617 { 618 /* assemble the entire matrix onto first processor. 
*/ 619 Mat A; 620 Mat_SeqSELL *Aloc; 621 PetscInt M=mat->rmap->N,N=mat->cmap->N,*acolidx,row,col,i,j; 622 MatScalar *aval; 623 PetscBool isnonzero; 624 625 ierr = MatCreate(PetscObjectComm((PetscObject)mat),&A);CHKERRQ(ierr); 626 if (!rank) { 627 ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr); 628 } else { 629 ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr); 630 } 631 /* This is just a temporary matrix, so explicitly using MATMPISELL is probably best */ 632 ierr = MatSetType(A,MATMPISELL);CHKERRQ(ierr); 633 ierr = MatMPISELLSetPreallocation(A,0,NULL,0,NULL);CHKERRQ(ierr); 634 ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr); 635 ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)A);CHKERRQ(ierr); 636 637 /* copy over the A part */ 638 Aloc = (Mat_SeqSELL*)sell->A->data; 639 acolidx = Aloc->colidx; aval = Aloc->val; 640 for (i=0; i<Aloc->totalslices; i++) { /* loop over slices */ 641 for (j=Aloc->sliidx[i]; j<Aloc->sliidx[i+1]; j++) { 642 isnonzero = (PetscBool)((j-Aloc->sliidx[i])/8 < Aloc->rlen[(i<<3)+(j&0x07)]); 643 if (isnonzero) { /* check the mask bit */ 644 row = (i<<3)+(j&0x07) + mat->rmap->rstart; /* i<<3 is the starting row of this slice */ 645 col = *acolidx + mat->rmap->rstart; 646 ierr = MatSetValues(A,1,&row,1,&col,aval,INSERT_VALUES);CHKERRQ(ierr); 647 } 648 aval++; acolidx++; 649 } 650 } 651 652 /* copy over the B part */ 653 Aloc = (Mat_SeqSELL*)sell->B->data; 654 acolidx = Aloc->colidx; aval = Aloc->val; 655 for (i=0; i<Aloc->totalslices; i++) { 656 for (j=Aloc->sliidx[i]; j<Aloc->sliidx[i+1]; j++) { 657 isnonzero = (PetscBool)((j-Aloc->sliidx[i])/8 < Aloc->rlen[(i<<3)+(j&0x07)]); 658 if (isnonzero) { 659 row = (i<<3)+(j&0x07) + mat->rmap->rstart; 660 col = sell->garray[*acolidx]; 661 ierr = MatSetValues(A,1,&row,1,&col,aval,INSERT_VALUES);CHKERRQ(ierr); 662 } 663 aval++; acolidx++; 664 } 665 } 666 667 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 668 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 669 /* 670 Everyone has to call to draw the matrix since the graphics waits are 671 synchronized across all processors that share the PetscDraw object 672 */ 673 ierr = PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr); 674 if (!rank) { 675 ierr = PetscObjectSetName((PetscObject)((Mat_MPISELL*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr); 676 ierr = MatView_SeqSELL(((Mat_MPISELL*)(A->data))->A,sviewer);CHKERRQ(ierr); 677 } 678 ierr = PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr); 679 ierr = PetscViewerFlush(viewer);CHKERRQ(ierr); 680 ierr = MatDestroy(&A);CHKERRQ(ierr); 681 } 682 PetscFunctionReturn(0); 683 } 684 685 PetscErrorCode MatView_MPISELL(Mat mat,PetscViewer viewer) 686 { 687 PetscErrorCode ierr; 688 PetscBool iascii,isdraw,issocket,isbinary; 689 690 PetscFunctionBegin; 691 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr); 692 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr); 693 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr); 694 ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);CHKERRQ(ierr); 695 if (iascii || isdraw || isbinary || issocket) { 696 ierr = MatView_MPISELL_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr); 697 } 698 PetscFunctionReturn(0); 699 } 700 701 PetscErrorCode MatGetGhosts_MPISELL(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[]) 702 { 703 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 
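  /* The ghosts are the global column indices of the off-diagonal block sell->B: garray lists
     them and the column count of sell->B gives their number. */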
704 PetscErrorCode ierr; 705 706 PetscFunctionBegin; 707 ierr = MatGetSize(sell->B,NULL,nghosts);CHKERRQ(ierr); 708 if (ghosts) *ghosts = sell->garray; 709 PetscFunctionReturn(0); 710 } 711 712 PetscErrorCode MatGetInfo_MPISELL(Mat matin,MatInfoType flag,MatInfo *info) 713 { 714 Mat_MPISELL *mat=(Mat_MPISELL*)matin->data; 715 Mat A=mat->A,B=mat->B; 716 PetscErrorCode ierr; 717 PetscReal isend[5],irecv[5]; 718 719 PetscFunctionBegin; 720 info->block_size = 1.0; 721 ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr); 722 723 isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded; 724 isend[3] = info->memory; isend[4] = info->mallocs; 725 726 ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr); 727 728 isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded; 729 isend[3] += info->memory; isend[4] += info->mallocs; 730 if (flag == MAT_LOCAL) { 731 info->nz_used = isend[0]; 732 info->nz_allocated = isend[1]; 733 info->nz_unneeded = isend[2]; 734 info->memory = isend[3]; 735 info->mallocs = isend[4]; 736 } else if (flag == MAT_GLOBAL_MAX) { 737 ierr = MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr); 738 739 info->nz_used = irecv[0]; 740 info->nz_allocated = irecv[1]; 741 info->nz_unneeded = irecv[2]; 742 info->memory = irecv[3]; 743 info->mallocs = irecv[4]; 744 } else if (flag == MAT_GLOBAL_SUM) { 745 ierr = MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr); 746 747 info->nz_used = irecv[0]; 748 info->nz_allocated = irecv[1]; 749 info->nz_unneeded = irecv[2]; 750 info->memory = irecv[3]; 751 info->mallocs = irecv[4]; 752 } 753 info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */ 754 info->fill_ratio_needed = 0; 755 info->factor_mallocs = 0; 756 PetscFunctionReturn(0); 757 } 758 759 PetscErrorCode MatSetOption_MPISELL(Mat A,MatOption op,PetscBool flg) 760 { 761 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 762 PetscErrorCode ierr; 763 764 PetscFunctionBegin; 765 switch (op) { 766 case MAT_NEW_NONZERO_LOCATIONS: 767 case MAT_NEW_NONZERO_ALLOCATION_ERR: 768 case MAT_UNUSED_NONZERO_LOCATION_ERR: 769 case MAT_KEEP_NONZERO_PATTERN: 770 case MAT_NEW_NONZERO_LOCATION_ERR: 771 case MAT_USE_INODES: 772 case MAT_IGNORE_ZERO_ENTRIES: 773 MatCheckPreallocated(A,1); 774 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 775 ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr); 776 break; 777 case MAT_ROW_ORIENTED: 778 MatCheckPreallocated(A,1); 779 a->roworiented = flg; 780 781 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 782 ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr); 783 break; 784 case MAT_NEW_DIAGONALS: 785 ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr); 786 break; 787 case MAT_IGNORE_OFF_PROC_ENTRIES: 788 a->donotstash = flg; 789 break; 790 case MAT_SPD: 791 A->spd_set = PETSC_TRUE; 792 A->spd = flg; 793 if (flg) { 794 A->symmetric = PETSC_TRUE; 795 A->structurally_symmetric = PETSC_TRUE; 796 A->symmetric_set = PETSC_TRUE; 797 A->structurally_symmetric_set = PETSC_TRUE; 798 } 799 break; 800 case MAT_SYMMETRIC: 801 MatCheckPreallocated(A,1); 802 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 803 break; 804 case MAT_STRUCTURALLY_SYMMETRIC: 805 MatCheckPreallocated(A,1); 806 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 807 break; 808 case MAT_HERMITIAN: 809 MatCheckPreallocated(A,1); 810 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 811 break; 812 case MAT_SYMMETRY_ETERNAL: 813 MatCheckPreallocated(A,1); 814 ierr = 
MatSetOption(a->A,op,flg);CHKERRQ(ierr); 815 break; 816 default: 817 SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op); 818 } 819 PetscFunctionReturn(0); 820 } 821 822 823 PetscErrorCode MatDiagonalScale_MPISELL(Mat mat,Vec ll,Vec rr) 824 { 825 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 826 Mat a=sell->A,b=sell->B; 827 PetscErrorCode ierr; 828 PetscInt s1,s2,s3; 829 830 PetscFunctionBegin; 831 ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr); 832 if (rr) { 833 ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr); 834 if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size"); 835 /* Overlap communication with computation. */ 836 ierr = VecScatterBegin(sell->Mvctx,rr,sell->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 837 } 838 if (ll) { 839 ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr); 840 if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size"); 841 ierr = (*b->ops->diagonalscale)(b,ll,0);CHKERRQ(ierr); 842 } 843 /* scale the diagonal block */ 844 ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr); 845 846 if (rr) { 847 /* Do a scatter end and then right scale the off-diagonal block */ 848 ierr = VecScatterEnd(sell->Mvctx,rr,sell->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 849 ierr = (*b->ops->diagonalscale)(b,0,sell->lvec);CHKERRQ(ierr); 850 } 851 PetscFunctionReturn(0); 852 } 853 854 PetscErrorCode MatSetUnfactored_MPISELL(Mat A) 855 { 856 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 857 PetscErrorCode ierr; 858 859 PetscFunctionBegin; 860 ierr = MatSetUnfactored(a->A);CHKERRQ(ierr); 861 PetscFunctionReturn(0); 862 } 863 864 PetscErrorCode MatEqual_MPISELL(Mat A,Mat B,PetscBool *flag) 865 { 866 Mat_MPISELL *matB=(Mat_MPISELL*)B->data,*matA=(Mat_MPISELL*)A->data; 867 Mat a,b,c,d; 868 PetscBool flg; 869 PetscErrorCode ierr; 870 871 PetscFunctionBegin; 872 a = matA->A; b = matA->B; 873 c = matB->A; d = matB->B; 874 875 ierr = MatEqual(a,c,&flg);CHKERRQ(ierr); 876 if (flg) { 877 ierr = MatEqual(b,d,&flg);CHKERRQ(ierr); 878 } 879 ierr = MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 880 PetscFunctionReturn(0); 881 } 882 883 PetscErrorCode MatCopy_MPISELL(Mat A,Mat B,MatStructure str) 884 { 885 PetscErrorCode ierr; 886 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 887 Mat_MPISELL *b=(Mat_MPISELL*)B->data; 888 889 PetscFunctionBegin; 890 /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */ 891 if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) { 892 /* because of the column compression in the off-processor part of the matrix a->B, 893 the number of columns in a->B and b->B may be different, hence we cannot call 894 the MatCopy() directly on the two parts. 
If need be, we can provide a more 895 efficient copy than the MatCopy_Basic() by first uncompressing the a->B matrices 896 then copying the submatrices */ 897 ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr); 898 } else { 899 ierr = MatCopy(a->A,b->A,str);CHKERRQ(ierr); 900 ierr = MatCopy(a->B,b->B,str);CHKERRQ(ierr); 901 } 902 PetscFunctionReturn(0); 903 } 904 905 PetscErrorCode MatSetUp_MPISELL(Mat A) 906 { 907 PetscErrorCode ierr; 908 909 PetscFunctionBegin; 910 ierr = MatMPISELLSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr); 911 PetscFunctionReturn(0); 912 } 913 914 915 extern PetscErrorCode MatConjugate_SeqSELL(Mat); 916 917 PetscErrorCode MatConjugate_MPISELL(Mat mat) 918 { 919 #if defined(PETSC_USE_COMPLEX) 920 PetscErrorCode ierr; 921 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 922 923 PetscFunctionBegin; 924 ierr = MatConjugate_SeqSELL(sell->A);CHKERRQ(ierr); 925 ierr = MatConjugate_SeqSELL(sell->B);CHKERRQ(ierr); 926 #else 927 PetscFunctionBegin; 928 #endif 929 PetscFunctionReturn(0); 930 } 931 932 PetscErrorCode MatRealPart_MPISELL(Mat A) 933 { 934 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 935 PetscErrorCode ierr; 936 937 PetscFunctionBegin; 938 ierr = MatRealPart(a->A);CHKERRQ(ierr); 939 ierr = MatRealPart(a->B);CHKERRQ(ierr); 940 PetscFunctionReturn(0); 941 } 942 943 PetscErrorCode MatImaginaryPart_MPISELL(Mat A) 944 { 945 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 946 PetscErrorCode ierr; 947 948 PetscFunctionBegin; 949 ierr = MatImaginaryPart(a->A);CHKERRQ(ierr); 950 ierr = MatImaginaryPart(a->B);CHKERRQ(ierr); 951 PetscFunctionReturn(0); 952 } 953 954 PetscErrorCode MatInvertBlockDiagonal_MPISELL(Mat A,const PetscScalar **values) 955 { 956 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 957 PetscErrorCode ierr; 958 959 PetscFunctionBegin; 960 ierr = MatInvertBlockDiagonal(a->A,values);CHKERRQ(ierr); 961 A->factorerrortype = a->A->factorerrortype; 962 PetscFunctionReturn(0); 963 } 964 965 static PetscErrorCode MatSetRandom_MPISELL(Mat x,PetscRandom rctx) 966 { 967 PetscErrorCode ierr; 968 Mat_MPISELL *sell=(Mat_MPISELL*)x->data; 969 970 PetscFunctionBegin; 971 ierr = MatSetRandom(sell->A,rctx);CHKERRQ(ierr); 972 ierr = MatSetRandom(sell->B,rctx);CHKERRQ(ierr); 973 ierr = MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 974 ierr = MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 975 PetscFunctionReturn(0); 976 } 977 978 PetscErrorCode MatSetFromOptions_MPISELL(PetscOptionItems *PetscOptionsObject,Mat A) 979 { 980 PetscErrorCode ierr; 981 982 PetscFunctionBegin; 983 ierr = PetscOptionsHead(PetscOptionsObject,"MPISELL options");CHKERRQ(ierr); 984 ierr = PetscOptionsTail();CHKERRQ(ierr); 985 PetscFunctionReturn(0); 986 } 987 988 PetscErrorCode MatShift_MPISELL(Mat Y,PetscScalar a) 989 { 990 PetscErrorCode ierr; 991 Mat_MPISELL *msell=(Mat_MPISELL*)Y->data; 992 Mat_SeqSELL *sell=(Mat_SeqSELL*)msell->A->data; 993 994 PetscFunctionBegin; 995 if (!Y->preallocated) { 996 ierr = MatMPISELLSetPreallocation(Y,1,NULL,0,NULL);CHKERRQ(ierr); 997 } else if (!sell->nz) { 998 PetscInt nonew = sell->nonew; 999 ierr = MatSeqSELLSetPreallocation(msell->A,1,NULL);CHKERRQ(ierr); 1000 sell->nonew = nonew; 1001 } 1002 ierr = MatShift_Basic(Y,a);CHKERRQ(ierr); 1003 PetscFunctionReturn(0); 1004 } 1005 1006 PetscErrorCode MatMissingDiagonal_MPISELL(Mat A,PetscBool *missing,PetscInt *d) 1007 { 1008 Mat_MPISELL *a=(Mat_MPISELL*)A->data; 1009 PetscErrorCode ierr; 1010 1011 PetscFunctionBegin; 1012 if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices"); 1013 
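  /* For a square MPISELL matrix the diagonal lies entirely in the local diagonal block a->A,
     so the check is delegated to it and any reported local index is shifted below by the
     start of this process's row ownership range. */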
ierr = MatMissingDiagonal(a->A,missing,d);CHKERRQ(ierr); 1014 if (d) { 1015 PetscInt rstart; 1016 ierr = MatGetOwnershipRange(A,&rstart,NULL);CHKERRQ(ierr); 1017 *d += rstart; 1018 1019 } 1020 PetscFunctionReturn(0); 1021 } 1022 1023 PetscErrorCode MatGetDiagonalBlock_MPISELL(Mat A,Mat *a) 1024 { 1025 PetscFunctionBegin; 1026 *a = ((Mat_MPISELL*)A->data)->A; 1027 PetscFunctionReturn(0); 1028 } 1029 1030 /* -------------------------------------------------------------------*/ 1031 static struct _MatOps MatOps_Values = {MatSetValues_MPISELL, 1032 0, 1033 0, 1034 MatMult_MPISELL, 1035 /* 4*/ MatMultAdd_MPISELL, 1036 MatMultTranspose_MPISELL, 1037 MatMultTransposeAdd_MPISELL, 1038 0, 1039 0, 1040 0, 1041 /*10*/ 0, 1042 0, 1043 0, 1044 MatSOR_MPISELL, 1045 0, 1046 /*15*/ MatGetInfo_MPISELL, 1047 MatEqual_MPISELL, 1048 MatGetDiagonal_MPISELL, 1049 MatDiagonalScale_MPISELL, 1050 0, 1051 /*20*/ MatAssemblyBegin_MPISELL, 1052 MatAssemblyEnd_MPISELL, 1053 MatSetOption_MPISELL, 1054 MatZeroEntries_MPISELL, 1055 /*24*/ 0, 1056 0, 1057 0, 1058 0, 1059 0, 1060 /*29*/ MatSetUp_MPISELL, 1061 0, 1062 0, 1063 MatGetDiagonalBlock_MPISELL, 1064 0, 1065 /*34*/ MatDuplicate_MPISELL, 1066 0, 1067 0, 1068 0, 1069 0, 1070 /*39*/ 0, 1071 0, 1072 0, 1073 MatGetValues_MPISELL, 1074 MatCopy_MPISELL, 1075 /*44*/ 0, 1076 MatScale_MPISELL, 1077 MatShift_MPISELL, 1078 MatDiagonalSet_MPISELL, 1079 0, 1080 /*49*/ MatSetRandom_MPISELL, 1081 0, 1082 0, 1083 0, 1084 0, 1085 /*54*/ MatFDColoringCreate_MPIXAIJ, 1086 0, 1087 MatSetUnfactored_MPISELL, 1088 0, 1089 0, 1090 /*59*/ 0, 1091 MatDestroy_MPISELL, 1092 MatView_MPISELL, 1093 0, 1094 0, 1095 /*64*/ 0, 1096 0, 1097 0, 1098 0, 1099 0, 1100 /*69*/ 0, 1101 0, 1102 0, 1103 0, 1104 0, 1105 0, 1106 /*75*/ MatFDColoringApply_AIJ, /* reuse AIJ function */ 1107 MatSetFromOptions_MPISELL, 1108 0, 1109 0, 1110 0, 1111 /*80*/ 0, 1112 0, 1113 0, 1114 /*83*/ 0, 1115 0, 1116 0, 1117 0, 1118 0, 1119 0, 1120 /*89*/ 0, 1121 0, 1122 0, 1123 0, 1124 0, 1125 /*94*/ 0, 1126 0, 1127 0, 1128 0, 1129 0, 1130 /*99*/ 0, 1131 0, 1132 0, 1133 MatConjugate_MPISELL, 1134 0, 1135 /*104*/0, 1136 MatRealPart_MPISELL, 1137 MatImaginaryPart_MPISELL, 1138 0, 1139 0, 1140 /*109*/0, 1141 0, 1142 0, 1143 0, 1144 MatMissingDiagonal_MPISELL, 1145 /*114*/0, 1146 0, 1147 MatGetGhosts_MPISELL, 1148 0, 1149 0, 1150 /*119*/0, 1151 0, 1152 0, 1153 0, 1154 0, 1155 /*124*/0, 1156 0, 1157 MatInvertBlockDiagonal_MPISELL, 1158 0, 1159 0, 1160 /*129*/0, 1161 0, 1162 0, 1163 0, 1164 0, 1165 /*134*/0, 1166 0, 1167 0, 1168 0, 1169 0, 1170 /*139*/0, 1171 0, 1172 0, 1173 MatFDColoringSetUp_MPIXAIJ, 1174 0, 1175 /*144*/0 1176 }; 1177 1178 /* ----------------------------------------------------------------------------------------*/ 1179 1180 PetscErrorCode MatStoreValues_MPISELL(Mat mat) 1181 { 1182 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 1183 PetscErrorCode ierr; 1184 1185 PetscFunctionBegin; 1186 ierr = MatStoreValues(sell->A);CHKERRQ(ierr); 1187 ierr = MatStoreValues(sell->B);CHKERRQ(ierr); 1188 PetscFunctionReturn(0); 1189 } 1190 1191 PetscErrorCode MatRetrieveValues_MPISELL(Mat mat) 1192 { 1193 Mat_MPISELL *sell=(Mat_MPISELL*)mat->data; 1194 PetscErrorCode ierr; 1195 1196 PetscFunctionBegin; 1197 ierr = MatRetrieveValues(sell->A);CHKERRQ(ierr); 1198 ierr = MatRetrieveValues(sell->B);CHKERRQ(ierr); 1199 PetscFunctionReturn(0); 1200 } 1201 1202 PetscErrorCode MatMPISELLSetPreallocation_MPISELL(Mat B,PetscInt d_rlenmax,const PetscInt d_rlen[],PetscInt o_rlenmax,const PetscInt o_rlen[]) 1203 { 1204 Mat_MPISELL *b; 1205 
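  /* Note: the diagonal block b->A is created with the local row and column sizes, while the
     off-diagonal block b->B is created here with the full global column width; after assembly
     its columns correspond to the ghost columns recorded in garray. */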
PetscErrorCode ierr; 1206 1207 PetscFunctionBegin; 1208 ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); 1209 ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); 1210 b = (Mat_MPISELL*)B->data; 1211 1212 if (!B->preallocated) { 1213 /* Explicitly create 2 MATSEQSELL matrices. */ 1214 ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); 1215 ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); 1216 ierr = MatSetBlockSizesFromMats(b->A,B,B);CHKERRQ(ierr); 1217 ierr = MatSetType(b->A,MATSEQSELL);CHKERRQ(ierr); 1218 ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); 1219 ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); 1220 ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr); 1221 ierr = MatSetBlockSizesFromMats(b->B,B,B);CHKERRQ(ierr); 1222 ierr = MatSetType(b->B,MATSEQSELL);CHKERRQ(ierr); 1223 ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); 1224 } 1225 1226 ierr = MatSeqSELLSetPreallocation(b->A,d_rlenmax,d_rlen);CHKERRQ(ierr); 1227 ierr = MatSeqSELLSetPreallocation(b->B,o_rlenmax,o_rlen);CHKERRQ(ierr); 1228 B->preallocated = PETSC_TRUE; 1229 B->was_assembled = PETSC_FALSE; 1230 /* 1231 critical for MatAssemblyEnd to work. 1232 MatAssemblyBegin checks it to set up was_assembled 1233 and MatAssemblyEnd checks was_assembled to determine whether to build garray 1234 */ 1235 B->assembled = PETSC_FALSE; 1236 PetscFunctionReturn(0); 1237 } 1238 1239 PetscErrorCode MatDuplicate_MPISELL(Mat matin,MatDuplicateOption cpvalues,Mat *newmat) 1240 { 1241 Mat mat; 1242 Mat_MPISELL *a,*oldmat=(Mat_MPISELL*)matin->data; 1243 PetscErrorCode ierr; 1244 1245 PetscFunctionBegin; 1246 *newmat = 0; 1247 ierr = MatCreate(PetscObjectComm((PetscObject)matin),&mat);CHKERRQ(ierr); 1248 ierr = MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);CHKERRQ(ierr); 1249 ierr = MatSetBlockSizesFromMats(mat,matin,matin);CHKERRQ(ierr); 1250 ierr = MatSetType(mat,((PetscObject)matin)->type_name);CHKERRQ(ierr); 1251 ierr = PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));CHKERRQ(ierr); 1252 a = (Mat_MPISELL*)mat->data; 1253 1254 mat->factortype = matin->factortype; 1255 mat->assembled = PETSC_TRUE; 1256 mat->insertmode = NOT_SET_VALUES; 1257 mat->preallocated = PETSC_TRUE; 1258 1259 a->size = oldmat->size; 1260 a->rank = oldmat->rank; 1261 a->donotstash = oldmat->donotstash; 1262 a->roworiented = oldmat->roworiented; 1263 a->rowindices = 0; 1264 a->rowvalues = 0; 1265 a->getrowactive = PETSC_FALSE; 1266 1267 ierr = PetscLayoutReference(matin->rmap,&mat->rmap);CHKERRQ(ierr); 1268 ierr = PetscLayoutReference(matin->cmap,&mat->cmap);CHKERRQ(ierr); 1269 1270 if (oldmat->colmap) { 1271 #if defined(PETSC_USE_CTABLE) 1272 ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr); 1273 #else 1274 ierr = PetscMalloc1(mat->cmap->N,&a->colmap);CHKERRQ(ierr); 1275 ierr = PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr); 1276 ierr = PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr); 1277 #endif 1278 } else a->colmap = 0; 1279 if (oldmat->garray) { 1280 PetscInt len; 1281 len = oldmat->B->cmap->n; 1282 ierr = PetscMalloc1(len+1,&a->garray);CHKERRQ(ierr); 1283 ierr = PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));CHKERRQ(ierr); 1284 if (len) { ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));CHKERRQ(ierr); } 1285 } else a->garray = 0; 1286 1287 ierr = 
VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);CHKERRQ(ierr);
  ierr = VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);CHKERRQ(ierr);
  ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);CHKERRQ(ierr);
  ierr = MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);CHKERRQ(ierr);
  ierr = PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);CHKERRQ(ierr);
  *newmat = mat;
  PetscFunctionReturn(0);
}

/*@C
   MatMPISELLSetPreallocation - Preallocates memory for a sparse parallel matrix in SELL format.
   For good matrix assembly performance the user should preallocate the matrix storage by
   setting the parameters d_nz (or d_nnz) and o_nz (or o_nnz).

   Collective on MPI_Comm

   Input Parameters:
+  B - the matrix
.  d_nz - number of nonzeros per row in DIAGONAL portion of local submatrix
          (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. 'm'.
           For matrices that will be factored, you must leave room for (and set)
           the diagonal entry even if it is zero.
.  o_nz - number of nonzeros per row in the OFF-DIAGONAL portion of local
          submatrix (same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e. 'm'.

   If the *_nnz parameter is given then the *_nz parameter is ignored.

   The stored row and column indices begin with zero.

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix which is obtained by extracting the part corresponding to
   the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
   first row that belongs to the processor, r2 is the last row belonging to
   this processor, and c1-c2 is the range of indices of the local part of a
   vector suitable for applying the matrix to. This is an m x n matrix. In the
   common case of a square matrix, the row and column ranges are the same and
   the DIAGONAL part is also square. The remaining portion of the local
   submatrix (m x (N-n)) constitutes the OFF-DIAGONAL portion.

   If o_nnz, d_nnz are specified, then o_nz and d_nz are ignored.
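
   For example, a minimal usage sketch (the local sizes m and n and the per-row
   counts 3 and 2 below are only illustrative, not recommendations):
.vb
     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetType(A,MATMPISELL);
     MatSetSizes(A,m,n,PETSC_DETERMINE,PETSC_DETERMINE);
     MatMPISELLSetPreallocation(A,3,NULL,2,NULL);
.ve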

   You can call MatGetInfo() to get information on how effective the preallocation was;
   for example the fields mallocs, nz_allocated, nz_used, nz_unneeded;
   You can also run with the option -info and look for messages with the string
   malloc in them to see if additional memory allocation was needed.

   Example usage:

   Consider the following 8x8 matrix with 34 non-zero values, that is
   assembled across 3 processors. Let's assume that proc0 owns 3 rows,
   proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
   as follows:

.vb
            1  2  0  |  0  3  0  |  0  4
    Proc0   0  5  6  |  7  0  0  |  8  0
            9  0 10  | 11  0  0  | 12  0
    -------------------------------------
           13  0 14  | 15 16 17  |  0  0
    Proc1   0 18  0  | 19 20 21  |  0  0
            0  0  0  | 22 23  0  | 24  0
    -------------------------------------
    Proc2  25 26 27  |  0  0 28  | 29  0
           30  0  0  | 31 32 33  |  0 34
.ve

   This can be represented as a collection of submatrices as:

.vb
      A B C
      D E F
      G H I
.ve

   Where the submatrices A,B,C are owned by proc0, D,E,F are
   owned by proc1, and G,H,I are owned by proc2.

   The 'm' parameters for proc0, proc1, proc2 are 3, 3, 2 respectively.
   The 'n' parameters for proc0, proc1, proc2 are 3, 3, 2 respectively.
   The 'M','N' parameters are 8,8, and have the same values on all procs.

   The DIAGONAL submatrices corresponding to proc0, proc1, proc2 are
   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
   corresponding to proc0, proc1, proc2 are [BC], [DF], [GH] respectively.
   Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
   part as SeqSELL matrices; for example, proc1 will store [E] as a SeqSELL
   matrix, and [DF] as another SeqSELL matrix.

   When d_nz, o_nz parameters are specified, d_nz storage elements are
   allocated for every row of the local DIAGONAL submatrix, and o_nz
   storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
   One way to choose d_nz and o_nz is to use the maximum number of nonzeros over
   the local rows for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
   In this case, the values of d_nz, o_nz are:
.vb
     proc0 : d_nz = 2, o_nz = 2
     proc1 : d_nz = 3, o_nz = 2
     proc2 : d_nz = 1, o_nz = 4
.ve
   We are allocating m*(d_nz+o_nz) storage locations for every proc. This
   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
   for proc2, i.e., we are using 12+15+10=37 storage locations to store
   34 values.

   When d_nnz, o_nnz parameters are specified, the storage is specified
   for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
   In the above case the values for d_nnz, o_nnz are:
.vb
     proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
     proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
     proc2: d_nnz = [1,1]   and o_nnz = [4,4]
.ve
   Here the space allocated is according to nz (or the maximum values in the nnz
   if nnz is provided) for the DIAGONAL and OFF-DIAGONAL submatrices, i.e.
   (2+2)*3 + (3+2)*3 + (1+4)*2 = 37.

   Level: intermediate

.keywords: matrix, sell, sparse, parallel

.seealso: MatCreate(), MatCreateSeqSELL(), MatSetValues(), MatCreateSELL(),
          MATMPISELL, MatGetInfo(), PetscSplitOwnership()
@*/
PetscErrorCode MatMPISELLSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(B,MAT_CLASSID,1);
  PetscValidType(B,1);
  ierr = PetscTryMethod(B,"MatMPISELLSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*@C
   MatCreateSELL - Creates a sparse parallel matrix in SELL format.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
       This value should be the same as the local size used in creating the
       y vector for the matrix-vector product y = Ax.
.  n - This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have
       calculated if N is given). For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
.  d_rlenmax - max number of nonzeros per row in DIAGONAL portion of local submatrix
               (same value is used for all local rows)
.  d_rlen - array containing the number of nonzeros in the various rows of the
            DIAGONAL portion of the local submatrix (possibly different for each row)
            or NULL, if d_rlenmax is used to specify the nonzero structure.
            The size of this array is equal to the number of local rows, i.e. 'm'.
.  o_rlenmax - max number of nonzeros per row in the OFF-DIAGONAL portion of local
               submatrix (same value is used for all local rows).
-  o_rlen - array containing the number of nonzeros in the various rows of the
            OFF-DIAGONAL portion of the local submatrix (possibly different for
            each row) or NULL, if o_rlenmax is used to specify the nonzero
            structure. The size of this array is equal to the number
            of local rows, i.e. 'm'.

   Output Parameter:
.  A - the matrix

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqSELLSetPreallocation]

   Notes:
   If the *_rlen parameter is given then the *_rlenmax parameter is ignored.

   m,n,M,N parameters specify the size of the matrix, and its partitioning across
   processors, while d_rlenmax,d_rlen,o_rlenmax,o_rlen parameters specify the approximate
   storage requirements for this matrix.
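
   As a quick illustration (the local size nlocal and the row-length estimates 5
   and 2 below are placeholders, not recommendations):
.vb
     Mat A;
     MatCreateSELL(PETSC_COMM_WORLD,nlocal,nlocal,PETSC_DETERMINE,PETSC_DETERMINE,5,NULL,2,NULL,&A);
.ve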

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned across processors such that the
   first m0 rows belong to process 0, the next m1 rows belong to
   process 1, the next m2 rows belong to process 2, etc., where
   m0,m1,m2,... are the input parameter 'm', i.e., each processor stores
   values corresponding to an [m x N] submatrix.

   The columns are logically partitioned with the n0 columns belonging
   to the 0th partition, the next n1 columns belonging to the next
   partition, etc., where n0,n1,n2,... are the input parameter 'n'.

   The DIAGONAL portion of the local submatrix on any given processor
   is the submatrix corresponding to the rows and columns m,n
   corresponding to the given processor, i.e., the diagonal matrix on
   process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
   etc. The remaining portion of the local submatrix [m x (N-n)]
   constitutes the OFF-DIAGONAL portion. The example below better
   illustrates this concept.

   For a square global matrix we define each processor's diagonal portion
   to be its local rows and the corresponding columns (a square submatrix);
   each processor's off-diagonal portion encompasses the remainder of the
   local matrix (a rectangular submatrix).

   If o_rlen, d_rlen are specified, then o_rlenmax and d_rlenmax are ignored.

   When calling this routine with a single process communicator, a matrix of
   type SEQSELL is returned. If a matrix of type MATMPISELL is desired for this
   type of communicator, use the construction mechanism:
   MatCreate(...,&A); MatSetType(A,MATMPISELL); MatSetSizes(A,m,n,M,N); MatMPISELLSetPreallocation(A,...);

   Options Database Keys:
-  -mat_sell_oneindex - Internally use indexing starting at 1
                        rather than 0. Note that when calling MatSetValues(),
                        the user still MUST index entries starting at 0!

   Example usage:

   Consider the following 8x8 matrix with 34 non-zero values, that is
   assembled across 3 processors. Let's assume that proc0 owns 3 rows,
   proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
   as follows:

.vb
            1  2  0  |  0  3  0  |  0  4
    Proc0   0  5  6  |  7  0  0  |  8  0
            9  0 10  | 11  0  0  | 12  0
    -------------------------------------
           13  0 14  | 15 16 17  |  0  0
    Proc1   0 18  0  | 19 20 21  |  0  0
            0  0  0  | 22 23  0  | 24  0
    -------------------------------------
    Proc2  25 26 27  |  0  0 28  | 29  0
           30  0  0  | 31 32 33  |  0 34
.ve

   This can be represented as a collection of submatrices as:

.vb
      A B C
      D E F
      G H I
.ve

   Where the submatrices A,B,C are owned by proc0, D,E,F are
   owned by proc1, and G,H,I are owned by proc2.

   The 'm' parameters for proc0, proc1, proc2 are 3, 3, 2 respectively.
   The 'n' parameters for proc0, proc1, proc2 are 3, 3, 2 respectively.
   The 'M','N' parameters are 8,8, and have the same values on all procs.

   The DIAGONAL submatrices corresponding to proc0, proc1, proc2 are
   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
   corresponding to proc0, proc1, proc2 are [BC], [DF], [GH] respectively.
   Internally, each processor stores the DIAGONAL part and the OFF-DIAGONAL
   part as SeqSELL matrices. For example, proc1 will store [E] as a SeqSELL
   matrix, and [DF] as another SeqSELL matrix.

   When d_rlenmax, o_rlenmax parameters are specified, d_rlenmax storage elements are
   allocated for every row of the local DIAGONAL submatrix, and o_rlenmax
   storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
   One way to choose d_rlenmax and o_rlenmax is to use the maximum number of nonzeros per
   local row for each of the local DIAGONAL and OFF-DIAGONAL submatrices.
   In this case, the values of d_rlenmax,o_rlenmax are:
.vb
     proc0 : d_rlenmax = 2, o_rlenmax = 2
     proc1 : d_rlenmax = 3, o_rlenmax = 2
     proc2 : d_rlenmax = 1, o_rlenmax = 4
.ve
   We are allocating m*(d_rlenmax+o_rlenmax) storage locations for every proc. This
   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
   for proc2. I.e. we are using 12+15+10=37 storage locations to store
   34 values.

   When d_rlen, o_rlen parameters are specified, the storage is specified
   for every row, corresponding to both the DIAGONAL and OFF-DIAGONAL submatrices.
   In the above case the values for d_rlen,o_rlen are:
.vb
     proc0: d_rlen = [2,2,2] and o_rlen = [2,2,2]
     proc1: d_rlen = [3,3,2] and o_rlen = [2,1,1]
     proc2: d_rlen = [1,1]   and o_rlen = [4,4]
.ve
   Here the space allocated is still 37 though there are 34 nonzeros because
   the allocation is always done according to rlenmax.

   Level: intermediate

.keywords: matrix, sell, sparse, parallel

.seealso: MatCreate(), MatCreateSeqSELL(), MatSetValues(), MatMPISELLSetPreallocation(), MatMPISELLSetPreallocationSELL(),
          MATMPISELL, MatCreateMPISELLWithArrays()
@*/
PetscErrorCode MatCreateSELL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_rlenmax,const PetscInt d_rlen[],PetscInt o_rlenmax,const PetscInt o_rlen[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    ierr = MatSetType(*A,MATMPISELL);CHKERRQ(ierr);
    ierr = MatMPISELLSetPreallocation(*A,d_rlenmax,d_rlen,o_rlenmax,o_rlen);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQSELL);CHKERRQ(ierr);
    ierr = MatSeqSELLSetPreallocation(*A,d_rlenmax,d_rlen);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatMPISELLGetSeqSELL(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
{
  Mat_MPISELL    *a=(Mat_MPISELL*)A->data;
  PetscBool      flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATMPISELL,&flg);CHKERRQ(ierr);
  if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPISELL matrix as input");
  if (Ad) *Ad = a->A;
  if (Ao) *Ao = a->B;
  if (colmap) *colmap = a->garray;
  PetscFunctionReturn(0);
}

/*@C
   MatMPISELLGetLocalMatCondensed - Creates a SeqSELL matrix from a MATMPISELL matrix by taking all its local rows and NON-ZERO columns

   Not Collective

   Input Parameters:
+  A - the matrix
.  scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
-  row, col - index sets of rows and columns to extract (or NULL)

   Output Parameter:
.  A_loc - the local sequential matrix generated

   Level: developer

.seealso: MatGetOwnershipRange(), MatMPISELLGetLocalMat()

@*/
PetscErrorCode MatMPISELLGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
{
  Mat_MPISELL    *a=(Mat_MPISELL*)A->data;
  PetscErrorCode ierr;
  PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
  IS             isrowa,iscola;
  Mat            *aloc;
  PetscBool      match;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)A,MATMPISELL,&match);CHKERRQ(ierr);
  if (!match) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Requires MATMPISELL matrix as input");
  ierr = PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
  if (!row) {
    start = A->rmap->rstart; end = A->rmap->rend;
    ierr  = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);CHKERRQ(ierr);
  } else {
    isrowa = *row;
  }
  if (!col) {
    start = A->cmap->rstart;
    cmap  = a->garray;
    nzA   = a->A->cmap->n;
    nzB   = a->B->cmap->n;
    ierr  = PetscMalloc1(nzA+nzB,&idx);CHKERRQ(ierr);
    ncols = 0;
    for (i=0; i<nzB; i++) {
      if (cmap[i] < start) idx[ncols++] = cmap[i];
      else break;
    }
    imark = i;
    for (i=0; i<nzA; i++) idx[ncols++] = start + i;
    for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
    ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);CHKERRQ(ierr);
  } else {
    iscola = *col;
  }
  if (scall != MAT_INITIAL_MATRIX) {
    ierr    = PetscMalloc1(1,&aloc);CHKERRQ(ierr);
    aloc[0] = *A_loc;
  }
  ierr   = MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);CHKERRQ(ierr);
  *A_loc = aloc[0];
  ierr   = PetscFree(aloc);CHKERRQ(ierr);
  if (!row) {
    ierr = ISDestroy(&isrowa);CHKERRQ(ierr);
  }
  if (!col) {
    ierr = ISDestroy(&iscola);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#include <../src/mat/impls/aij/mpi/mpiaij.h>

PetscErrorCode MatConvert_MPISELL_MPIAIJ(Mat A,MatType newtype,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat_MPISELL    *a=(Mat_MPISELL*)A->data;
  Mat            B;
  Mat_MPIAIJ     *b;

  PetscFunctionBegin;
  if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix must be assembled");

  if (reuse == MAT_REUSE_MATRIX) {
    B = *newmat;
  } else {
    ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr);
    ierr = MatSetType(B,MATMPIAIJ);CHKERRQ(ierr);
    ierr = MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(B,A->rmap->bs,A->cmap->bs);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(B,0,NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(B,0,NULL,0,NULL);CHKERRQ(ierr);
  }
  b = (Mat_MPIAIJ*) B->data;

  if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_REUSE_MATRIX, &b->A);CHKERRQ(ierr);
    ierr = MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_REUSE_MATRIX, &b->B);CHKERRQ(ierr);
  } else {
    ierr = MatDestroy(&b->A);CHKERRQ(ierr);
    ierr = MatDestroy(&b->B);CHKERRQ(ierr);
    ierr = MatDisAssemble_MPISELL(A);CHKERRQ(ierr);
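    /* rebuild B's local blocks: convert the diagonal (a->A) and off-diagonal (a->B)
       SeqSELL blocks of A into SeqAIJ matrices and install them as b->A and b->B */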
    ierr = MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A);CHKERRQ(ierr);
    ierr = MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  if (reuse == MAT_INPLACE_MATRIX) {
    ierr = MatHeaderReplace(A,&B);CHKERRQ(ierr);
  } else {
    *newmat = B;
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat A,MatType newtype,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat            B;
  Mat_MPISELL    *b;

  PetscFunctionBegin;
  if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix must be assembled");

  if (reuse == MAT_REUSE_MATRIX) {
    B = *newmat;
  } else {
    ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr);
    ierr = MatSetType(B,MATMPISELL);CHKERRQ(ierr);
    ierr = MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(B,A->rmap->bs,A->cmap->bs);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(B,0,NULL);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(B,0,NULL,0,NULL);CHKERRQ(ierr);
  }
  b = (Mat_MPISELL*) B->data;

  if (reuse == MAT_REUSE_MATRIX) {
    ierr = MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_REUSE_MATRIX, &b->A);CHKERRQ(ierr);
    ierr = MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_REUSE_MATRIX, &b->B);CHKERRQ(ierr);
  } else {
    ierr = MatDestroy(&b->A);CHKERRQ(ierr);
    ierr = MatDestroy(&b->B);CHKERRQ(ierr);
    ierr = MatDisAssemble_MPIAIJ(A);CHKERRQ(ierr);
    ierr = MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_INITIAL_MATRIX, &b->A);CHKERRQ(ierr);
    ierr = MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_INITIAL_MATRIX, &b->B);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  if (reuse == MAT_INPLACE_MATRIX) {
    ierr = MatHeaderReplace(A,&B);CHKERRQ(ierr);
  } else {
    *newmat = B;
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatSOR_MPISELL(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
{
  Mat_MPISELL    *mat=(Mat_MPISELL*)matin->data;
  PetscErrorCode ierr;
  Vec            bb1=0;

  PetscFunctionBegin;
  if (flag == SOR_APPLY_UPPER) {
    ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }

  if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) {
    ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
  }

  if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
    if (flag & SOR_ZERO_INITIAL_GUESS) {
      ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
      its--;
    }

    while (its--) {
      ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

      /* update rhs: bb1 = bb - B*x */
      ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
      ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);

      /* local sweep */
      ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
    }
  } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
    if (flag & SOR_ZERO_INITIAL_GUESS) {
      ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
      its--;
    }
    while (its--) {
      ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

      /* update rhs: bb1 = bb - B*x */
      ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
      ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);

      /* local sweep */
      ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
    }
  } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
    if (flag & SOR_ZERO_INITIAL_GUESS) {
      ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
      its--;
    }
    while (its--) {
      ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
      ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);

      /* update rhs: bb1 = bb - B*x */
      ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
      ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);

      /* local sweep */
      ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
    }
  } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");

  ierr = VecDestroy(&bb1);CHKERRQ(ierr);

  matin->factorerrortype = mat->A->factorerrortype;
  PetscFunctionReturn(0);
}

/*MC
   MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for parallel sparse matrices.

   Options Database Keys:
.  -mat_type mpisell - sets the matrix type to "mpisell" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateSELL()
M*/
PETSC_EXTERN PetscErrorCode MatCreate_MPISELL(Mat B)
{
  Mat_MPISELL    *b;
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);CHKERRQ(ierr);
  ierr = PetscNewLog(B,&b);CHKERRQ(ierr);
  B->data       = (void*)b;
  ierr = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
  B->assembled  = PETSC_FALSE;
  B->insertmode = NOT_SET_VALUES;
  b->size       = size;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);CHKERRQ(ierr);
  /* build cache for off array entries formed */
  ierr = MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);CHKERRQ(ierr);

  b->donotstash  = PETSC_FALSE;
  b->colmap      = 0;
  b->garray      = 0;
  b->roworiented = PETSC_TRUE;

  /* stuff used for matrix vector multiply */
  b->lvec  = NULL;
  b->Mvctx = NULL;

  /* stuff for MatGetRow() */
  b->rowindices   = 0;
  b->rowvalues    = 0;
  b->getrowactive = PETSC_FALSE;

  ierr = PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPISELL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPISELL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPISELL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPISELLSetPreallocation_C",MatMPISELLSetPreallocation_MPISELL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpisell_mpiaij_C",MatConvert_MPISELL_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPISELL);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPISELL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
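/*
   A minimal usage sketch of MatCreateSELL() (illustrative only, not part of the library
   source: the global size 100 and the per-row estimates 3 and 2 below are assumptions
   chosen just for the example). It creates a parallel SELL matrix, inserts a unit
   diagonal, assembles, and applies the matrix to a vector.

     #include <petscmat.h>

     int main(int argc,char **argv)
     {
       Mat            A;
       Vec            x,y;
       PetscErrorCode ierr;
       PetscInt       i,rstart,rend;
       PetscScalar    one = 1.0;

       ierr = PetscInitialize(&argc,&argv,NULL,NULL);if (ierr) return ierr;
       // at most 3 nonzeros per row in the diagonal block, 2 in the off-diagonal block
       ierr = MatCreateSELL(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,100,100,3,NULL,2,NULL,&A);CHKERRQ(ierr);
       ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
       for (i=rstart; i<rend; i++) {
         ierr = MatSetValues(A,1,&i,1,&i,&one,INSERT_VALUES);CHKERRQ(ierr);  // unit diagonal
       }
       ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatCreateVecs(A,&x,&y);CHKERRQ(ierr);
       ierr = VecSet(x,1.0);CHKERRQ(ierr);
       ierr = MatMult(A,x,y);CHKERRQ(ierr);   // y = A x
       ierr = VecDestroy(&x);CHKERRQ(ierr);
       ierr = VecDestroy(&y);CHKERRQ(ierr);
       ierr = MatDestroy(&A);CHKERRQ(ierr);
       ierr = PetscFinalize();
       return ierr;
     }
*/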