#define PETSCMAT_DLL

#include "../src/mat/impls/baij/mpi/mpibaij.h"   /*I  "petscmat.h"  I*/

EXTERN PetscErrorCode MatSetUpMultiply_MPIBAIJ(Mat);
EXTERN PetscErrorCode DisAssemble_MPIBAIJ(Mat);
EXTERN PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat,PetscInt,IS[],PetscInt);
EXTERN PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*[]);
EXTERN PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar[]);
EXTERN PetscErrorCode MatSetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
EXTERN PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
EXTERN PetscErrorCode MatGetRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
EXTERN PetscErrorCode MatRestoreRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
EXTERN PetscErrorCode MatZeroRows_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscScalar);

#undef __FUNCT__
#define __FUNCT__ "MatGetRowMaxAbs_MPIBAIJ"
PetscErrorCode MatGetRowMaxAbs_MPIBAIJ(Mat A,Vec v,PetscInt idx[])
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       i,*idxb = 0;
  PetscScalar    *va,*vb;
  Vec            vtmp;

  PetscFunctionBegin;
  ierr = MatGetRowMaxAbs(a->A,v,idx);CHKERRQ(ierr);
  ierr = VecGetArray(v,&va);CHKERRQ(ierr);
  if (idx) {
    for (i=0; i<A->rmap->n; i++) {if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;}
  }

  ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
  if (idx) {ierr = PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);CHKERRQ(ierr);}
  ierr = MatGetRowMaxAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
  ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);

  for (i=0; i<A->rmap->n; i++){
    if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
      va[i] = vb[i];
      if (idx) idx[i] = A->cmap->bs*a->garray[idxb[i]/A->cmap->bs] + (idxb[i] % A->cmap->bs);
    }
  }

  ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
  ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
  if (idxb) {ierr = PetscFree(idxb);CHKERRQ(ierr);}
  ierr = VecDestroy(vtmp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatStoreValues_MPIBAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatStoreValues_MPIBAIJ(Mat mat)
{
  Mat_MPIBAIJ    *aij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
  ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatRetrieveValues_MPIBAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatRetrieveValues_MPIBAIJ(Mat mat)
{
  Mat_MPIBAIJ    *aij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
  ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END

/*
     Local utility routine that creates a mapping from the global column
   number to the local number in the off-diagonal part of the local
   storage of the matrix.  This is done in a non-scalable way since the
   length of colmap equals the global matrix length.
*/
#undef __FUNCT__
#define __FUNCT__ "CreateColmap_MPIBAIJ_Private"
PetscErrorCode CreateColmap_MPIBAIJ_Private(Mat mat)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *B = (Mat_SeqBAIJ*)baij->B->data;
  PetscErrorCode ierr;
  PetscInt       nbs = B->nbs,i,bs=mat->rmap->bs;

  PetscFunctionBegin;
#if defined (PETSC_USE_CTABLE)
  ierr = PetscTableCreate(baij->nbs,&baij->colmap);CHKERRQ(ierr);
  for (i=0; i<nbs; i++){
    ierr = PetscTableAdd(baij->colmap,baij->garray[i]+1,i*bs+1);CHKERRQ(ierr);
  }
#else
  ierr = PetscMalloc((baij->Nbs+1)*sizeof(PetscInt),&baij->colmap);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(mat,baij->Nbs*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(baij->colmap,baij->Nbs*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs+1;
#endif
  PetscFunctionReturn(0);
}
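/*
   Illustrative sketch (not part of the build): how the colmap created above
   is consulted.  With bs = 2 and garray = {3,7} (the global block columns
   present in the off-diagonal part B), the loop above records
   colmap[3] = 0*2+1 = 1 and colmap[7] = 1*2+1 = 3, i.e. one plus the first
   local point column of each block.  A lookup for a global point column
   gcol then reads (array variant):

     lcol = baij->colmap[gcol/bs] - 1;
     if (lcol < 0) { the block column is absent from B }
     else lcol += gcol % bs;

   The +1/-1 shift lets 0 mean "absent"; the PETSC_USE_CTABLE variant plays
   the same trick with both keys and values, since PetscTableFind() yields 0
   for missing keys.
*/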
#define CHUNKSIZE 10

#define MatSetValues_SeqBAIJ_A_Private(row,col,value,addv) \
{ \
    brow = row/bs;  \
    rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
    rmax = aimax[brow]; nrow = ailen[brow]; \
    bcol = col/bs; \
    ridx = row % bs; cidx = col % bs; \
    low  = 0; high = nrow; \
    while (high-low > 3) { \
      t = (low+high)/2; \
      if (rp[t] > bcol) high = t; \
      else              low  = t; \
    } \
    for (_i=low; _i<high; _i++) { \
      if (rp[_i] > bcol) break; \
      if (rp[_i] == bcol) { \
        bap = ap + bs2*_i + bs*cidx + ridx; \
        if (addv == ADD_VALUES) *bap += value; \
        else                    *bap  = value; \
        goto a_noinsert; \
      } \
    } \
    if (a->nonew == 1) goto a_noinsert; \
    if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
    MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
    N = nrow++ - 1; \
    /* shift up all the later entries in this row */ \
    for (ii=N; ii>=_i; ii--) { \
      rp[ii+1] = rp[ii]; \
      ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
    } \
    if (N >= _i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); } \
    rp[_i]                      = bcol; \
    ap[bs2*_i + bs*cidx + ridx] = value; \
a_noinsert:; \
    ailen[brow] = nrow; \
}

#define MatSetValues_SeqBAIJ_B_Private(row,col,value,addv) \
{ \
    brow = row/bs;  \
    rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
    rmax = bimax[brow]; nrow = bilen[brow]; \
    bcol = col/bs; \
    ridx = row % bs; cidx = col % bs; \
    low  = 0; high = nrow; \
    while (high-low > 3) { \
      t = (low+high)/2; \
      if (rp[t] > bcol) high = t; \
      else              low  = t; \
    } \
    for (_i=low; _i<high; _i++) { \
      if (rp[_i] > bcol) break; \
      if (rp[_i] == bcol) { \
        bap = ap + bs2*_i + bs*cidx + ridx; \
        if (addv == ADD_VALUES) *bap += value; \
        else                    *bap  = value; \
        goto b_noinsert; \
      } \
    } \
    if (b->nonew == 1) goto b_noinsert; \
    if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
    MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
    CHKMEMQ; \
    N = nrow++ - 1; \
    /* shift up all the later entries in this row */ \
    for (ii=N; ii>=_i; ii--) { \
      rp[ii+1] = rp[ii]; \
      ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
    } \
    if (N >= _i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); } \
    rp[_i]                      = bcol; \
    ap[bs2*_i + bs*cidx + ridx] = value; \
b_noinsert:; \
    bilen[brow] = nrow; \
}
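/*
   Worked example for the two macros above (illustration only): with bs = 3
   (so bs2 = 9), the point entry (row,col) = (7,5) lands in block row
   brow = 7/3 = 2 and block column bcol = 5/3 = 1, at intra-block offsets
   ridx = 7 % 3 = 1 and cidx = 5 % 3 = 2.  Blocks are stored column-major,
   so once the binary search plus linear scan locates bcol at position _i
   in the block row, the value is written to
   ap[bs2*_i + bs*cidx + ridx] = ap[9*_i + 3*2 + 1].
*/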
#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPIBAIJ"
PetscErrorCode MatSetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  MatScalar      value;
  PetscTruth     roworiented = baij->roworiented;
  PetscErrorCode ierr;
  PetscInt       i,j,row,col;
  PetscInt       rstart_orig=mat->rmap->rstart;
  PetscInt       rend_orig=mat->rmap->rend,cstart_orig=mat->cmap->rstart;
  PetscInt       cend_orig=mat->cmap->rend,bs=mat->rmap->bs;

  /* Some variables required in the macro */
  Mat         A = baij->A;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)(A)->data;
  PetscInt    *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
  MatScalar   *aa=a->a;

  Mat         B = baij->B;
  Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
  PetscInt    *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
  MatScalar   *ba=b->a;

  PetscInt  *rp,ii,nrow,_i,rmax,N,brow,bcol;
  PetscInt  low,high,t,ridx,cidx,bs2=a->bs2;
  MatScalar *ap,*bap;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
#endif
    if (im[i] >= rstart_orig && im[i] < rend_orig) {
      row = im[i] - rstart_orig;
      for (j=0; j<n; j++) {
        if (in[j] >= cstart_orig && in[j] < cend_orig){
          col = in[j] - cstart_orig;
          if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
          MatSetValues_SeqBAIJ_A_Private(row,col,value,addv);
          /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
#endif
        else {
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
            }
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
            col  = col - 1;
#else
            col = baij->colmap[in[j]/bs] - 1;
#endif
            if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
              ierr = DisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
              /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
              B = baij->B;
              b = (Mat_SeqBAIJ*)(B)->data;
              bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
              ba=b->a;
            } else col += in[j]%bs;
          } else col = in[j];
          if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
          MatSetValues_SeqBAIJ_B_Private(row,col,value,addv);
          /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
        }
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ"
PetscErrorCode MatSetValuesBlocked_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ       *baij = (Mat_MPIBAIJ*)mat->data;
  const PetscScalar *value;
  MatScalar         *barray=baij->barray;
  PetscTruth        roworiented = baij->roworiented;
  PetscErrorCode    ierr;
  PetscInt          i,j,ii,jj,row,col,rstart=baij->rstartbs;
  PetscInt          rend=baij->rendbs,cstart=baij->cstartbs,stepval;
  PetscInt          cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;

  PetscFunctionBegin;
  if (!barray) {
    ierr = PetscMalloc(bs2*sizeof(MatScalar),&barray);CHKERRQ(ierr);
    baij->barray = barray;
  }

  if (roworiented) {
    stepval = (n-1)*bs;
  } else {
    stepval = (m-1)*bs;
  }
  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
#endif
    if (im[i] >= rstart && im[i] < rend) {
      row = im[i] - rstart;
      for (j=0; j<n; j++) {
        /* If NumCol = 1 then a copy is not required */
        if ((roworiented) && (n == 1)) {
          barray = (MatScalar*)v + i*bs2;
        } else if ((!roworiented) && (m == 1)) {
          barray = (MatScalar*)v + j*bs2;
        } else { /* Here a copy is required */
          if (roworiented) {
            value = v + i*(stepval+bs)*bs + j*bs;
          } else {
            value = v + j*(stepval+bs)*bs + i*bs;
          }
          for (ii=0; ii<bs; ii++,value+=stepval) {
            for (jj=0; jj<bs; jj++) {
              *barray++ = *value++;
            }
          }
          barray -= bs2;
        }

        if (in[j] >= cstart && in[j] < cend){
          col  = in[j] - cstart;
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->A,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);}
#endif
        else {
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
            }
#if defined(PETSC_USE_DEBUG)
#if defined (PETSC_USE_CTABLE)
            { PetscInt data;
              ierr = PetscTableFind(baij->colmap,in[j]+1,&data);CHKERRQ(ierr);
              if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
            }
#else
            if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
#endif
#endif
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]+1,&col);CHKERRQ(ierr);
            col  = (col - 1)/bs;
#else
            col = (baij->colmap[in[j]] - 1)/bs;
#endif
            if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
              ierr = DisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
            }
          } else col = in[j];
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        }
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturn(0);
}

#define HASH_KEY 0.6180339887
#define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(PetscInt)((size)*(tmp-(PetscInt)tmp)))
/* #define HASH(size,key) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
/* #define HASH(size,key,tmp) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
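/*
   HASH() above is multiplicative (Fibonacci) hashing: multiply the key by
   the golden-ratio conjugate, drop the integer part, and scale the
   fractional part by the table size.  A worked example (illustration only)
   with size = 1600 and key = 1234:

     tmp = 1234*0.6180339887 = 762.6539...
     h1  = (PetscInt)(1600*(tmp - 762)) = (PetscInt)(1046.3...) = 1046

   Keys are stored shifted by +1 so that an empty slot reads as 0, and
   collisions are resolved by the linear probing in the routines below.
*/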
#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPIBAIJ_HT"
PetscErrorCode MatSetValues_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  PetscTruth     roworiented = baij->roworiented;
  PetscErrorCode ierr;
  PetscInt       i,j,row,col;
  PetscInt       rstart_orig=mat->rmap->rstart;
  PetscInt       rend_orig=mat->rmap->rend,Nbs=baij->Nbs;
  PetscInt       h1,key,size=baij->ht_size,bs=mat->rmap->bs,*HT=baij->ht,idx;
  PetscReal      tmp;
  MatScalar      **HD = baij->hd,value;
#if defined(PETSC_USE_DEBUG)
  PetscInt       total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
#endif

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
#if defined(PETSC_USE_DEBUG)
    if (im[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
    if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
#endif
    row = im[i];
    if (row >= rstart_orig && row < rend_orig) {
      for (j=0; j<n; j++) {
        col = in[j];
        if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
        /* Look up into the hash table */
        key = (row/bs)*Nbs+(col/bs)+1;
        h1  = HASH(size,key,tmp);

        idx = h1;
#if defined(PETSC_USE_DEBUG)
        insert_ct++;
        total_ct++;
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
            if (idx == h1) {
              SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
            }
          }
        }
#else
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
            if (idx == h1) {
              SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
            }
          }
        }
#endif
        /* A hash table entry is found, so insert the values at the correct address */
        if (addv == ADD_VALUES) *(HD[idx]+ (col % bs)*bs + (row % bs)) += value;
        else                    *(HD[idx]+ (col % bs)*bs + (row % bs))  = value;
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);CHKERRQ(ierr);
        }
      }
    }
  }
#if defined(PETSC_USE_DEBUG)
  baij->ht_total_ct  = total_ct;
  baij->ht_insert_ct = insert_ct;
#endif
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ_HT"
PetscErrorCode MatSetValuesBlocked_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ       *baij = (Mat_MPIBAIJ*)mat->data;
  PetscTruth        roworiented = baij->roworiented;
  PetscErrorCode    ierr;
  PetscInt          i,j,ii,jj,row,col;
  PetscInt          rstart=baij->rstartbs;
  PetscInt          rend=mat->rmap->rend,stepval,bs=mat->rmap->bs,bs2=baij->bs2,nbs2=n*bs2;
  PetscInt          h1,key,size=baij->ht_size,idx,*HT=baij->ht,Nbs=baij->Nbs;
  PetscReal         tmp;
  MatScalar         **HD = baij->hd,*baij_a;
  const PetscScalar *v_t,*value;
#if defined(PETSC_USE_DEBUG)
  PetscInt          total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
#endif

  PetscFunctionBegin;
  if (roworiented) {
    stepval = (n-1)*bs;
  } else {
    stepval = (m-1)*bs;
  }
  for (i=0; i<m; i++) {
#if defined(PETSC_USE_DEBUG)
    if (im[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",im[i]);
    if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],baij->Mbs-1);
#endif
    row = im[i];
    v_t = v + i*nbs2;
    if (row >= rstart && row < rend) {
      for (j=0; j<n; j++) {
        col = in[j];

        /* Look up into the hash table */
        key = row*Nbs+col+1;
        h1  = HASH(size,key,tmp);

        idx = h1;
#if defined(PETSC_USE_DEBUG)
        total_ct++;
        insert_ct++;
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
            if (idx == h1) {
              SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
            }
          }
        }
#else
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
            if (idx == h1) {
              SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
            }
          }
        }
#endif
        baij_a = HD[idx];
        if (roworiented) {
          /* value = v + i*(stepval+bs)*bs + j*bs; */
          /* value = v + (i*(stepval+bs)+j)*bs; */
          value = v_t;
          v_t  += bs;
          if (addv == ADD_VALUES) {
            for (ii=0; ii<bs; ii++,value+=stepval) {
              for (jj=ii; jj<bs2; jj+=bs) {
                baij_a[jj] += *value++;
              }
            }
          } else {
            for (ii=0; ii<bs; ii++,value+=stepval) {
              for (jj=ii; jj<bs2; jj+=bs) {
                baij_a[jj] = *value++;
              }
            }
          }
        } else {
          value = v + j*(stepval+bs)*bs + i*bs;
          if (addv == ADD_VALUES) {
            for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
              for (jj=0; jj<bs; jj++) {
                baij_a[jj] += *value++;
              }
            }
          } else {
            for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
              for (jj=0; jj<bs; jj++) {
                baij_a[jj] = *value++;
              }
            }
          }
        }
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        }
      }
    }
  }
#if defined(PETSC_USE_DEBUG)
  baij->ht_total_ct  = total_ct;
  baij->ht_insert_ct = insert_ct;
#endif
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatGetValues_MPIBAIJ"
PetscErrorCode MatGetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       bs=mat->rmap->bs,i,j,bsrstart = mat->rmap->rstart,bsrend = mat->rmap->rend;
  PetscInt       bscstart = mat->cmap->rstart,bscend = mat->cmap->rend,row,col,data;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]); */
    if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
    if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
      row = idxm[i] - bsrstart;
      for (j=0; j<n; j++) {
        if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
        if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
        if (idxn[j] >= bscstart && idxn[j] < bscend){
          col  = idxn[j] - bscstart;
          ierr = MatGetValues_SeqBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
        } else {
          if (!baij->colmap) {
            ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
          }
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
          data--;
#else
          data = baij->colmap[idxn[j]/bs]-1;
#endif
          if ((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
          else {
            col  = data + idxn[j]%bs;
            ierr = MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
          }
        }
      }
    } else {
      SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
    }
  }
  PetscFunctionReturn(0);
}
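/*
   Usage sketch (illustration only): since the routine above raises an error
   for rows that are not locally owned, callers restrict queries to the
   ownership range, e.g.

     PetscInt    rstart,rend,row,col;
     PetscScalar val;
     ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
     for (row=rstart; row<rend; row++) {
       col  = row;
       ierr = MatGetValues(mat,1,&row,1,&col,&val);CHKERRQ(ierr);
     }

   Off-process columns are fine (they are resolved through colmap and garray
   above); only the rows must be local.
*/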
#undef __FUNCT__
#define __FUNCT__ "MatNorm_MPIBAIJ"
PetscErrorCode MatNorm_MPIBAIJ(Mat mat,NormType type,PetscReal *nrm)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *amat = (Mat_SeqBAIJ*)baij->A->data,*bmat = (Mat_SeqBAIJ*)baij->B->data;
  PetscErrorCode ierr;
  PetscInt       i,j,bs2=baij->bs2,bs=baij->A->rmap->bs,nz,row,col;
  PetscReal      sum = 0.0;
  MatScalar      *v;

  PetscFunctionBegin;
  if (baij->size == 1) {
    ierr = MatNorm(baij->A,type,nrm);CHKERRQ(ierr);
  } else {
    if (type == NORM_FROBENIUS) {
      v  = amat->a;
      nz = amat->nz*bs2;
      for (i=0; i<nz; i++) {
#if defined(PETSC_USE_COMPLEX)
        sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
#else
        sum += (*v)*(*v); v++;
#endif
      }
      v  = bmat->a;
      nz = bmat->nz*bs2;
      for (i=0; i<nz; i++) {
#if defined(PETSC_USE_COMPLEX)
        sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
#else
        sum += (*v)*(*v); v++;
#endif
      }
      ierr = MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);CHKERRQ(ierr);
      *nrm = sqrt(*nrm);
    } else if (type == NORM_1) { /* max column sum */
      PetscReal *tmp,*tmp2;
      PetscInt  *jj,*garray=baij->garray,cstart=baij->rstartbs;
      ierr = PetscMalloc((2*mat->cmap->N+1)*sizeof(PetscReal),&tmp);CHKERRQ(ierr);
      tmp2 = tmp + mat->cmap->N;
      ierr = PetscMemzero(tmp,mat->cmap->N*sizeof(PetscReal));CHKERRQ(ierr);
      v = amat->a; jj = amat->j;
      for (i=0; i<amat->nz; i++) {
        for (j=0; j<bs; j++){
          col = bs*(cstart + *jj) + j; /* column index */
          for (row=0; row<bs; row++){
            tmp[col] += PetscAbsScalar(*v); v++;
          }
        }
        jj++;
      }
      v = bmat->a; jj = bmat->j;
      for (i=0; i<bmat->nz; i++) {
        for (j=0; j<bs; j++){
          col = bs*garray[*jj] + j;
          for (row=0; row<bs; row++){
            tmp[col] += PetscAbsScalar(*v); v++;
          }
        }
        jj++;
      }
      ierr = MPI_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);CHKERRQ(ierr);
      *nrm = 0.0;
      for (j=0; j<mat->cmap->N; j++) {
        if (tmp2[j] > *nrm) *nrm = tmp2[j];
      }
      ierr = PetscFree(tmp);CHKERRQ(ierr);
    } else if (type == NORM_INFINITY) { /* max row sum */
      PetscReal *sums;
      ierr = PetscMalloc(bs*sizeof(PetscReal),&sums);CHKERRQ(ierr);
      sum  = 0.0;
      for (j=0; j<amat->mbs; j++) {
        for (row=0; row<bs; row++) sums[row] = 0.0;
        v  = amat->a + bs2*amat->i[j];
        nz = amat->i[j+1]-amat->i[j];
        for (i=0; i<nz; i++) {
          for (col=0; col<bs; col++){
            for (row=0; row<bs; row++){
              sums[row] += PetscAbsScalar(*v); v++;
            }
          }
        }
        v  = bmat->a + bs2*bmat->i[j];
        nz = bmat->i[j+1]-bmat->i[j];
        for (i=0; i<nz; i++) {
          for (col=0; col<bs; col++){
            for (row=0; row<bs; row++){
              sums[row] += PetscAbsScalar(*v); v++;
            }
          }
        }
        for (row=0; row<bs; row++){
          if (sums[row] > sum) sum = sums[row];
        }
      }
      ierr = MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_MAX,((PetscObject)mat)->comm);CHKERRQ(ierr);
      ierr = PetscFree(sums);CHKERRQ(ierr);
    } else {
      SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
    }
  }
  PetscFunctionReturn(0);
}
/*
  Creates the hash table, and sets the table.
  This table is created only once.
  If new entries need to be added to the matrix
  then the hash table has to be destroyed and
  recreated.
*/
#undef __FUNCT__
#define __FUNCT__ "MatCreateHashTable_MPIBAIJ_Private"
PetscErrorCode MatCreateHashTable_MPIBAIJ_Private(Mat mat,PetscReal factor)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  Mat            A = baij->A,B=baij->B;
  Mat_SeqBAIJ    *a=(Mat_SeqBAIJ *)A->data,*b=(Mat_SeqBAIJ *)B->data;
  PetscInt       i,j,k,nz=a->nz+b->nz,h1,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscErrorCode ierr;
  PetscInt       size,bs2=baij->bs2,rstart=baij->rstartbs;
  PetscInt       cstart=baij->cstartbs,*garray=baij->garray,row,col,Nbs=baij->Nbs;
  PetscInt       *HT,key;
  MatScalar      **HD;
  PetscReal      tmp;
#if defined(PETSC_USE_INFO)
  PetscInt       ct=0,max=0;
#endif

  PetscFunctionBegin;
  baij->ht_size = (PetscInt)(factor*nz);
  size          = baij->ht_size;

  if (baij->ht) {
    PetscFunctionReturn(0);
  }

  /* Allocate memory for the hash table */
  ierr = PetscMalloc((size)*(sizeof(PetscInt)+sizeof(MatScalar*))+1,&baij->hd);CHKERRQ(ierr);
  baij->ht = (PetscInt*)(baij->hd + size);
  HD = baij->hd;
  HT = baij->ht;

  ierr = PetscMemzero(HD,size*(sizeof(PetscInt)+sizeof(PetscScalar*)));CHKERRQ(ierr);

  /* Loop over A */
  for (i=0; i<a->mbs; i++) {
    for (j=ai[i]; j<ai[i+1]; j++) {
      row = i+rstart;
      col = aj[j]+cstart;

      key = row*Nbs + col + 1;
      h1  = HASH(size,key,tmp);
      for (k=0; k<size; k++){
        if (!HT[(h1+k)%size]) {
          HT[(h1+k)%size] = key;
          HD[(h1+k)%size] = a->a + j*bs2;
          break;
#if defined(PETSC_USE_INFO)
        } else {
          ct++;
#endif
        }
      }
#if defined(PETSC_USE_INFO)
      if (k > max) max = k;
#endif
    }
  }
  /* Loop over B */
  for (i=0; i<b->mbs; i++) {
    for (j=bi[i]; j<bi[i+1]; j++) {
      row = i+rstart;
      col = garray[bj[j]];
      key = row*Nbs + col + 1;
      h1  = HASH(size,key,tmp);
      for (k=0; k<size; k++){
        if (!HT[(h1+k)%size]) {
          HT[(h1+k)%size] = key;
          HD[(h1+k)%size] = b->a + j*bs2;
          break;
#if defined(PETSC_USE_INFO)
        } else {
          ct++;
#endif
        }
      }
#if defined(PETSC_USE_INFO)
      if (k > max) max = k;
#endif
    }
  }

  /* Print summary */
#if defined(PETSC_USE_INFO)
  for (i=0,j=0; i<size; i++) {
    if (HT[i]) {j++;}
  }
  ierr = PetscInfo2(mat,"Average Search = %5.2f,max search = %D\n",(!j)? 0.0:((PetscReal)(ct+j))/j,max);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}
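/*
   Sizing sketch (illustration only): the table gets one slot per local
   nonzero block, scaled by the user-settable factor (baij->ht_fact, set
   via MatMPIBAIJSetHashTableFactor(), commonly 1.60).  With
   a->nz + b->nz = 1000 local blocks and factor = 1.60 this gives
   ht_size = (PetscInt)(1.60*1000) = 1600 slots, leaving roughly 37% of
   the table empty so the linear-probe chains stay short.
*/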
#undef __FUNCT__
#define __FUNCT__ "MatAssemblyBegin_MPIBAIJ"
PetscErrorCode MatAssemblyBegin_MPIBAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       nstash,reallocs;
  InsertMode     addv;

  PetscFunctionBegin;
  if (baij->donotstash) {
    PetscFunctionReturn(0);
  }

  /* make sure all processors are either in INSERTMODE or ADDMODE */
  ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);CHKERRQ(ierr);
  if (addv == (ADD_VALUES|INSERT_VALUES)) {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
  }
  mat->insertmode = addv; /* in case this processor had no cache */

  ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);CHKERRQ(ierr);
  ierr = MatStashScatterBegin_Private(mat,&mat->bstash,baij->rangebs);CHKERRQ(ierr);
  ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
  ierr = PetscInfo2(mat,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
  ierr = MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);CHKERRQ(ierr);
  ierr = PetscInfo2(mat,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatAssemblyEnd_MPIBAIJ"
PetscErrorCode MatAssemblyEnd_MPIBAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIBAIJ    *baij=(Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *a=(Mat_SeqBAIJ*)baij->A->data;
  PetscErrorCode ierr;
  PetscInt       i,j,rstart,ncols,flg,bs2=baij->bs2;
  PetscInt       *row,*col;
  PetscTruth     r1,r2,r3,other_disassembled;
  MatScalar      *val;
  InsertMode     addv = mat->insertmode;
  PetscMPIInt    n;

  /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */
  PetscFunctionBegin;
  if (!baij->donotstash) {
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      if (!flg) break;

      for (i=0; i<n;) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        /* Now assemble all these values with a single function call */
        ierr = MatSetValues_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
        i = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
    /* Now process the block-stash.  Since the values are stashed column-oriented,
       set the roworiented flag to column-oriented, and after MatSetValues()
       restore the original flags */
    r1 = baij->roworiented;
    r2 = a->roworiented;
    r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;
    baij->roworiented = PETSC_FALSE;
    a->roworiented    = PETSC_FALSE;
    (((Mat_SeqBAIJ*)baij->B->data))->roworiented = PETSC_FALSE; /* b->roworiented */
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      if (!flg) break;

      for (i=0; i<n;) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        ierr = MatSetValuesBlocked_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
        i = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
    baij->roworiented = r1;
    a->roworiented    = r2;
    ((Mat_SeqBAIJ*)baij->B->data)->roworiented = r3; /* b->roworiented */
  }

  ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);

  /* determine if any processor has disassembled; if so we must
     also disassemble ourselves, in order that we may reassemble. */
  /*
     if the nonzero structure of submatrix B cannot change then we know that
     no processor disassembled, thus we can skip this stuff
  */
  if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
    ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);CHKERRQ(ierr);
    if (mat->was_assembled && !other_disassembled) {
      ierr = DisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
    }
  }

  if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatSetUpMultiply_MPIBAIJ(mat);CHKERRQ(ierr);
  }
  ((Mat_SeqBAIJ*)baij->B->data)->compressedrow.use = PETSC_TRUE; /* b->compressedrow.use */
  ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (baij->ht && mode == MAT_FINAL_ASSEMBLY) {
    ierr = PetscInfo1(mat,"Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);CHKERRQ(ierr);
    baij->ht_total_ct  = 0;
    baij->ht_insert_ct = 0;
  }
#endif
  if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatCreateHashTable_MPIBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
    mat->ops->setvalues        = MatSetValues_MPIBAIJ_HT;
    mat->ops->setvaluesblocked = MatSetValuesBlocked_MPIBAIJ_HT;
  }

  ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
  baij->rowvalues = 0;
  PetscFunctionReturn(0);
}
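/*
   Typical call sequence driving the two routines above (sketch only; the
   sizes and preallocation counts are placeholders):

     Mat A;
     ierr = MatCreate(comm,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,m,n,M,N);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
     ierr = MatMPIBAIJSetPreallocation(A,bs,d_nz,PETSC_NULL,o_nz,PETSC_NULL);CHKERRQ(ierr);
     ... MatSetValues() and MatSetValuesBlocked() from any process ...
     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ... computation not touching A may overlap the communication here ...
     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   MatAssemblyBegin() ships stashed off-process entries; MatAssemblyEnd()
   receives them and applies them locally through MatSetValues_MPIBAIJ()
   and MatSetValuesBlocked_MPIBAIJ(), as seen above.
*/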
#undef __FUNCT__
#define __FUNCT__ "MatView_MPIBAIJ_ASCIIorDraworSocket"
static PetscErrorCode MatView_MPIBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
{
  Mat_MPIBAIJ       *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode    ierr;
  PetscMPIInt       size = baij->size,rank = baij->rank;
  PetscInt          bs = mat->rmap->bs;
  PetscTruth        iascii,isdraw;
  PetscViewer       sviewer;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      MatInfo info;
      ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&rank);CHKERRQ(ierr);
      ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",
             rank,mat->rmap->N,(PetscInt)info.nz_used*bs,(PetscInt)info.nz_allocated*bs,
             mat->rmap->bs,(PetscInt)info.memory);CHKERRQ(ierr);
      ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);CHKERRQ(ierr);
      ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used*bs);CHKERRQ(ierr);
      ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");CHKERRQ(ierr);
      ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else if (format == PETSC_VIEWER_ASCII_INFO) {
      ierr = PetscViewerASCIIPrintf(viewer,"  block size is %D\n",bs);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
      PetscFunctionReturn(0);
    }
  }

  if (isdraw) {
    PetscDraw  draw;
    PetscTruth isnull;
    ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
    ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
  }

  if (size == 1) {
    ierr = PetscObjectSetName((PetscObject)baij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
    ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
  } else {
    /* assemble the entire matrix onto the first processor. */
    Mat         A;
    Mat_SeqBAIJ *Aloc;
    PetscInt    M = mat->rmap->N,N = mat->cmap->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
    MatScalar   *a;

    /* Here we are creating a temporary matrix, so we will assume MPIBAIJ is acceptable */
    /* Perhaps this should be the type of mat? */
    ierr = MatCreate(((PetscObject)mat)->comm,&A);CHKERRQ(ierr);
    if (!rank) {
      ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
    } else {
      ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
    }
    ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(A,mat->rmap->bs,0,PETSC_NULL,0,PETSC_NULL);CHKERRQ(ierr);
    ierr = PetscLogObjectParent(mat,A);CHKERRQ(ierr);

    /* copy over the A part */
    Aloc = (Mat_SeqBAIJ*)baij->A->data;
    ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
    ierr = PetscMalloc(bs*sizeof(PetscInt),&rvals);CHKERRQ(ierr);

    for (i=0; i<mbs; i++) {
      rvals[0] = bs*(baij->rstartbs + i);
      for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
      for (j=ai[i]; j<ai[i+1]; j++) {
        col = (baij->cstartbs+aj[j])*bs;
        for (k=0; k<bs; k++) {
          ierr = MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
          col++; a += bs;
        }
      }
    }
    /* copy over the B part */
    Aloc = (Mat_SeqBAIJ*)baij->B->data;
    ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
    for (i=0; i<mbs; i++) {
      rvals[0] = bs*(baij->rstartbs + i);
      for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
      for (j=ai[i]; j<ai[i+1]; j++) {
        col = baij->garray[aj[j]]*bs;
        for (k=0; k<bs; k++) {
          ierr = MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
          col++; a += bs;
        }
      }
    }
    ierr = PetscFree(rvals);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    /*
       Everyone has to call to draw the matrix since the graphics waits are
       synchronized across all processors that share the PetscDraw object
    */
    ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
    if (!rank) {
      ierr = PetscObjectSetName((PetscObject)((Mat_MPIBAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
      ierr = MatView(((Mat_MPIBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
    }
    ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
    ierr = MatDestroy(A);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatView_MPIBAIJ"
PetscErrorCode MatView_MPIBAIJ(Mat mat,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscTruth     iascii,isdraw,issocket,isbinary;

  PetscFunctionBegin;
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
  if (iascii || isdraw || issocket || isbinary) {
    ierr = MatView_MPIBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
  } else {
    SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPIBAIJ matrices",((PetscObject)viewer)->type_name);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIBAIJ"
PetscErrorCode MatDestroy_MPIBAIJ(Mat mat)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
#if defined(PETSC_USE_LOG)
  PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap->N,mat->cmap->N);
#endif
  ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
  ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
  ierr = MatDestroy(baij->A);CHKERRQ(ierr);
  ierr = MatDestroy(baij->B);CHKERRQ(ierr);
#if defined (PETSC_USE_CTABLE)
  if (baij->colmap) {ierr = PetscTableDestroy(baij->colmap);CHKERRQ(ierr);}
#else
  ierr = PetscFree(baij->colmap);CHKERRQ(ierr);
#endif
  ierr = PetscFree(baij->garray);CHKERRQ(ierr);
  if (baij->lvec) {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
  if (baij->Mvctx) {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
  ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
  ierr = PetscFree(baij->barray);CHKERRQ(ierr);
  ierr = PetscFree(baij->hd);CHKERRQ(ierr);
  ierr = PetscFree(baij->rangebs);CHKERRQ(ierr);
  ierr = PetscFree(baij);CHKERRQ(ierr);

  ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocation_C","",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocationCSR_C","",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C","",PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatSetHashTableFactor_C","",PETSC_NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMult_MPIBAIJ"
PetscErrorCode MatMult_MPIBAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       nt;

  PetscFunctionBegin;
  ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
  if (nt != A->cmap->n) {
    SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
  }
  ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
  if (nt != A->rmap->n) {
    SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
  }
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_MPIBAIJ"
PetscErrorCode MatMultAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
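/*
   Both routines above overlap communication with computation: the ghost
   entries of xx needed by the off-diagonal block B are scattered while the
   diagonal block A is applied.  Schematically:

     VecScatterBegin(Mvctx,xx,lvec,...);   start fetching ghost entries
     yy = A*xx_local;                      local work hides the transfer
     VecScatterEnd(Mvctx,xx,lvec,...);     ghost entries now sit in lvec
     yy += B*lvec;                         add off-diagonal contribution

   so each process computes yy = [A B] [xx_local; xx_ghost].
*/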
#undef __FUNCT__
#define __FUNCT__ "MatMultTranspose_MPIBAIJ"
PetscErrorCode MatMultTranspose_MPIBAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;
  PetscTruth     merged;

  PetscFunctionBegin;
  ierr = VecScatterGetMerged(a->Mvctx,&merged);CHKERRQ(ierr);
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  if (!merged) {
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* receive remote parts: note this assumes the values are not actually */
    /* inserted in yy until the next line */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  } else {
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* values actually were received in the Begin() but we need to call this nop */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultTransposeAdd_MPIBAIJ"
PetscErrorCode MatMultTransposeAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  /* send it on its way */
  ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  /* do local part */
  ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  /* receive remote parts: note this assumes the values are not actually */
  /* inserted in yy until the next line, which is true for my implementation */
  /* but is not perhaps always true. */
  ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  This only works correctly for square matrices where the subblock A->A is the
  diagonal block
*/
#undef __FUNCT__
#define __FUNCT__ "MatGetDiagonal_MPIBAIJ"
PetscErrorCode MatGetDiagonal_MPIBAIJ(Mat A,Vec v)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
  ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatScale_MPIBAIJ"
PetscErrorCode MatScale_MPIBAIJ(Mat A,PetscScalar aa)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatScale(a->A,aa);CHKERRQ(ierr);
  ierr = MatScale(a->B,aa);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatGetRow_MPIBAIJ"
PetscErrorCode MatGetRow_MPIBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  Mat_MPIBAIJ    *mat = (Mat_MPIBAIJ*)matin->data;
  PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
  PetscErrorCode ierr;
  PetscInt       bs = matin->rmap->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
  PetscInt       nztot,nzA,nzB,lrow,brstart = matin->rmap->rstart,brend = matin->rmap->rend;
  PetscInt       *cmap,*idx_p,cstart = mat->cstartbs;

  PetscFunctionBegin;
  if (mat->getrowactive) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
  mat->getrowactive = PETSC_TRUE;

  if (!mat->rowvalues && (idx || v)) {
    /*
        allocate enough space to hold information from the longest row.
    */
    Mat_SeqBAIJ *Aa = (Mat_SeqBAIJ*)mat->A->data,*Ba = (Mat_SeqBAIJ*)mat->B->data;
    PetscInt    max = 1,mbs = mat->mbs,tmp;
    for (i=0; i<mbs; i++) {
      tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
      if (max < tmp) { max = tmp; }
    }
    ierr = PetscMalloc(max*bs2*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);CHKERRQ(ierr);
    mat->rowindices = (PetscInt*)(mat->rowvalues + max*bs2);
  }

  if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
  lrow = row - brstart;

  pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
  if (!v)   {pvA = 0; pvB = 0;}
  if (!idx) {pcA = 0; if (!v) pcB = 0;}
  ierr  = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr  = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  nztot = nzA + nzB;

  cmap = mat->garray;
  if (v || idx) {
    if (nztot) {
      /* Sort by increasing column numbers, assuming A and B already sorted */
      PetscInt imark = -1;
      if (v) {
        *v = v_p = mat->rowvalues;
        for (i=0; i<nzB; i++) {
          if (cmap[cworkB[i]/bs] < cstart) v_p[i] = vworkB[i];
          else break;
        }
        imark = i;
        for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
        for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
      }
      if (idx) {
        *idx = idx_p = mat->rowindices;
        if (imark > -1) {
          for (i=0; i<imark; i++) {
            idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
          }
        } else {
          for (i=0; i<nzB; i++) {
            if (cmap[cworkB[i]/bs] < cstart) idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
            else break;
          }
          imark = i;
        }
        for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
        for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
      }
    } else {
      if (idx) *idx = 0;
      if (v)   *v   = 0;
    }
  }
  *nz  = nztot;
  ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatRestoreRow_MPIBAIJ"
PetscErrorCode MatRestoreRow_MPIBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;

  PetscFunctionBegin;
  if (!baij->getrowactive) {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
  }
  baij->getrowactive = PETSC_FALSE;
  PetscFunctionReturn(0);
}
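/*
   Usage sketch for the pair above (illustration only): rows must be locally
   owned, and each MatGetRow() must be matched by MatRestoreRow() before the
   next row is requested:

     PetscInt          rstart,rend,row,nz;
     const PetscInt    *cols;
     const PetscScalar *vals;
     ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
     for (row=rstart; row<rend; row++) {
       ierr = MatGetRow(mat,row,&nz,&cols,&vals);CHKERRQ(ierr);
       ... use nz, cols[] and vals[], already sorted by global column ...
       ierr = MatRestoreRow(mat,row,&nz,&cols,&vals);CHKERRQ(ierr);
     }
*/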
#undef __FUNCT__
#define __FUNCT__ "MatZeroEntries_MPIBAIJ"
PetscErrorCode MatZeroEntries_MPIBAIJ(Mat A)
{
  Mat_MPIBAIJ    *l = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
  ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetInfo_MPIBAIJ"
PetscErrorCode MatGetInfo_MPIBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)matin->data;
  Mat            A = a->A,B = a->B;
  PetscErrorCode ierr;
  PetscReal      isend[5],irecv[5];

  PetscFunctionBegin;
  info->block_size = (PetscReal)matin->rmap->bs;
  ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
  isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
  isend[3] = info->memory;  isend[4] = info->mallocs;
  ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
  isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
  isend[3] += info->memory;  isend[4] += info->mallocs;
  if (flag == MAT_LOCAL) {
    info->nz_used      = isend[0];
    info->nz_allocated = isend[1];
    info->nz_unneeded  = isend[2];
    info->memory       = isend[3];
    info->mallocs      = isend[4];
  } else if (flag == MAT_GLOBAL_MAX) {
    ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,((PetscObject)matin)->comm);CHKERRQ(ierr);
    info->nz_used      = irecv[0];
    info->nz_allocated = irecv[1];
    info->nz_unneeded  = irecv[2];
    info->memory       = irecv[3];
    info->mallocs      = irecv[4];
  } else if (flag == MAT_GLOBAL_SUM) {
    ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,((PetscObject)matin)->comm);CHKERRQ(ierr);
    info->nz_used      = irecv[0];
    info->nz_allocated = irecv[1];
    info->nz_unneeded  = irecv[2];
    info->memory       = irecv[3];
    info->mallocs      = irecv[4];
  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag);
  }
  info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
  info->fill_ratio_needed = 0;
  info->factor_mallocs    = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetOption_MPIBAIJ"
PetscErrorCode MatSetOption_MPIBAIJ(Mat A,MatOption op,PetscTruth flg)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  switch (op) {
  case MAT_NEW_NONZERO_LOCATIONS:
  case MAT_NEW_NONZERO_ALLOCATION_ERR:
  case MAT_UNUSED_NONZERO_LOCATION_ERR:
  case MAT_KEEP_NONZERO_PATTERN:
  case MAT_NEW_NONZERO_LOCATION_ERR:
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
    break;
  case MAT_ROW_ORIENTED:
    a->roworiented = flg;
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
    break;
  case MAT_NEW_DIAGONALS:
    ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
    break;
  case MAT_IGNORE_OFF_PROC_ENTRIES:
    a->donotstash = flg;
    break;
  case MAT_USE_HASH_TABLE:
    a->ht_flag = flg;
    break;
  case MAT_SYMMETRIC:
  case MAT_STRUCTURALLY_SYMMETRIC:
  case MAT_HERMITIAN:
  case MAT_SYMMETRY_ETERNAL:
    ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
    break;
  default:
    SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
  }
  PetscFunctionReturn(0);
}
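/*
   Example (illustration only): the MAT_USE_HASH_TABLE branch above only
   records the flag; to take effect it must be set before the first final
   assembly, e.g.

     ierr = MatSetOption(A,MAT_USE_HASH_TABLE,PETSC_TRUE);CHKERRQ(ierr);

   so that MatAssemblyEnd_MPIBAIJ() builds the table and switches the
   MatSetValues() and MatSetValuesBlocked() implementations to the _HT
   variants.
*/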
#undef __FUNCT__
#define __FUNCT__ "MatTranspose_MPIBAIJ"
PetscErrorCode MatTranspose_MPIBAIJ(Mat A,MatReuse reuse,Mat *matout)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)A->data;
  Mat_SeqBAIJ    *Aloc;
  Mat            B;
  PetscErrorCode ierr;
  PetscInt       M=A->rmap->N,N=A->cmap->N,*ai,*aj,i,*rvals,j,k,col;
  PetscInt       bs=A->rmap->bs,mbs=baij->mbs;
  MatScalar      *a;

  PetscFunctionBegin;
  if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
  if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
    ierr = MatCreate(((PetscObject)A)->comm,&B);CHKERRQ(ierr);
    ierr = MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);CHKERRQ(ierr);
    ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(B,A->rmap->bs,0,PETSC_NULL,0,PETSC_NULL);CHKERRQ(ierr);
  } else {
    B = *matout;
  }

  /* copy over the A part */
  Aloc = (Mat_SeqBAIJ*)baij->A->data;
  ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
  ierr = PetscMalloc(bs*sizeof(PetscInt),&rvals);CHKERRQ(ierr);

  for (i=0; i<mbs; i++) {
    rvals[0] = bs*(baij->rstartbs + i);
    for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
    for (j=ai[i]; j<ai[i+1]; j++) {
      col = (baij->cstartbs+aj[j])*bs;
      for (k=0; k<bs; k++) {
        ierr = MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);CHKERRQ(ierr);
        col++; a += bs;
      }
    }
  }
  /* copy over the B part */
  Aloc = (Mat_SeqBAIJ*)baij->B->data;
  ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
  for (i=0; i<mbs; i++) {
    rvals[0] = bs*(baij->rstartbs + i);
    for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
    for (j=ai[i]; j<ai[i+1]; j++) {
      col = baij->garray[aj[j]]*bs;
      for (k=0; k<bs; k++) {
        ierr = MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);CHKERRQ(ierr);
        col++; a += bs;
      }
    }
  }
  ierr = PetscFree(rvals);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
    *matout = B;
  } else {
    ierr = MatHeaderCopy(A,B);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatDiagonalScale_MPIBAIJ"
PetscErrorCode MatDiagonalScale_MPIBAIJ(Mat mat,Vec ll,Vec rr)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  Mat            a = baij->A,b = baij->B;
  PetscErrorCode ierr;
  PetscInt       s1,s2,s3;

  PetscFunctionBegin;
  ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
  if (rr) {
    ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
    if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
    /* Overlap communication with computation. */
    ierr = VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  }
  if (ll) {
    ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
    if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
    ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
  }
  /* scale the diagonal block */
  ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);

  if (rr) {
    /* Do a scatter end and then right scale the off-diagonal block */
    ierr = VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatZeroRows_MPIBAIJ"
PetscErrorCode MatZeroRows_MPIBAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
{
  Mat_MPIBAIJ    *l = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;
  PetscMPIInt    imdex,size = l->size,n,rank = l->rank;
  PetscInt       i,*owners = A->rmap->range;
  PetscInt       *nprocs,j,idx,nsends,row;
  PetscInt       nmax,*svalues,*starts,*owner,nrecvs;
  PetscInt       *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source,lastidx = -1;
  PetscInt       *lens,*lrows,*values,rstart_bs=A->rmap->rstart;
  MPI_Comm       comm = ((PetscObject)A)->comm;
  MPI_Request    *send_waits,*recv_waits;
  MPI_Status     recv_status,*send_status;
#if defined(PETSC_DEBUG)
  PetscTruth     found = PETSC_FALSE;
#endif

  PetscFunctionBegin;
  /* first count number of contributors to each processor */
  ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
  ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr); /* see note */
  j = 0;
  for (i=0; i<N; i++) {
    if (lastidx > (idx = rows[i])) j = 0;
    lastidx = idx;
    for (; j<size; j++) {
      if (idx >= owners[j] && idx < owners[j+1]) {
        nprocs[2*j]++;
        nprocs[2*j+1] = 1;
        owner[i] = j;
#if defined(PETSC_DEBUG)
        found = PETSC_TRUE;
#endif
        break;
      }
    }
#if defined(PETSC_DEBUG)
    if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
    found = PETSC_FALSE;
#endif
  }
  nsends = 0;  for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}

  /* inform other processors of number of messages and max length */
  ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);

  /* post receives: */
  ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr);
  ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
  for (i=0; i<nrecvs; i++) {
    ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
  }

  /* do sends:
       1) starts[i] gives the starting index in svalues for stuff going to
          the ith processor
  */
  ierr = PetscMalloc((N+1)*sizeof(PetscInt),&svalues);CHKERRQ(ierr);
  ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
  ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
  starts[0] = 0;
  for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
  for (i=0; i<N; i++) {
    svalues[starts[owner[i]]++] = rows[i];
  }

  starts[0] = 0;
  for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
  count = 0;
  for (i=0; i<size; i++) {
    if (nprocs[2*i+1]) {
      ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  base = owners[rank];

  /* wait on receives */
  ierr = PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);CHKERRQ(ierr);
  source = lens + nrecvs;
  count  = nrecvs; slen = 0;
  while (count) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
    source[imdex] = recv_status.MPI_SOURCE;
    lens[imdex]   = n;
    slen += n;
    count--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* move the data into the send scatter */
  ierr = PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);CHKERRQ(ierr);
  count = 0;
  for (i=0; i<nrecvs; i++) {
    values = rvalues + i*nmax;
    for (j=0; j<lens[i]; j++) {
      lrows[count++] = values[j] - base;
    }
  }
  ierr = PetscFree(rvalues);CHKERRQ(ierr);
  ierr = PetscFree(lens);CHKERRQ(ierr);
  ierr = PetscFree(owner);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* actually zap the local rows */
  /*
     Zero the required rows. If the "diagonal block" of the matrix is square
     and the user wishes to set the diagonal, we use separate code so that
     MatSetValues() is not called for each diagonal entry; that path would
     allocate new memory for each insertion, causing many mallocs and
     slowing things down.
  */
  /* must zero l->B before l->A because the (diag) case below may put values into l->B */
  ierr = MatZeroRows_SeqBAIJ(l->B,slen,lrows,0.0);CHKERRQ(ierr);
  if ((diag != 0.0) && (l->A->rmap->N == l->A->cmap->N)) {
    ierr = MatZeroRows_SeqBAIJ(l->A,slen,lrows,diag);CHKERRQ(ierr);
  } else if (diag != 0.0) {
    ierr = MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0);CHKERRQ(ierr);
    if (((Mat_SeqBAIJ*)l->A->data)->nonew) {
      SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
MAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
    }
    for (i=0; i<slen; i++) {
      row  = lrows[i] + rstart_bs;
      ierr = MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  } else {
    ierr = MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0);CHKERRQ(ierr);
  }

  ierr = PetscFree(lrows);CHKERRQ(ierr);

  /* wait on sends */
  if (nsends) {
    ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
    ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
    ierr = PetscFree(send_status);CHKERRQ(ierr);
  }
  ierr = PetscFree(send_waits);CHKERRQ(ierr);
  ierr = PetscFree(svalues);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
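/*
   Usage sketch (illustrative, not part of the library): zeroing the rows of
   Dirichlet boundary nodes while keeping a unit diagonal.  The row list
   bcrows is hypothetical; each process passes only the global rows it wants
   zeroed, and the routine above routes each row to its owning process.

     PetscInt bcrows[] = {0,5,17};
     ierr = MatZeroRows(A,3,bcrows,1.0);CHKERRQ(ierr);
*/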
#undef __FUNCT__
#define __FUNCT__ "MatSetUnfactored_MPIBAIJ"
PetscErrorCode MatSetUnfactored_MPIBAIJ(Mat A)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode MatDuplicate_MPIBAIJ(Mat,MatDuplicateOption,Mat *);

#undef __FUNCT__
#define __FUNCT__ "MatEqual_MPIBAIJ"
PetscErrorCode MatEqual_MPIBAIJ(Mat A,Mat B,PetscTruth *flag)
{
  Mat_MPIBAIJ    *matB = (Mat_MPIBAIJ*)B->data,*matA = (Mat_MPIBAIJ*)A->data;
  Mat            a,b,c,d;
  PetscTruth     flg;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  a = matA->A; b = matA->B;
  c = matB->A; d = matB->B;

  ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
  }
  ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCopy_MPIBAIJ"
PetscErrorCode MatCopy_MPIBAIJ(Mat A,Mat B,MatStructure str)
{
  PetscErrorCode ierr;
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ *)A->data;
  Mat_MPIBAIJ    *b = (Mat_MPIBAIJ *)B->data;

  PetscFunctionBegin;
  /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
  if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
    ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr);
  } else {
    ierr = MatCopy(a->A,b->A,str);CHKERRQ(ierr);
    ierr = MatCopy(a->B,b->B,str);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetUpPreallocation_MPIBAIJ"
PetscErrorCode MatSetUpPreallocation_MPIBAIJ(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMPIBAIJSetPreallocation(A,-PetscMax(A->rmap->bs,1),PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#include "petscblaslapack.h"
#undef __FUNCT__
#define __FUNCT__ "MatAXPY_MPIBAIJ"
PetscErrorCode MatAXPY_MPIBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
{
  PetscErrorCode ierr;
  Mat_MPIBAIJ    *xx=(Mat_MPIBAIJ *)X->data,*yy=(Mat_MPIBAIJ *)Y->data;
  PetscBLASInt   bnz,one=1;
  Mat_SeqBAIJ    *x,*y;

  PetscFunctionBegin;
  if (str == SAME_NONZERO_PATTERN) {
    PetscScalar alpha = a;
    x   = (Mat_SeqBAIJ *)xx->A->data;
    y   = (Mat_SeqBAIJ *)yy->A->data;
    bnz = PetscBLASIntCast(x->nz);
    BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
    x   = (Mat_SeqBAIJ *)xx->B->data;
    y   = (Mat_SeqBAIJ *)yy->B->data;
    bnz = PetscBLASIntCast(x->nz);
    BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
  } else {
    ierr = MatAXPY_Basic(Y,a,X,str);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
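/*
   Usage sketch (illustrative): Y <- a*X + Y.  When the caller knows X and Y
   were assembled with identical block nonzero patterns, passing
   SAME_NONZERO_PATTERN lets the routine above reduce to two BLAS axpy calls
   on the raw value arrays of the diagonal and off-diagonal blocks, instead
   of the generic MatAXPY_Basic() entry-by-entry path.

     ierr = MatAXPY(Y,2.0,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
*/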
#undef __FUNCT__
#define __FUNCT__ "MatRealPart_MPIBAIJ"
PetscErrorCode MatRealPart_MPIBAIJ(Mat A)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatRealPart(a->A);CHKERRQ(ierr);
  ierr = MatRealPart(a->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatImaginaryPart_MPIBAIJ"
PetscErrorCode MatImaginaryPart_MPIBAIJ(Mat A)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatImaginaryPart(a->A);CHKERRQ(ierr);
  ierr = MatImaginaryPart(a->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrix_MPIBAIJ"
PetscErrorCode MatGetSubMatrix_MPIBAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
{
  PetscErrorCode ierr;
  IS             iscol_local;
  PetscInt       csize;

  PetscFunctionBegin;
  ierr = ISGetLocalSize(iscol,&csize);CHKERRQ(ierr);
  if (call == MAT_REUSE_MATRIX) {
    ierr = PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);CHKERRQ(ierr);
    if (!iscol_local) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
  } else {
    ierr = ISAllGather(iscol,&iscol_local);CHKERRQ(ierr);
  }
  ierr = MatGetSubMatrix_MPIBAIJ_Private(mat,isrow,iscol_local,csize,call,newmat);CHKERRQ(ierr);
  if (call == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);CHKERRQ(ierr);
    ierr = ISDestroy(iscol_local);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
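/*
   Usage sketch (illustrative; the argument list follows the signature used in
   this file): extracting the same parallel submatrix repeatedly.  The first
   extraction composes the gathered column IS under the name "ISAllGather" on
   the result, which is what allows the MAT_REUSE_MATRIX call to succeed.

     Mat sub;
     ierr = MatGetSubMatrix(A,isrow,iscol,MAT_INITIAL_MATRIX,&sub);CHKERRQ(ierr);
       ... change the numerical values of A, keeping the same nonzero pattern ...
     ierr = MatGetSubMatrix(A,isrow,iscol,MAT_REUSE_MATRIX,&sub);CHKERRQ(ierr);
*/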
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrix_MPIBAIJ_Private"
/*
    Not great since it makes two copies of the submatrix: first a SeqBAIJ in
  local memory, and then the end result by concatenating the local matrices.
  Writing it directly would be much like MatGetSubMatrices_MPIBAIJ().
*/
PetscErrorCode MatGetSubMatrix_MPIBAIJ_Private(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;
  PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs;
  PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
  Mat            *local,M,Mreuse;
  MatScalar      *vwork,*aa;
  MPI_Comm       comm = ((PetscObject)mat)->comm;
  Mat_SeqBAIJ    *aij;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);

  if (call == MAT_REUSE_MATRIX) {
    ierr = PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject *)&Mreuse);CHKERRQ(ierr);
    if (!Mreuse) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
    local = &Mreuse;
    ierr = MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,&local);CHKERRQ(ierr);
  } else {
    ierr = MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&local);CHKERRQ(ierr);
    Mreuse = *local;
    ierr = PetscFree(local);CHKERRQ(ierr);
  }

  /*
      m - number of local rows
      n - number of columns (same on all processors)
      rstart - first row in new global matrix generated
  */
  ierr = MatGetBlockSize(mat,&bs);CHKERRQ(ierr);
  ierr = MatGetSize(Mreuse,&m,&n);CHKERRQ(ierr);
  m = m/bs;
  n = n/bs;

  if (call == MAT_INITIAL_MATRIX) {
    aij = (Mat_SeqBAIJ*)(Mreuse)->data;
    ii  = aij->i;
    jj  = aij->j;

    /*
        Determine the number of non-zeros in the diagonal and off-diagonal
        portions of the matrix in order to do correct preallocation
    */

    /* first get start and end of "diagonal" columns */
    if (csize == PETSC_DECIDE) {
      ierr = ISGetSize(isrow,&mglobal);CHKERRQ(ierr);
      if (mglobal == n*bs) { /* square matrix */
        nlocal = m;
      } else {
        nlocal = n/size + ((n % size) > rank);
      }
    } else {
      nlocal = csize/bs;
    }
    ierr   = MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    rstart = rend - nlocal;
    if (rank == size - 1 && rend != n) {
      SETERRQ2(PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
    }

    /* next, compute all the lengths */
    ierr  = PetscMalloc((2*m+1)*sizeof(PetscInt),&dlens);CHKERRQ(ierr);
    olens = dlens + m;
    for (i=0; i<m; i++) {
      jend = ii[i+1] - ii[i];
      olen = 0;
      dlen = 0;
      for (j=0; j<jend; j++) {
        if (*jj < rstart || *jj >= rend) olen++;
        else dlen++;
        jj++;
      }
      olens[i] = olen;
      dlens[i] = dlen;
    }
    ierr = MatCreate(comm,&M);CHKERRQ(ierr);
    ierr = MatSetSizes(M,bs*m,bs*nlocal,PETSC_DECIDE,bs*n);CHKERRQ(ierr);
    ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(M,bs,0,dlens,0,olens);CHKERRQ(ierr);
    ierr = PetscFree(dlens);CHKERRQ(ierr);
  } else {
    PetscInt ml,nl;

    M = *newmat;
    ierr = MatGetLocalSize(M,&ml,&nl);CHKERRQ(ierr);
    if (ml != m) SETERRQ(PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
    ierr = MatZeroEntries(M);CHKERRQ(ierr);
    /*
         The next two lines are needed so we may call MatSetValuesBlocked_MPIBAIJ()
       below directly, rather than the slower MatSetValues().
    */
    M->was_assembled = PETSC_TRUE;
    M->assembled     = PETSC_FALSE;
  }
  ierr = MatSetOption(M,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(M,&rstart,&rend);CHKERRQ(ierr);
  aij = (Mat_SeqBAIJ*)(Mreuse)->data;
  ii  = aij->i;
  jj  = aij->j;
  aa  = aij->a;
  for (i=0; i<m; i++) {
    row   = rstart/bs + i;
    nz    = ii[i+1] - ii[i];
    cwork = jj;     jj += nz;
    vwork = aa;     aa += nz*bs*bs; /* each block column carries bs*bs values */
    ierr = MatSetValuesBlocked_MPIBAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
  }

  ierr = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = M;

  /* save submatrix used in processor for next request */
  if (call == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);CHKERRQ(ierr);
    ierr = PetscObjectDereference((PetscObject)Mreuse);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}
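/*
   Worked example (illustrative) of the diagonal/off-diagonal split computed
   above.  Suppose this process ends up owning block columns [rstart,rend) =
   [4,8) and one of its block rows has block columns {1,4,7,9}.  Then dlen = 2
   (columns 4 and 7 fall inside [4,8)) and olen = 2 (columns 1 and 9 fall
   outside), so that row contributes dlens[i] = 2 and olens[i] = 2 to the
   preallocation of the new matrix.
*/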
#undef __FUNCT__
#define __FUNCT__ "MatPermute_MPIBAIJ"
PetscErrorCode MatPermute_MPIBAIJ(Mat A,IS rowp,IS colp,Mat *B)
{
  MPI_Comm       comm,pcomm;
  PetscInt       first,local_size,nrows;
  const PetscInt *rows;
  PetscMPIInt    size;
  IS             crowp,growp,irowp,lrowp,lcolp,icolp;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* make a collective version of 'rowp' */
  ierr = PetscObjectGetComm((PetscObject)rowp,&pcomm);CHKERRQ(ierr);
  if (pcomm==comm) {
    crowp = rowp;
  } else {
    ierr = ISGetSize(rowp,&nrows);CHKERRQ(ierr);
    ierr = ISGetIndices(rowp,&rows);CHKERRQ(ierr);
    ierr = ISCreateGeneral(comm,nrows,rows,&crowp);CHKERRQ(ierr);
    ierr = ISRestoreIndices(rowp,&rows);CHKERRQ(ierr);
  }
  /* collect the global row permutation and invert it */
  ierr = ISAllGather(crowp,&growp);CHKERRQ(ierr);
  ierr = ISSetPermutation(growp);CHKERRQ(ierr);
  if (pcomm!=comm) {
    ierr = ISDestroy(crowp);CHKERRQ(ierr);
  }
  ierr = ISInvertPermutation(growp,PETSC_DECIDE,&irowp);CHKERRQ(ierr);
  /* get the local target indices */
  ierr = MatGetOwnershipRange(A,&first,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&local_size,PETSC_NULL);CHKERRQ(ierr);
  ierr = ISGetIndices(irowp,&rows);CHKERRQ(ierr);
  ierr = ISCreateGeneral(MPI_COMM_SELF,local_size,rows+first,&lrowp);CHKERRQ(ierr);
  ierr = ISRestoreIndices(irowp,&rows);CHKERRQ(ierr);
  ierr = ISDestroy(irowp);CHKERRQ(ierr);
  /* the column permutation is so much easier;
     make a local version of 'colp' and invert it */
  ierr = PetscObjectGetComm((PetscObject)colp,&pcomm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(pcomm,&size);CHKERRQ(ierr);
  if (size==1) {
    lcolp = colp;
  } else {
    ierr = ISGetSize(colp,&nrows);CHKERRQ(ierr);
    ierr = ISGetIndices(colp,&rows);CHKERRQ(ierr);
    ierr = ISCreateGeneral(MPI_COMM_SELF,nrows,rows,&lcolp);CHKERRQ(ierr);
  }
  ierr = ISSetPermutation(lcolp);CHKERRQ(ierr);
  ierr = ISInvertPermutation(lcolp,PETSC_DECIDE,&icolp);CHKERRQ(ierr);
  ierr = ISSetPermutation(icolp);CHKERRQ(ierr);
  if (size>1) {
    ierr = ISRestoreIndices(colp,&rows);CHKERRQ(ierr);
    ierr = ISDestroy(lcolp);CHKERRQ(ierr);
  }
  /* now we just get the submatrix */
  ierr = MatGetSubMatrix_MPIBAIJ_Private(A,lrowp,icolp,local_size,MAT_INITIAL_MATRIX,B);CHKERRQ(ierr);
  /* clean up */
  ierr = ISDestroy(lrowp);CHKERRQ(ierr);
  ierr = ISDestroy(icolp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetGhosts_MPIBAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatGetGhosts_MPIBAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
{
  Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*) mat->data;
  Mat_SeqBAIJ *B    = (Mat_SeqBAIJ*)baij->B->data;

  PetscFunctionBegin;
  if (nghosts) { *nghosts = B->nbs;}
  if (ghosts)  { *ghosts  = baij->garray;}
  PetscFunctionReturn(0);
}

EXTERN PetscErrorCode CreateColmap_MPIBAIJ_Private(Mat);
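/*
   Usage sketch (illustrative): how the finite-difference coloring context
   built by the routine below is normally created from an assembled Jacobian.
   The coloring type MATCOLORING_SL is one of several choices; the names
   follow the PETSc calls of this era.

     ISColoring    iscoloring;
     MatFDColoring fdcoloring;
     ierr = MatGetColoring(J,MATCOLORING_SL,&iscoloring);CHKERRQ(ierr);
     ierr = MatFDColoringCreate(J,iscoloring,&fdcoloring);CHKERRQ(ierr);
     ierr = ISColoringDestroy(iscoloring);CHKERRQ(ierr);
*/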
#undef __FUNCT__
#define __FUNCT__ "MatFDColoringCreate_MPIBAIJ"
/*
    This routine is almost identical to MatFDColoringCreate_MPIAIJ()!
*/
PetscErrorCode MatFDColoringCreate_MPIBAIJ(Mat mat,ISColoring iscoloring,MatFDColoring c)
{
  Mat_MPIBAIJ            *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode         ierr;
  PetscMPIInt            size,*ncolsonproc,*disp,nn;
  PetscInt               bs,i,n,nrows,j,k,m,*rows = 0,*A_ci,*A_cj,ncols,col;
  const PetscInt         *is;
  PetscInt               nis = iscoloring->n,nctot,*cols,*B_ci,*B_cj;
  PetscInt               *rowhit,M,cstart,cend,colb;
  PetscInt               *columnsforrow,l;
  IS                     *isa;
  PetscTruth             done,flg;
  ISLocalToGlobalMapping map = mat->bmapping;
  PetscInt               *ltog = (map ? map->indices : (PetscInt*) PETSC_NULL),ctype=c->ctype;

  PetscFunctionBegin;
  if (!mat->assembled) {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix must be assembled first; MatAssemblyBegin/End();");
  }
  if (ctype == IS_COLORING_GHOSTED && !map) SETERRQ(PETSC_ERR_ARG_INCOMP,"When using ghosted differencing matrix must have local to global mapping provided with MatSetLocalToGlobalMappingBlock");

  ierr = ISColoringGetIS(iscoloring,PETSC_IGNORE,&isa);CHKERRQ(ierr);

  ierr = MatGetBlockSize(mat,&bs);CHKERRQ(ierr);
  M         = mat->rmap->n/bs;
  cstart    = mat->cmap->rstart/bs;
  cend      = mat->cmap->rend/bs;
  c->M      = mat->rmap->N/bs;  /* set the global rows and columns and local rows */
  c->N      = mat->cmap->N/bs;
  c->m      = mat->rmap->n/bs;
  c->rstart = mat->rmap->rstart/bs;

  c->ncolors = nis;
  ierr = PetscMalloc(nis*sizeof(PetscInt),&c->ncolumns);CHKERRQ(ierr);
  ierr = PetscMalloc(nis*sizeof(PetscInt*),&c->columns);CHKERRQ(ierr);
  ierr = PetscMalloc(nis*sizeof(PetscInt),&c->nrows);CHKERRQ(ierr);
  ierr = PetscMalloc(nis*sizeof(PetscInt*),&c->rows);CHKERRQ(ierr);
  ierr = PetscMalloc(nis*sizeof(PetscInt*),&c->columnsforrow);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory(c,5*nis*sizeof(PetscInt));CHKERRQ(ierr);

  /* Allow access to data structures of local part of matrix */
  if (!baij->colmap) {
    ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
  }
  ierr = MatGetColumnIJ(baij->A,0,PETSC_FALSE,PETSC_FALSE,&ncols,&A_ci,&A_cj,&done);CHKERRQ(ierr);
  ierr = MatGetColumnIJ(baij->B,0,PETSC_FALSE,PETSC_FALSE,&ncols,&B_ci,&B_cj,&done);CHKERRQ(ierr);

  ierr = PetscMalloc((M+1)*sizeof(PetscInt),&rowhit);CHKERRQ(ierr);
  ierr = PetscMalloc((M+1)*sizeof(PetscInt),&columnsforrow);CHKERRQ(ierr);

  for (i=0; i<nis; i++) {
    ierr = ISGetLocalSize(isa[i],&n);CHKERRQ(ierr);
    ierr = ISGetIndices(isa[i],&is);CHKERRQ(ierr);
    c->ncolumns[i] = n;
    if (n) {
      ierr = PetscMalloc(n*sizeof(PetscInt),&c->columns[i]);CHKERRQ(ierr);
      ierr = PetscLogObjectMemory(c,n*sizeof(PetscInt));CHKERRQ(ierr);
      ierr = PetscMemcpy(c->columns[i],is,n*sizeof(PetscInt));CHKERRQ(ierr);
    } else {
      c->columns[i] = 0;
    }

    if (ctype == IS_COLORING_GLOBAL){
      /* Determine the total (parallel) number of columns of this color */
      ierr = MPI_Comm_size(((PetscObject)mat)->comm,&size);CHKERRQ(ierr);
      ierr = PetscMalloc(2*size*sizeof(PetscInt*),&ncolsonproc);CHKERRQ(ierr);
      disp = ncolsonproc + size;

      nn   = PetscMPIIntCast(n);
      ierr = MPI_Allgather(&nn,1,MPI_INT,ncolsonproc,1,MPI_INT,((PetscObject)mat)->comm);CHKERRQ(ierr);
      nctot = 0; for (j=0; j<size; j++) {nctot += ncolsonproc[j];}
      if (!nctot) {
        ierr = PetscInfo(mat,"Coloring of matrix has some unneeded colors with no corresponding rows\n");CHKERRQ(ierr);
      }

      disp[0] = 0;
      for (j=1; j<size; j++) {
        disp[j] = disp[j-1] + ncolsonproc[j-1];
      }

      /* Get complete list of columns for color on each processor */
      ierr = PetscMalloc((nctot+1)*sizeof(PetscInt),&cols);CHKERRQ(ierr);
      ierr = MPI_Allgatherv((void*)is,n,MPIU_INT,cols,ncolsonproc,disp,MPIU_INT,((PetscObject)mat)->comm);CHKERRQ(ierr);
      ierr = PetscFree(ncolsonproc);CHKERRQ(ierr);
    } else if (ctype == IS_COLORING_GHOSTED){
      /* Determine local number of columns of this color on this process, including ghost points */
      nctot = n;
      ierr = PetscMalloc((nctot+1)*sizeof(PetscInt),&cols);CHKERRQ(ierr);
      ierr = PetscMemcpy(cols,is,n*sizeof(PetscInt));CHKERRQ(ierr);
    } else {
      SETERRQ(PETSC_ERR_SUP,"Not provided for this MatFDColoring type");
    }

    /*
       Mark all rows affected by these columns
    */
    /* Temporary option to allow for debugging/testing */
    flg  = PETSC_FALSE;
    ierr = PetscOptionsGetTruth(PETSC_NULL,"-matfdcoloring_slow",&flg,PETSC_NULL);CHKERRQ(ierr);
    if (!flg) {/*-----------------------------------------------------------------------------*/
      /* crude, fast version */
      ierr = PetscMemzero(rowhit,M*sizeof(PetscInt));CHKERRQ(ierr);
      /* loop over columns */
      for (j=0; j<nctot; j++) {
        if (ctype == IS_COLORING_GHOSTED) {
          col = ltog[cols[j]];
        } else {
          col = cols[j];
        }
        if (col >= cstart && col < cend) {
          /* column is in diagonal block of matrix */
          rows = A_cj + A_ci[col-cstart];
          m    = A_ci[col-cstart+1] - A_ci[col-cstart];
        } else {
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(baij->colmap,col+1,&colb);CHKERRQ(ierr);
          colb--;
#else
          colb = baij->colmap[col] - 1;
#endif
          if (colb == -1) {
            m = 0;
          } else {
            colb = colb/bs;
            rows = B_cj + B_ci[colb];
            m    = B_ci[colb+1] - B_ci[colb];
          }
        }
        /* loop over the rows of this column, marking them in rowhit */
        for (k=0; k<m; k++) {
          rowhit[*rows++] = col + 1;
        }
      }

      /* count the number of hits */
      nrows = 0;
      for (j=0; j<M; j++) {
        if (rowhit[j]) nrows++;
      }
      c->nrows[i] = nrows;
      ierr = PetscMalloc((nrows+1)*sizeof(PetscInt),&c->rows[i]);CHKERRQ(ierr);
      ierr = PetscMalloc((nrows+1)*sizeof(PetscInt),&c->columnsforrow[i]);CHKERRQ(ierr);
      ierr = PetscLogObjectMemory(c,2*(nrows+1)*sizeof(PetscInt));CHKERRQ(ierr);
      nrows = 0;
      for (j=0; j<M; j++) {
        if (rowhit[j]) {
          c->rows[i][nrows]          = j;
          c->columnsforrow[i][nrows] = rowhit[j] - 1;
          nrows++;
        }
      }
    } else {/*-------------------------------------------------------------------------------*/
      /* slow version, using rowhit as a linked list */
      PetscInt currentcol,fm,mfm;
      rowhit[M] = M;
      nrows     = 0;
      /* loop over columns */
      for (j=0; j<nctot; j++) {
        if (ctype == IS_COLORING_GHOSTED) {
          col = ltog[cols[j]];
        } else {
          col = cols[j];
        }
        if (col >= cstart && col < cend) {
          /* column is in diagonal block of matrix */
          rows = A_cj + A_ci[col-cstart];
          m    = A_ci[col-cstart+1] - A_ci[col-cstart];
        } else {
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(baij->colmap,col+1,&colb);CHKERRQ(ierr);
          colb--;
#else
          colb = baij->colmap[col] - 1;
#endif
          if (colb == -1) {
            m = 0;
          } else {
            colb = colb/bs;
            rows = B_cj + B_ci[colb];
            m    = B_ci[colb+1] - B_ci[colb];
          }
        }

        /* loop over the rows of this column, marking them in rowhit */
        fm = M; /* fm points to first entry in linked list */
        for (k=0; k<m; k++) {
          currentcol = *rows++;
          /* is it already in the list? */
          do {
            mfm = fm;
            fm  = rowhit[fm];
          } while (fm < currentcol);
          /* not in list so add it */
          if (fm != currentcol) {
            nrows++;
            columnsforrow[currentcol] = col;
            /* next three lines insert new entry into linked list */
            rowhit[mfm]        = currentcol;
            rowhit[currentcol] = fm;
            fm                 = currentcol;
            /* fm points to present position in list since we know the columns are sorted */
          } else {
            SETERRQ(PETSC_ERR_PLIB,"Invalid coloring of matrix detected");
          }
        }
      }
      c->nrows[i] = nrows;
      ierr = PetscMalloc((nrows+1)*sizeof(PetscInt),&c->rows[i]);CHKERRQ(ierr);
      ierr = PetscMalloc((nrows+1)*sizeof(PetscInt),&c->columnsforrow[i]);CHKERRQ(ierr);
      ierr = PetscLogObjectMemory(c,(nrows+1)*sizeof(PetscInt));CHKERRQ(ierr);
      /* now store the linked list of rows into c->rows[i] */
      nrows = 0;
      fm    = rowhit[M];
      do {
        c->rows[i][nrows]            = fm;
        c->columnsforrow[i][nrows++] = columnsforrow[fm];
        fm = rowhit[fm];
      } while (fm < M);
    } /* ---------------------------------------------------------------------------------------*/
    ierr = PetscFree(cols);CHKERRQ(ierr);
  }

  /* Optimize by adding the vscale, and scaleforrow[][] fields */
  /*
       vscale will contain the "diagonal" on processor scalings followed by the off processor
  */
  if (ctype == IS_COLORING_GLOBAL) {
    PetscInt *garray;
    ierr = PetscMalloc(baij->B->cmap->n*sizeof(PetscInt),&garray);CHKERRQ(ierr);
    for (i=0; i<baij->B->cmap->n/bs; i++) {
      for (j=0; j<bs; j++) {
        garray[i*bs+j] = bs*baij->garray[i]+j;
      }
    }
    ierr = VecCreateGhost(((PetscObject)mat)->comm,baij->A->rmap->n,PETSC_DETERMINE,baij->B->cmap->n,garray,&c->vscale);CHKERRQ(ierr);
    ierr = PetscFree(garray);CHKERRQ(ierr);
    CHKMEMQ;
    ierr = PetscMalloc(c->ncolors*sizeof(PetscInt*),&c->vscaleforrow);CHKERRQ(ierr);
    for (k=0; k<c->ncolors; k++) {
      ierr = PetscMalloc((c->nrows[k]+1)*sizeof(PetscInt),&c->vscaleforrow[k]);CHKERRQ(ierr);
      for (l=0; l<c->nrows[k]; l++) {
        col = c->columnsforrow[k][l];
        if (col >= cstart && col < cend) {
          /* column is in diagonal block of matrix */
          colb = col - cstart;
        } else {
          /* column is in "off-processor" part */
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(baij->colmap,col+1,&colb);CHKERRQ(ierr);
          colb--;
#else
          colb = baij->colmap[col] - 1;
#endif
          colb  = colb/bs;
          colb += cend - cstart;
        }
        c->vscaleforrow[k][l] = colb;
      }
    }
  } else if (ctype == IS_COLORING_GHOSTED) {
    /* Get gtol mapping */
    PetscInt N = mat->cmap->N, *gtol;
    ierr = PetscMalloc((N+1)*sizeof(PetscInt),&gtol);CHKERRQ(ierr);
    for (i=0; i<N; i++) gtol[i] = -1;
    for (i=0; i<map->n; i++) gtol[ltog[i]] = i;

    c->vscale = 0; /* will be created in MatFDColoringApply() */
    ierr = PetscMalloc(c->ncolors*sizeof(PetscInt*),&c->vscaleforrow);CHKERRQ(ierr);
    for (k=0; k<c->ncolors; k++) {
      ierr = PetscMalloc((c->nrows[k]+1)*sizeof(PetscInt),&c->vscaleforrow[k]);CHKERRQ(ierr);
      for (l=0; l<c->nrows[k]; l++) {
        col = c->columnsforrow[k][l];      /* global column index */
        c->vscaleforrow[k][l] = gtol[col]; /* local column index */
      }
    }
    ierr = PetscFree(gtol);CHKERRQ(ierr);
  }
  ierr = ISColoringRestoreIS(iscoloring,&isa);CHKERRQ(ierr);

  ierr = PetscFree(rowhit);CHKERRQ(ierr);
  ierr = PetscFree(columnsforrow);CHKERRQ(ierr);
  ierr = MatRestoreColumnIJ(baij->A,0,PETSC_FALSE,PETSC_FALSE,&ncols,&A_ci,&A_cj,&done);CHKERRQ(ierr);
  ierr = MatRestoreColumnIJ(baij->B,0,PETSC_FALSE,PETSC_FALSE,&ncols,&B_ci,&B_cj,&done);CHKERRQ(ierr);
  CHKMEMQ;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetSeqNonzerostructure_MPIBAIJ"
PetscErrorCode MatGetSeqNonzerostructure_MPIBAIJ(Mat A,Mat *newmat)
{
  Mat            B;
  Mat_MPIBAIJ    *a  = (Mat_MPIBAIJ *)A->data;
  Mat_SeqBAIJ    *ad = (Mat_SeqBAIJ*)a->A->data,*bd = (Mat_SeqBAIJ*)a->B->data;
  Mat_SeqAIJ     *b;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,*recvcounts = 0,*displs = 0;
  PetscInt       sendcount,i,*rstarts = A->rmap->range,n,cnt,j,bs = A->rmap->bs;
  PetscInt       m,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(((PetscObject)A)->comm,&rank);CHKERRQ(ierr);

  /* ----------------------------------------------------------------
       Tell every processor the number of nonzeros per row
  */
  ierr = PetscMalloc((A->rmap->N/bs)*sizeof(PetscInt),&lens);CHKERRQ(ierr);
  for (i=A->rmap->rstart/bs; i<A->rmap->rend/bs; i++) {
    lens[i] = ad->i[i-A->rmap->rstart/bs+1] - ad->i[i-A->rmap->rstart/bs] + bd->i[i-A->rmap->rstart/bs+1] - bd->i[i-A->rmap->rstart/bs];
  }
  sendcount = A->rmap->rend/bs - A->rmap->rstart/bs;
  ierr   = PetscMalloc(2*size*sizeof(PetscMPIInt),&recvcounts);CHKERRQ(ierr);
  displs = recvcounts + size;
  for (i=0; i<size; i++) {
    recvcounts[i] = A->rmap->range[i+1]/bs - A->rmap->range[i]/bs;
    displs[i]     = A->rmap->range[i]/bs;
  }
#if defined(PETSC_HAVE_MPI_IN_PLACE)
  ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,lens,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);CHKERRQ(ierr);
#else
  ierr = MPI_Allgatherv(lens+A->rmap->rstart/bs,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);CHKERRQ(ierr);
#endif
  /* ---------------------------------------------------------------
       Create the sequential matrix of the same type as the local block diagonal
  */
  ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr);
  ierr = MatSetSizes(B,A->rmap->N/bs,A->cmap->N/bs,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetType(B,MATSEQAIJ);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(B,0,lens);CHKERRQ(ierr);
  b = (Mat_SeqAIJ *)B->data;

  /*--------------------------------------------------------------------
      Copy my part of matrix column indices over
  */
  sendcount  = ad->nz + bd->nz;
  jsendbuf   = b->j + b->i[rstarts[rank]/bs];
  a_jsendbuf = ad->j;
  b_jsendbuf = bd->j;
  n          = A->rmap->rend/bs - A->rmap->rstart/bs;
  cnt        = 0;
  for (i=0; i<n; i++) {

    /* put in lower diagonal portion */
    m = bd->i[i+1] - bd->i[i];
    while (m > 0) {
      /* is it above diagonal (in bd (compressed) numbering) */
      if (garray[*b_jsendbuf] > A->rmap->rstart/bs + i) break;
      jsendbuf[cnt++] = garray[*b_jsendbuf++];
      m--;
    }

    /* put in diagonal portion */
    for (j=ad->i[i]; j<ad->i[i+1]; j++) {
      jsendbuf[cnt++] = A->rmap->rstart/bs + *a_jsendbuf++;
    }

    /* put in upper diagonal portion */
    while (m-- > 0) {
      jsendbuf[cnt++] = garray[*b_jsendbuf++];
    }
  }
  if (cnt != sendcount) SETERRQ2(PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt);

  /*--------------------------------------------------------------------
      Gather all column indices to all processors
  */
  for (i=0; i<size; i++) {
    recvcounts[i] = 0;
    for (j=A->rmap->range[i]/bs; j<A->rmap->range[i+1]/bs; j++) {
      recvcounts[i] += lens[j];
    }
  }
  displs[0] = 0;
  for (i=1; i<size; i++) {
    displs[i] = displs[i-1] + recvcounts[i-1];
  }
#if defined(PETSC_HAVE_MPI_IN_PLACE)
  ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,b->j,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);CHKERRQ(ierr);
#else
  ierr = MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,((PetscObject)A)->comm);CHKERRQ(ierr);
#endif
  /*--------------------------------------------------------------------
      Assemble the matrix into usable form (note numerical values not yet set)
  */
  /* set the b->ilen (length of each row) values */
  ierr = PetscMemcpy(b->ilen,lens,(A->rmap->N/bs)*sizeof(PetscInt));CHKERRQ(ierr);
  /* set the b->i indices */
  b->i[0] = 0;
  for (i=1; i<=A->rmap->N/bs; i++) {
    b->i[i] = b->i[i-1] + lens[i-1];
  }
  ierr = PetscFree(lens);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree(recvcounts);CHKERRQ(ierr);

  if (A->symmetric){
    ierr = MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  } else if (A->hermitian) {
    ierr = MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr);
  } else if (A->structurally_symmetric) {
    ierr = MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  }
  *newmat = B;
  PetscFunctionReturn(0);
}
extern PetscErrorCode PETSCMAT_DLLEXPORT MatFDColoringApply_BAIJ(Mat,MatFDColoring,Vec,MatStructure*,void*);

/* -------------------------------------------------------------------*/
static struct _MatOps MatOps_Values = {
       MatSetValues_MPIBAIJ,
       MatGetRow_MPIBAIJ,
       MatRestoreRow_MPIBAIJ,
       MatMult_MPIBAIJ,
/* 4*/ MatMultAdd_MPIBAIJ,
       MatMultTranspose_MPIBAIJ,
       MatMultTransposeAdd_MPIBAIJ,
       0,
       0,
       0,
/*10*/ 0,
       0,
       0,
       0,
       MatTranspose_MPIBAIJ,
/*15*/ MatGetInfo_MPIBAIJ,
       MatEqual_MPIBAIJ,
       MatGetDiagonal_MPIBAIJ,
       MatDiagonalScale_MPIBAIJ,
       MatNorm_MPIBAIJ,
/*20*/ MatAssemblyBegin_MPIBAIJ,
       MatAssemblyEnd_MPIBAIJ,
       MatSetOption_MPIBAIJ,
       MatZeroEntries_MPIBAIJ,
/*24*/ MatZeroRows_MPIBAIJ,
       0,
       0,
       0,
       0,
/*29*/ MatSetUpPreallocation_MPIBAIJ,
       0,
       0,
       0,
       0,
/*34*/ MatDuplicate_MPIBAIJ,
       0,
       0,
       0,
       0,
/*39*/ MatAXPY_MPIBAIJ,
       MatGetSubMatrices_MPIBAIJ,
       MatIncreaseOverlap_MPIBAIJ,
       MatGetValues_MPIBAIJ,
       MatCopy_MPIBAIJ,
/*44*/ 0,
       MatScale_MPIBAIJ,
       0,
       0,
       0,
/*49*/ 0,
       0,
       0,
       0,
       0,
/*54*/ MatFDColoringCreate_MPIBAIJ,
       0,
       MatSetUnfactored_MPIBAIJ,
       MatPermute_MPIBAIJ,
       MatSetValuesBlocked_MPIBAIJ,
/*59*/ MatGetSubMatrix_MPIBAIJ,
       MatDestroy_MPIBAIJ,
       MatView_MPIBAIJ,
       0,
       0,
/*64*/ 0,
       0,
       0,
       0,
       0,
/*69*/ MatGetRowMaxAbs_MPIBAIJ,
       0,
       0,
       0,
       0,
/*74*/ 0,
       MatFDColoringApply_BAIJ,
       0,
       0,
       0,
/*79*/ 0,
       0,
       0,
       0,
       MatLoad_MPIBAIJ,
/*84*/ 0,
       0,
       0,
       0,
       0,
/*89*/ 0,
       0,
       0,
       0,
       0,
/*94*/ 0,
       0,
       0,
       0,
       0,
/*99*/ 0,
       0,
       0,
       0,
       0,
/*104*/0,
       MatRealPart_MPIBAIJ,
       MatImaginaryPart_MPIBAIJ,
       0,
       0,
/*109*/0,
       0,
       0,
       0,
       0,
/*114*/MatGetSeqNonzerostructure_MPIBAIJ,
       0,
       MatGetGhosts_MPIBAIJ
};

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatGetDiagonalBlock_MPIBAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatGetDiagonalBlock_MPIBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
{
  PetscFunctionBegin;
  *a      = ((Mat_MPIBAIJ *)A->data)->A;
  *iscopy = PETSC_FALSE;
  PetscFunctionReturn(0);
}
EXTERN_C_END

EXTERN_C_BEGIN
extern PetscErrorCode PETSCMAT_DLLEXPORT MatConvert_MPIBAIJ_MPISBAIJ(Mat, MatType,MatReuse,Mat*);
EXTERN_C_END
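/*
   Worked example (illustrative) of the block CSR input consumed by the
   preallocation routine below.  With bs = 2 and two local block rows whose
   block columns are {0,2} and {1}, the caller passes

     ii = {0,2,3}        block row i has block columns jj[ii[i]] .. jj[ii[i+1]-1]
     jj = {0,2,1}        sorted within each block row
     V  = 3 blocks * bs*bs = 12 scalars, bs*bs values per block in the layout
                            expected by MatSetValuesBlocked()

   or V = PETSC_NULL to preallocate and insert zeros.
*/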
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetPreallocationCSR_MPIBAIJ"
PetscErrorCode MatMPIBAIJSetPreallocationCSR_MPIBAIJ(Mat B,PetscInt bs,const PetscInt ii[],const PetscInt jj[],const PetscScalar V[])
{
  PetscInt       m,rstart,cstart,cend;
  PetscInt       i,j,d,nz,nz_max=0,*d_nnz=0,*o_nnz=0;
  const PetscInt *JJ=0;
  PetscScalar    *values=0;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (bs < 1) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive but it is %D",bs);
  ierr   = PetscMapSetBlockSize(B->rmap,bs);CHKERRQ(ierr);
  ierr   = PetscMapSetBlockSize(B->cmap,bs);CHKERRQ(ierr);
  ierr   = PetscMapSetUp(B->rmap);CHKERRQ(ierr);
  ierr   = PetscMapSetUp(B->cmap);CHKERRQ(ierr);
  m      = B->rmap->n/bs;
  rstart = B->rmap->rstart/bs;
  cstart = B->cmap->rstart/bs;
  cend   = B->cmap->rend/bs;

  if (ii[0]) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"ii[0] must be 0 but it is %D",ii[0]);
  ierr  = PetscMalloc((2*m+1)*sizeof(PetscInt),&d_nnz);CHKERRQ(ierr);
  o_nnz = d_nnz + m;
  for (i=0; i<m; i++) {
    nz = ii[i+1] - ii[i];
    if (nz < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nz);
    nz_max = PetscMax(nz_max,nz);
    JJ     = jj + ii[i];
    /* count how many block columns fall inside the diagonal range [cstart,cend) */
    for (j=0; j<nz; j++) {
      if (*JJ >= cstart) break;
      JJ++;
    }
    d = 0;
    for (; j<nz; j++) {
      if (*JJ++ >= cend) break;
      d++;
    }
    d_nnz[i] = d;
    o_nnz[i] = nz - d;
  }
  ierr = MatMPIBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
  ierr = PetscFree(d_nnz);CHKERRQ(ierr);

  values = (PetscScalar*)V;
  if (!values) {
    ierr = PetscMalloc(bs*bs*(nz_max+1)*sizeof(PetscScalar),&values);CHKERRQ(ierr);
    ierr = PetscMemzero(values,bs*bs*nz_max*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt          row    = i + rstart;
    PetscInt          ncols  = ii[i+1] - ii[i];
    const PetscInt    *icols = jj + ii[i];
    const PetscScalar *svals = values + (V ? (bs*bs*ii[i]) : 0);
    ierr = MatSetValuesBlocked_MPIBAIJ(B,1,&row,ncols,icols,svals,INSERT_VALUES);CHKERRQ(ierr);
  }

  if (!V) { ierr = PetscFree(values);CHKERRQ(ierr); }
  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
EXTERN_C_END

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetPreallocationCSR"
/*@C
   MatMPIBAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in block AIJ (BAIJ) format
   (block compressed row).

   Collective on MPI_Comm

   Input Parameters:
+  A - the matrix
.  bs - the block size
.  i - the indices into j for the start of each local block row (starts with zero)
.  j - the block column indices for each local block row (starts with zero); these must be sorted for each row
-  v - optional values in the matrix

   Level: developer

.keywords: matrix, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation(), MatCreateMPIBAIJ(), MPIBAIJ
@*/
PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJSetPreallocationCSR(Mat B,PetscInt bs,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
{
  PetscErrorCode ierr,(*f)(Mat,PetscInt,const PetscInt[],const PetscInt[],const PetscScalar[]);

  PetscFunctionBegin;
  ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPIBAIJSetPreallocationCSR_C",(void (**)(void))&f);CHKERRQ(ierr);
  if (f) {
    ierr = (*f)(B,bs,i,j,v);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than -1: local row %D value %D",i,o_nnz[i]); 2751 } 2752 } 2753 2754 b = (Mat_MPIBAIJ*)B->data; 2755 b->bs2 = bs*bs; 2756 b->mbs = B->rmap->n/bs; 2757 b->nbs = B->cmap->n/bs; 2758 b->Mbs = B->rmap->N/bs; 2759 b->Nbs = B->cmap->N/bs; 2760 2761 for (i=0; i<=b->size; i++) { 2762 b->rangebs[i] = B->rmap->range[i]/bs; 2763 } 2764 b->rstartbs = B->rmap->rstart/bs; 2765 b->rendbs = B->rmap->rend/bs; 2766 b->cstartbs = B->cmap->rstart/bs; 2767 b->cendbs = B->cmap->rend/bs; 2768 2769 if (!B->preallocated) { 2770 ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); 2771 ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); 2772 ierr = MatSetType(b->A,MATSEQBAIJ);CHKERRQ(ierr); 2773 ierr = PetscLogObjectParent(B,b->A);CHKERRQ(ierr); 2774 ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); 2775 ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr); 2776 ierr = MatSetType(b->B,MATSEQBAIJ);CHKERRQ(ierr); 2777 ierr = PetscLogObjectParent(B,b->B);CHKERRQ(ierr); 2778 ierr = MatStashCreate_Private(((PetscObject)B)->comm,bs,&B->bstash);CHKERRQ(ierr); 2779 } 2780 2781 ierr = MatSeqBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);CHKERRQ(ierr); 2782 ierr = MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);CHKERRQ(ierr); 2783 B->preallocated = PETSC_TRUE; 2784 PetscFunctionReturn(0); 2785 } 2786 EXTERN_C_END 2787 2788 EXTERN_C_BEGIN 2789 EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatDiagonalScaleLocal_MPIBAIJ(Mat,Vec); 2790 EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatSetHashTableFactor_MPIBAIJ(Mat,PetscReal); 2791 EXTERN_C_END 2792 2793 2794 EXTERN_C_BEGIN 2795 #undef __FUNCT__ 2796 #define __FUNCT__ "MatConvert_MPIBAIJ_MPIAdj" 2797 PetscErrorCode PETSCMAT_DLLEXPORT MatConvert_MPIBAIJ_MPIAdj(Mat B, const MatType newtype,MatReuse reuse,Mat *adj) 2798 { 2799 Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)B->data; 2800 PetscErrorCode ierr; 2801 Mat_SeqBAIJ *d = (Mat_SeqBAIJ*) b->A->data,*o = (Mat_SeqBAIJ*) b->B->data; 2802 PetscInt M = B->rmap->n/B->rmap->bs,i,*ii,*jj,cnt,j,k,rstart = B->rmap->rstart/B->rmap->bs; 2803 const PetscInt *id = d->i, *jd = d->j, *io = o->i, *jo = o->j, *garray = b->garray; 2804 2805 PetscFunctionBegin; 2806 ierr = PetscMalloc((M+1)*sizeof(PetscInt),&ii);CHKERRQ(ierr); 2807 ii[0] = 0; 2808 CHKMEMQ; 2809 for (i=0; i<M; i++) { 2810 if ((id[i+1] - id[i]) < 0) SETERRQ3(PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,id[i],id[i+1]); 2811 if ((io[i+1] - io[i]) < 0) SETERRQ3(PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,io[i],io[i+1]); 2812 ii[i+1] = ii[i] + id[i+1] - id[i] + io[i+1] - io[i]; 2813 /* remove one from count of matrix has diagonal */ 2814 for (j=id[i]; j<id[i+1]; j++) { 2815 if (jd[j] == i) {ii[i+1]--;break;} 2816 } 2817 CHKMEMQ; 2818 } 2819 ierr = PetscMalloc(ii[M]*sizeof(PetscInt),&jj);CHKERRQ(ierr); 2820 cnt = 0; 2821 for (i=0; i<M; i++) { 2822 for (j=io[i]; j<io[i+1]; j++) { 2823 if (garray[jo[j]] > rstart) break; 2824 jj[cnt++] = garray[jo[j]]; 2825 CHKMEMQ; 2826 } 2827 for (k=id[i]; k<id[i+1]; k++) { 2828 if (jd[k] != i) { 2829 jj[cnt++] = rstart + jd[k]; 2830 CHKMEMQ; 2831 } 2832 } 2833 for (;j<io[i+1]; j++) { 2834 jj[cnt++] = garray[jo[j]]; 2835 CHKMEMQ; 2836 } 2837 } 2838 ierr = MatCreateMPIAdj(((PetscObject)B)->comm,M,B->cmap->N/B->rmap->bs,ii,jj,PETSC_NULL,adj);CHKERRQ(ierr); 2839 PetscFunctionReturn(0); 2840 } 2841 EXTERN_C_END 2842 2843 /*MC 2844 MATMPIBAIJ - MATMPIBAIJ = "mpibaij" - A matrix type to be used for distributed block sparse matrices. 
/*MC
   MATMPIBAIJ - MATMPIBAIJ = "mpibaij" - A matrix type to be used for distributed block sparse matrices.

   Options Database Keys:
+ -mat_type mpibaij - sets the matrix type to "mpibaij" during a call to MatSetFromOptions()
. -mat_block_size <bs> - set the blocksize used to store the matrix
- -mat_use_hash_table <fact> - use a hash table during assembly, with table size factor <fact>

  Level: beginner

.seealso: MatCreateMPIBAIJ
M*/

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatCreate_MPIBAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_MPIBAIJ(Mat B)
{
  Mat_MPIBAIJ    *b;
  PetscErrorCode ierr;
  PetscTruth     flg;

  PetscFunctionBegin;
  ierr    = PetscNewLog(B,Mat_MPIBAIJ,&b);CHKERRQ(ierr);
  B->data = (void*)b;

  ierr = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
  B->mapping   = 0;
  B->assembled = PETSC_FALSE;

  B->insertmode = NOT_SET_VALUES;
  ierr = MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(((PetscObject)B)->comm,&b->size);CHKERRQ(ierr);

  /* build local table of row and column ownerships */
  ierr = PetscMalloc((b->size+1)*sizeof(PetscInt),&b->rangebs);CHKERRQ(ierr);

  /* build cache for off array entries formed */
  ierr = MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);CHKERRQ(ierr);
  b->donotstash  = PETSC_FALSE;
  b->colmap      = PETSC_NULL;
  b->garray      = PETSC_NULL;
  b->roworiented = PETSC_TRUE;

  /* stuff used in block assembly */
  b->barray = 0;

  /* stuff used for matrix vector multiply */
  b->lvec  = 0;
  b->Mvctx = 0;

  /* stuff for MatGetRow() */
  b->rowindices   = 0;
  b->rowvalues    = 0;
  b->getrowactive = PETSC_FALSE;

  /* hash table stuff */
  b->ht           = 0;
  b->hd           = 0;
  b->ht_size      = 0;
  b->ht_flag      = PETSC_FALSE;
  b->ht_fact      = 0;
  b->ht_total_ct  = 0;
  b->ht_insert_ct = 0;

  ierr = PetscOptionsBegin(((PetscObject)B)->comm,PETSC_NULL,"Options for loading MPIBAIJ matrix 1","Mat");CHKERRQ(ierr);
  ierr = PetscOptionsTruth("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",PETSC_FALSE,&flg,PETSC_NULL);CHKERRQ(ierr);
  if (flg) {
    PetscReal fact = 1.39;
    ierr = MatSetOption(B,MAT_USE_HASH_TABLE,PETSC_TRUE);CHKERRQ(ierr);
    ierr = PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,PETSC_NULL);CHKERRQ(ierr);
    if (fact <= 1.0) fact = 1.39;
    ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
    ierr = PetscInfo1(B,"Hash table Factor used %5.2f\n",fact);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpibaij_mpiadj_C",
                                     "MatConvert_MPIBAIJ_MPIAdj",
                                      MatConvert_MPIBAIJ_MPIAdj);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
                                     "MatStoreValues_MPIBAIJ",
                                      MatStoreValues_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
                                     "MatRetrieveValues_MPIBAIJ",
                                      MatRetrieveValues_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
                                     "MatGetDiagonalBlock_MPIBAIJ",
                                      MatGetDiagonalBlock_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBAIJSetPreallocation_C",
                                     "MatMPIBAIJSetPreallocation_MPIBAIJ",
                                      MatMPIBAIJSetPreallocation_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBAIJSetPreallocationCSR_C",
                                     "MatMPIBAIJSetPreallocationCSR_MPIBAIJ",
                                      MatMPIBAIJSetPreallocationCSR_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatDiagonalScaleLocal_C",
                                     "MatDiagonalScaleLocal_MPIBAIJ",
                                      MatDiagonalScaleLocal_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatSetHashTableFactor_C",
                                     "MatSetHashTableFactor_MPIBAIJ",
                                      MatSetHashTableFactor_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIBAIJ);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END

/*MC
   MATBAIJ - MATBAIJ = "baij" - A matrix type to be used for block sparse matrices.

   This matrix type is identical to MATSEQBAIJ when constructed with a single process communicator,
   and MATMPIBAIJ otherwise.

   Options Database Keys:
. -mat_type baij - sets the matrix type to "baij" during a call to MatSetFromOptions()

  Level: beginner

.seealso: MatCreateMPIBAIJ(),MATSEQBAIJ,MATMPIBAIJ, MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
M*/

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatCreate_BAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_BAIJ(Mat A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = MatSetType(A,MATSEQBAIJ);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
EXTERN_C_END

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetPreallocation"
/*@C
   MatMPIBAIJSetPreallocation - Allocates memory for a sparse parallel matrix in block AIJ format
   (block compressed row). For good matrix assembly performance
   the user should preallocate the matrix storage by setting the parameters
   d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
   performance can be increased by more than a factor of 50.

   Collective on Mat

   Input Parameters:
+  A - the matrix
.  bs    - size of block
.  d_nz  - number of block nonzeros per block row in diagonal portion of local
           submatrix (same for all local rows)
.  d_nnz - array containing the number of block nonzeros in the various block rows
           of the diagonal portion of the local submatrix (possibly different for each block
           row) or PETSC_NULL. You must leave room for the diagonal entry even if it is zero.
.  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
           submatrix (same for all local rows).
-  o_nnz - array containing the number of nonzeros in the various block rows of the
           off-diagonal portion of the local submatrix (possibly different for
           each block row) or PETSC_NULL.

   If the *_nnz parameter is given then the *_nz parameter is ignored

   Options Database Keys:
+   -mat_block_size - size of the blocks to use
-   -mat_use_hash_table <fact> - use a hash table during assembly, with table size factor <fact>

   Notes:
   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
   then it must be used on all processors that share the object for that argument.
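   For example, a sketch of a typical preallocation call with block size 2,
   using constant per-block-row estimates (the matrix A and its sizes are
   illustrative):
.vb
     ierr = MatCreate(comm,&A);
     ierr = MatSetSizes(A,m,n,M,N);
     ierr = MatSetType(A,MATMPIBAIJ);
     ierr = MatMPIBAIJSetPreallocation(A,2,5,PETSC_NULL,2,PETSC_NULL);
.ve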
   Storage Information:
   For a square global matrix we define each processor's diagonal portion
   to be its local rows and the corresponding columns (a square submatrix);
   each processor's off-diagonal portion encompasses the remainder of the
   local matrix (a rectangular submatrix).

   The user can specify preallocated storage for the diagonal part of
   the local submatrix with either d_nz or d_nnz (not both). Set
   d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
   memory allocation. Likewise, specify preallocated storage for the
   off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

   Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
   the figure below we depict these three local rows and all columns (0-11).

.vb
           0 1 2 3 4 5 6 7 8 9 10 11
          -------------------
   row 3  |  o o o d d d o o o o o o
   row 4  |  o o o d d d o o o o o o
   row 5  |  o o o d d d o o o o o o
          -------------------
.ve

   Thus, any entries in the d locations are stored in the d (diagonal)
   submatrix, and any entries in the o locations are stored in the
   o (off-diagonal) submatrix. Note that the d and the o submatrices are
   stored simply in the MATSEQBAIJ format for compressed row storage.

   Now d_nz should indicate the number of block nonzeros per row in the d matrix,
   and o_nz should indicate the number of block nonzeros per row in the o matrix.
   In general, for PDE problems in which most nonzeros are near the diagonal,
   one expects d_nz >> o_nz. For large problems you MUST preallocate memory
   or you will get TERRIBLE performance; see the users' manual chapter on
   matrices.

   You can call MatGetInfo() to get information on how effective the preallocation was;
   for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
   You can also run with the option -info and look for messages with the string
   malloc in them to see if additional memory allocation was needed.

   Level: intermediate

.keywords: matrix, block, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateMPIBAIJ(), MatMPIBAIJSetPreallocationCSR()
@*/
PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  PetscErrorCode ierr,(*f)(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);

  PetscFunctionBegin;
  ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPIBAIJSetPreallocation_C",(void (**)(void))&f);CHKERRQ(ierr);
  if (f) {
    ierr = (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
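/*
   Worked example (illustrative): building exact preallocation counts.  For a
   1-D mesh where block row i couples to block rows i-1, i, and i+1, a process
   owning a contiguous range of rows can set d_nnz[i] = 3 for rows whose
   neighbors are all local, and o_nnz[i] = 1 for its first and last local
   rows, whose single off-process neighbor lives in the off-diagonal block
   (with d_nnz reduced accordingly at the ends).
*/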
   Now d_nz should indicate the number of block nonzeros per row in the d matrix,
   and o_nz should indicate the number of block nonzeros per row in the o matrix.
   In general, for PDE problems in which most nonzeros are near the diagonal,
   one expects d_nz >> o_nz. For large problems you MUST preallocate memory
   or you will get TERRIBLE performance; see the users' manual chapter on
   matrices.

   Level: intermediate

.keywords: matrix, block, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateMPIBAIJ(), MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
@*/
PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    ierr = MatSetType(*A,MATMPIBAIJ);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQBAIJ);CHKERRQ(ierr);
    ierr = MatSeqBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIBAIJ"
static PetscErrorCode MatDuplicate_MPIBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
{
  Mat            mat;
  Mat_MPIBAIJ    *a,*oldmat = (Mat_MPIBAIJ*)matin->data;
  PetscErrorCode ierr;
  PetscInt       len=0;

  PetscFunctionBegin;
  *newmat = 0;
  ierr = MatCreate(((PetscObject)matin)->comm,&mat);CHKERRQ(ierr);
  ierr = MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(mat,((PetscObject)matin)->type_name);CHKERRQ(ierr);
  ierr = PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));CHKERRQ(ierr);

  mat->factor       = matin->factor;
  mat->preallocated = PETSC_TRUE;
  mat->assembled    = PETSC_TRUE;
  mat->insertmode   = NOT_SET_VALUES;

  a = (Mat_MPIBAIJ*)mat->data;
  mat->rmap->bs = matin->rmap->bs;
  a->bs2 = oldmat->bs2;
  a->mbs = oldmat->mbs;
  a->nbs = oldmat->nbs;
  a->Mbs = oldmat->Mbs;
  a->Nbs = oldmat->Nbs;

  ierr = PetscMapCopy(((PetscObject)matin)->comm,matin->rmap,mat->rmap);CHKERRQ(ierr);
  ierr = PetscMapCopy(((PetscObject)matin)->comm,matin->cmap,mat->cmap);CHKERRQ(ierr);

  a->size         = oldmat->size;
  a->rank         = oldmat->rank;
  a->donotstash   = oldmat->donotstash;
  a->roworiented  = oldmat->roworiented;
  a->rowindices   = 0;
  a->rowvalues    = 0;
  a->getrowactive = PETSC_FALSE;
  a->barray       = 0;
  a->rstartbs     = oldmat->rstartbs;
  a->rendbs       = oldmat->rendbs;
  a->cstartbs     = oldmat->cstartbs;
  a->cendbs       = oldmat->cendbs;

  /* hash table stuff */
  a->ht           = 0;
  a->hd           = 0;
  a->ht_size      = 0;
  a->ht_flag      = oldmat->ht_flag;
  a->ht_fact      = oldmat->ht_fact;
  a->ht_total_ct  = 0;
  a->ht_insert_ct = 0;

  ierr = PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+1)*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = MatStashCreate_Private(((PetscObject)matin)->comm,1,&mat->stash);CHKERRQ(ierr);
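  /* mat->stash buffers scalar entries and mat->bstash buffers bs x bs blocks
     destined for rows owned by other processes during assembly */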
  ierr = MatStashCreate_Private(((PetscObject)matin)->comm,matin->rmap->bs,&mat->bstash);CHKERRQ(ierr);
  if (oldmat->colmap) {
#if defined (PETSC_USE_CTABLE)
    ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
#else
    ierr = PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);CHKERRQ(ierr);
    ierr = PetscLogObjectMemory(mat,(a->Nbs)*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));CHKERRQ(ierr);
#endif
  } else a->colmap = 0;

  if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
    ierr = PetscMalloc(len*sizeof(PetscInt),&a->garray);CHKERRQ(ierr);
    ierr = PetscLogObjectMemory(mat,len*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));CHKERRQ(ierr);
  } else a->garray = 0;

  ierr = VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,a->lvec);CHKERRQ(ierr);
  ierr = VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,a->Mvctx);CHKERRQ(ierr);

  ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,a->A);CHKERRQ(ierr);
  ierr = MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
  ierr = PetscLogObjectParent(mat,a->B);CHKERRQ(ierr);
  ierr = PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);CHKERRQ(ierr);
  *newmat = mat;

  PetscFunctionReturn(0);
}

#include "petscsys.h"

#undef __FUNCT__
#define __FUNCT__ "MatLoad_MPIBAIJ"
PetscErrorCode MatLoad_MPIBAIJ(PetscViewer viewer,const MatType type,Mat *newmat)
{
  Mat            A;
  PetscErrorCode ierr;
  int            fd;
  PetscInt       i,nz,j,rstart,rend;
  PetscScalar    *vals,*buf;
  MPI_Comm       comm = ((PetscObject)viewer)->comm;
  MPI_Status     status;
  PetscMPIInt    rank,size,maxnz;
  PetscInt       header[4],*rowlengths = 0,M,N,m,*rowners,*cols;
  PetscInt       *locrowlens = PETSC_NULL,*procsnz = PETSC_NULL,*browners = PETSC_NULL;
  PetscInt       jj,*mycols,*ibuf,bs=1,Mbs,mbs,extra_rows,mmax;
  PetscMPIInt    tag = ((PetscObject)viewer)->tag;
  PetscInt       *dlens = PETSC_NULL,*odlens = PETSC_NULL,*mask = PETSC_NULL,*masked1 = PETSC_NULL,*masked2 = PETSC_NULL,rowcount,odcount;
  PetscInt       dcount,kmax,k,nzcount,tmp,mend;

  PetscFunctionBegin;
  ierr = PetscOptionsBegin(comm,PETSC_NULL,"Options for loading MPIBAIJ matrix 2","Mat");CHKERRQ(ierr);
  ierr = PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,PETSC_NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
    ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
    if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not a matrix object");
  }

  ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
  M = header[1]; N = header[2];

  if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");

  /*
     This code adds extra rows to make sure the number of rows is
     divisible by the blocksize
  */
  Mbs        = M/bs;
  extra_rows = bs - M + bs*Mbs;
  if (extra_rows == bs) extra_rows = 0;
  else                  Mbs++;
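  /* For example (illustrative values only): M = 10 with bs = 3 gives Mbs = 4
     and extra_rows = 2; the padded rows receive a single unit diagonal entry
     when the values are inserted below */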
  if (extra_rows && !rank) {
    ierr = PetscInfo(viewer,"Padding loaded matrix to match blocksize\n");CHKERRQ(ierr);
  }

  /* determine ownership of all rows */
  mbs = Mbs/size + ((Mbs % size) > rank);
  m   = mbs*bs;
  ierr = PetscMalloc2(size+1,PetscInt,&rowners,size+1,PetscInt,&browners);CHKERRQ(ierr);
  ierr = MPI_Allgather(&mbs,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);

  /* process 0 needs enough room for the process with the most rows */
  if (!rank) {
    mmax = rowners[1];
    for (i=2; i<size; i++) {
      mmax = PetscMax(mmax,rowners[i]);
    }
    mmax *= bs;
  } else mmax = m;

  rowners[0] = 0;
  for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
  for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
  rstart = rowners[rank];
  rend   = rowners[rank+1];

  /* distribute row lengths to all processors */
  ierr = PetscMalloc((mmax+1)*sizeof(PetscInt),&locrowlens);CHKERRQ(ierr);
  if (!rank) {
    mend = m;
    if (size == 1) mend = mend - extra_rows;
    ierr = PetscBinaryRead(fd,locrowlens,mend,PETSC_INT);CHKERRQ(ierr);
    for (j=mend; j<m; j++) locrowlens[j] = 1;
    ierr = PetscMalloc(m*sizeof(PetscInt),&rowlengths);CHKERRQ(ierr);
    ierr = PetscMalloc(size*sizeof(PetscInt),&procsnz);CHKERRQ(ierr);
    ierr = PetscMemzero(procsnz,size*sizeof(PetscInt));CHKERRQ(ierr);
    for (j=0; j<m; j++) {
      procsnz[0] += locrowlens[j];
    }
    for (i=1; i<size; i++) {
      mend = browners[i+1] - browners[i];
      if (i == size-1) mend = mend - extra_rows;
      ierr = PetscBinaryRead(fd,rowlengths,mend,PETSC_INT);CHKERRQ(ierr);
      for (j=mend; j<browners[i+1] - browners[i]; j++) rowlengths[j] = 1;
      /* calculate the number of nonzeros on each processor */
      for (j=0; j<browners[i+1]-browners[i]; j++) {
        procsnz[i] += rowlengths[j];
      }
      ierr = MPI_Send(rowlengths,browners[i+1]-browners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(rowlengths);CHKERRQ(ierr);
  } else {
    ierr = MPI_Recv(locrowlens,m,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
  }

  if (!rank) {
    /* determine max buffer needed and allocate it */
    maxnz = procsnz[0];
    for (i=1; i<size; i++) {
      maxnz = PetscMax(maxnz,procsnz[i]);
    }
    ierr = PetscMalloc(maxnz*sizeof(PetscInt),&cols);CHKERRQ(ierr);

    /* read in my part of the matrix column indices */
    nz     = procsnz[0];
    ierr   = PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);CHKERRQ(ierr);
    mycols = ibuf;
    if (size == 1) nz -= extra_rows;
    ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
    if (size == 1) for (i=0; i<extra_rows; i++) { mycols[nz+i] = M+i; }

    /* read in the column indices for all other processes (except the last) and ship them off */
    for (i=1; i<size-1; i++) {
      nz   = procsnz[i];
      ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
      ierr = MPI_Send(cols,nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
    }
    /* read in the part for the last process */
    if (size != 1) {
      nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
      ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
      for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
      ierr = MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(cols);CHKERRQ(ierr);
  } else {
    /* determine buffer space needed for message */
    nz = 0;
    for (i=0; i<m; i++) {
      nz += locrowlens[i];
    }
    ierr = PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);CHKERRQ(ierr);
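    /* mycols is advanced row by row during insertion below, while ibuf
       keeps the base address for the final PetscFree() */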
    mycols = ibuf;
    /* receive message of column indices */
    ierr = MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
    ierr = MPI_Get_count(&status,MPIU_INT,&maxnz);CHKERRQ(ierr);
    if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
  }

  /* loop over local rows, determining the number of off-diagonal entries */
  ierr = PetscMalloc2(rend-rstart,PetscInt,&dlens,rend-rstart,PetscInt,&odlens);CHKERRQ(ierr);
  ierr = PetscMalloc3(Mbs,PetscInt,&mask,Mbs,PetscInt,&masked1,Mbs,PetscInt,&masked2);CHKERRQ(ierr);
  ierr = PetscMemzero(mask,Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(masked1,Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(masked2,Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  rowcount = 0; nzcount = 0;
  for (i=0; i<mbs; i++) {
    dcount  = 0;
    odcount = 0;
    for (j=0; j<bs; j++) {
      kmax = locrowlens[rowcount];
      for (k=0; k<kmax; k++) {
        tmp = mycols[nzcount++]/bs;
        if (!mask[tmp]) {
          mask[tmp] = 1;
          if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp;
          else masked1[dcount++] = tmp;
        }
      }
      rowcount++;
    }

    dlens[i]  = dcount;
    odlens[i] = odcount;

    /* zero out the mask elements we set */
    for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
    for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
  }

  /* create our matrix */
  ierr = MatCreate(comm,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,m,m,M+extra_rows,N+extra_rows);CHKERRQ(ierr);
  ierr = MatSetType(A,type);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocation(A,bs,0,dlens,0,odlens);CHKERRQ(ierr);

  if (!rank) {
    ierr = PetscMalloc((maxnz+1)*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
    /* read in my part of the matrix numerical values */
    nz     = procsnz[0];
    vals   = buf;
    mycols = ibuf;
    if (size == 1) nz -= extra_rows;
    ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
    if (size == 1) for (i=0; i<extra_rows; i++) { vals[nz+i] = 1.0; }

    /* insert into matrix */
    jj = rstart*bs;
    for (i=0; i<m; i++) {
      ierr    = MatSetValues_MPIBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
      mycols += locrowlens[i];
      vals   += locrowlens[i];
      jj++;
    }
    /* read in the parts for the other processes (except the last one) and ship them out */
    for (i=1; i<size-1; i++) {
      nz   = procsnz[i];
      vals = buf;
      ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
      ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)A)->tag,comm);CHKERRQ(ierr);
    }
    /* the last process (i == size-1 after the loop above) */
    if (size != 1) {
      nz   = procsnz[i] - extra_rows;
      vals = buf;
      ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
      for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
      ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,((PetscObject)A)->tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(procsnz);CHKERRQ(ierr);
  } else {
    /* receive numeric values */
    ierr = PetscMalloc((nz+1)*sizeof(PetscScalar),&buf);CHKERRQ(ierr);

    /* receive message of values */
    vals   = buf;
    mycols = ibuf;
    ierr = MPI_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)A)->tag,comm,&status);CHKERRQ(ierr);
    ierr = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
    if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");

    /* insert into matrix */
    jj = rstart*bs;
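    /* rstart counts block rows, so rstart*bs is this process's first global point row */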
    for (i=0; i<m; i++) {
      ierr    = MatSetValues_MPIBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
      mycols += locrowlens[i];
      vals   += locrowlens[i];
      jj++;
    }
  }
  ierr = PetscFree(locrowlens);CHKERRQ(ierr);
  ierr = PetscFree(buf);CHKERRQ(ierr);
  ierr = PetscFree(ibuf);CHKERRQ(ierr);
  ierr = PetscFree2(rowners,browners);CHKERRQ(ierr);
  ierr = PetscFree2(dlens,odlens);CHKERRQ(ierr);
  ierr = PetscFree3(mask,masked1,masked2);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  *newmat = A;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetHashTableFactor"
/*@
   MatMPIBAIJSetHashTableFactor - Sets the factor used to compute the size of the hash table.

   Input Parameters:
+  mat  - the matrix
-  fact - the factor used to compute the hash table size

   Collective on Mat

   Level: advanced

   Notes:
   This can also be set by the command line option: -mat_use_hash_table <fact>

.keywords: matrix, hashtable, factor, HT

.seealso: MatSetOption()
@*/
PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJSetHashTableFactor(Mat mat,PetscReal fact)
{
  PetscErrorCode ierr,(*f)(Mat,PetscReal);

  PetscFunctionBegin;
  ierr = PetscObjectQueryFunction((PetscObject)mat,"MatSetHashTableFactor_C",(void (**)(void))&f);CHKERRQ(ierr);
  if (f) {
    ierr = (*f)(mat,fact);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatSetHashTableFactor_MPIBAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatSetHashTableFactor_MPIBAIJ(Mat mat,PetscReal fact)
{
  Mat_MPIBAIJ *baij;

  PetscFunctionBegin;
  baij          = (Mat_MPIBAIJ*)mat->data;
  baij->ht_fact = fact;
  PetscFunctionReturn(0);
}
EXTERN_C_END

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJGetSeqBAIJ"
PetscErrorCode PETSCMAT_DLLEXPORT MatMPIBAIJGetSeqBAIJ(Mat A,Mat *Ad,Mat *Ao,PetscInt *colmap[])
{
  Mat_MPIBAIJ *a = (Mat_MPIBAIJ *)A->data;

  PetscFunctionBegin;
  *Ad     = a->A;
  *Ao     = a->B;
  *colmap = a->garray;
  PetscFunctionReturn(0);
}
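/*
   A minimal usage sketch (illustrative only; A is assumed to be an assembled
   MATMPIBAIJ matrix):

      Mat      Ad,Ao;
      PetscInt *garray;
      ierr = MatMPIBAIJGetSeqBAIJ(A,&Ad,&Ao,&garray);CHKERRQ(ierr);

   Ad and Ao are the local diagonal and off-diagonal SeqBAIJ parts; garray[i]
   gives the global block column corresponding to local block column i of Ao.
*/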
/*
    Special version for direct calls from Fortran (to eliminate two function call overheads)
*/
#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define matmpibaijsetvaluesblocked_ MATMPIBAIJSETVALUESBLOCKED
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define matmpibaijsetvaluesblocked_ matmpibaijsetvaluesblocked
#endif

#undef __FUNCT__
#define __FUNCT__ "matmpibaijsetvaluesblocked"
/*@C
  MatMPIBAIJSetValuesBlocked - Direct Fortran call to replace a call to MatSetValuesBlocked()

  Collective on Mat

  Input Parameters:
+ mat - the matrix
. min - number of input rows
. im - input rows
. nin - number of input columns
. in - input columns
. v - numerical values input
- addvin - INSERT_VALUES or ADD_VALUES

  Notes: This contains a complete copy of MatSetValuesBlocked_MPIBAIJ(), which is an unfortunate failure of code reuse.

  Level: advanced

.seealso: MatSetValuesBlocked()
@*/
PetscErrorCode matmpibaijsetvaluesblocked_(Mat *matin,PetscInt *min,const PetscInt im[],PetscInt *nin,const PetscInt in[],const MatScalar v[],InsertMode *addvin)
{
  /* convert input arguments to the C version */
  Mat        mat  = *matin;
  PetscInt   m    = *min, n = *nin;
  InsertMode addv = *addvin;

  Mat_MPIBAIJ     *baij = (Mat_MPIBAIJ*)mat->data;
  const MatScalar *value;
  MatScalar       *barray = baij->barray;
  PetscTruth      roworiented = baij->roworiented;
  PetscErrorCode  ierr;
  PetscInt        i,j,ii,jj,row,col,rstart=baij->rstartbs;
  PetscInt        rend=baij->rendbs,cstart=baij->cstartbs,stepval;
  PetscInt        cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;

  PetscFunctionBegin;
  /* tasks normally handled by MatSetValuesBlocked() */
  if (mat->insertmode == NOT_SET_VALUES) {
    mat->insertmode = addv;
  }
#if defined(PETSC_USE_DEBUG)
  else if (mat->insertmode != addv) {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
  }
  if (mat->factor) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
#endif
  if (mat->assembled) {
    mat->was_assembled = PETSC_TRUE;
    mat->assembled     = PETSC_FALSE;
  }
  ierr = PetscLogEventBegin(MAT_SetValues,mat,0,0,0);CHKERRQ(ierr);

  if (!barray) {
    ierr = PetscMalloc(bs2*sizeof(MatScalar),&barray);CHKERRQ(ierr);
    baij->barray = barray;
  }

  if (roworiented) {
    stepval = (n-1)*bs;
  } else {
    stepval = (m-1)*bs;
  }
  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
#endif
    if (im[i] >= rstart && im[i] < rend) {
      row = im[i] - rstart;
      for (j=0; j<n; j++) {
        /* a copy is not required when only a single block is passed in
           (n == 1 when row-oriented, m == 1 when column-oriented) */
        if ((roworiented) && (n == 1)) {
          barray = (MatScalar*)v + i*bs2;
        } else if ((!roworiented) && (m == 1)) {
          barray = (MatScalar*)v + j*bs2;
        } else { /* here a copy is required */
          if (roworiented) {
            value = v + i*(stepval+bs)*bs + j*bs;
          } else {
            value = v + j*(stepval+bs)*bs + i*bs;
          }
          for (ii=0; ii<bs; ii++,value+=stepval) {
            for (jj=0; jj<bs; jj++) {
              *barray++ = *value++;
            }
          }
          barray -= bs2;
        }

        if (in[j] >= cstart && in[j] < cend) {
          col  = in[j] - cstart;
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->A,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        }
        else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);}
#endif
        else {
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
            }

#if defined(PETSC_USE_DEBUG)
#if defined (PETSC_USE_CTABLE)
            { PetscInt data;
              ierr = PetscTableFind(baij->colmap,in[j]+1,&data);CHKERRQ(ierr);
              if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
            }
#else
            if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
#endif
#endif
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]+1,&col);CHKERRQ(ierr);
            col  = (col - 1)/bs;
#else
            col = (baij->colmap[in[j]] - 1)/bs;
#endif
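            /* col < 0 signals a block column not yet present in the
               off-diagonal part; unless new nonzeros are disallowed, the
               matrix is disassembled and global indexing is used again */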
            if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
              ierr = DisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
            }
          } else col = in[j];
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        }
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        }
      }
    }
  }

  /* task normally handled by MatSetValuesBlocked() */
  ierr = PetscLogEventEnd(MAT_SetValues,mat,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}