#include <../src/mat/impls/baij/mpi/mpibaij.h> /*I "petscmat.h" I*/
#include <petscblaslapack.h>

extern PetscErrorCode MatSetUpMultiply_MPIBAIJ(Mat);
extern PetscErrorCode MatDisAssemble_MPIBAIJ(Mat);
extern PetscErrorCode MatGetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],PetscScalar[]);
extern PetscErrorCode MatSetValues_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
extern PetscErrorCode MatSetValuesBlocked_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[],const PetscScalar[],InsertMode);
extern PetscErrorCode MatGetRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
extern PetscErrorCode MatRestoreRow_SeqBAIJ(Mat,PetscInt,PetscInt*,PetscInt*[],PetscScalar*[]);
extern PetscErrorCode MatZeroRows_SeqBAIJ(Mat,PetscInt,const PetscInt[],PetscScalar,Vec,Vec);

#undef __FUNCT__
#define __FUNCT__ "MatGetRowMaxAbs_MPIBAIJ"
PetscErrorCode MatGetRowMaxAbs_MPIBAIJ(Mat A,Vec v,PetscInt idx[])
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       i,*idxb = 0;
  PetscScalar    *va,*vb;
  Vec            vtmp;

  PetscFunctionBegin;
  ierr = MatGetRowMaxAbs(a->A,v,idx);CHKERRQ(ierr);
  ierr = VecGetArray(v,&va);CHKERRQ(ierr);
  if (idx) {
    for (i=0; i<A->rmap->n; i++) {
      if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
    }
  }

  ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
  if (idx) {ierr = PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);CHKERRQ(ierr);}
  ierr = MatGetRowMaxAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
  ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);

  for (i=0; i<A->rmap->n; i++) {
    if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
      va[i] = vb[i];
      if (idx) idx[i] = A->cmap->bs*a->garray[idxb[i]/A->cmap->bs] + (idxb[i] % A->cmap->bs);
    }
  }

  ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
  ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
  ierr = PetscFree(idxb);CHKERRQ(ierr);
  ierr = VecDestroy(&vtmp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatStoreValues_MPIBAIJ"
PetscErrorCode MatStoreValues_MPIBAIJ(Mat mat)
{
  Mat_MPIBAIJ    *aij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
  ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatRetrieveValues_MPIBAIJ"
PetscErrorCode MatRetrieveValues_MPIBAIJ(Mat mat)
{
  Mat_MPIBAIJ    *aij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
  ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
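/*
   Usage sketch (illustrative, not part of this file): the two routines above simply
   forward to the sequential diagonal (A) and off-diagonal (B) pieces. An application
   that refills the same nonzero pattern many times would typically do, after the
   first assembly,

       ierr = MatSetOption(mat,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);CHKERRQ(ierr);
       ierr = MatStoreValues(mat);CHKERRQ(ierr);     save the current numerical values
       ... modify the matrix ...
       ierr = MatRetrieveValues(mat);CHKERRQ(ierr);  restore the saved values
*/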
83 */ 84 #undef __FUNCT__ 85 #define __FUNCT__ "MatCreateColmap_MPIBAIJ_Private" 86 PetscErrorCode MatCreateColmap_MPIBAIJ_Private(Mat mat) 87 { 88 Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data; 89 Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data; 90 PetscErrorCode ierr; 91 PetscInt nbs = B->nbs,i,bs=mat->rmap->bs; 92 93 PetscFunctionBegin; 94 #if defined(PETSC_USE_CTABLE) 95 ierr = PetscTableCreate(baij->nbs,baij->Nbs+1,&baij->colmap);CHKERRQ(ierr); 96 for (i=0; i<nbs; i++) { 97 ierr = PetscTableAdd(baij->colmap,baij->garray[i]+1,i*bs+1,INSERT_VALUES);CHKERRQ(ierr); 98 } 99 #else 100 ierr = PetscMalloc((baij->Nbs+1)*sizeof(PetscInt),&baij->colmap);CHKERRQ(ierr); 101 ierr = PetscLogObjectMemory((PetscObject)mat,baij->Nbs*sizeof(PetscInt));CHKERRQ(ierr); 102 ierr = PetscMemzero(baij->colmap,baij->Nbs*sizeof(PetscInt));CHKERRQ(ierr); 103 for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs+1; 104 #endif 105 PetscFunctionReturn(0); 106 } 107 108 #define MatSetValues_SeqBAIJ_A_Private(row,col,value,addv) \ 109 { \ 110 \ 111 brow = row/bs; \ 112 rp = aj + ai[brow]; ap = aa + bs2*ai[brow]; \ 113 rmax = aimax[brow]; nrow = ailen[brow]; \ 114 bcol = col/bs; \ 115 ridx = row % bs; cidx = col % bs; \ 116 low = 0; high = nrow; \ 117 while (high-low > 3) { \ 118 t = (low+high)/2; \ 119 if (rp[t] > bcol) high = t; \ 120 else low = t; \ 121 } \ 122 for (_i=low; _i<high; _i++) { \ 123 if (rp[_i] > bcol) break; \ 124 if (rp[_i] == bcol) { \ 125 bap = ap + bs2*_i + bs*cidx + ridx; \ 126 if (addv == ADD_VALUES) *bap += value; \ 127 else *bap = value; \ 128 goto a_noinsert; \ 129 } \ 130 } \ 131 if (a->nonew == 1) goto a_noinsert; \ 132 if (a->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \ 133 MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \ 134 N = nrow++ - 1; \ 135 /* shift up all the later entries in this row */ \ 136 for (ii=N; ii>=_i; ii--) { \ 137 rp[ii+1] = rp[ii]; \ 138 ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \ 139 } \ 140 if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); } \ 141 rp[_i] = bcol; \ 142 ap[bs2*_i + bs*cidx + ridx] = value; \ 143 a_noinsert:; \ 144 ailen[brow] = nrow; \ 145 } 146 147 #define MatSetValues_SeqBAIJ_B_Private(row,col,value,addv) \ 148 { \ 149 brow = row/bs; \ 150 rp = bj + bi[brow]; ap = ba + bs2*bi[brow]; \ 151 rmax = bimax[brow]; nrow = bilen[brow]; \ 152 bcol = col/bs; \ 153 ridx = row % bs; cidx = col % bs; \ 154 low = 0; high = nrow; \ 155 while (high-low > 3) { \ 156 t = (low+high)/2; \ 157 if (rp[t] > bcol) high = t; \ 158 else low = t; \ 159 } \ 160 for (_i=low; _i<high; _i++) { \ 161 if (rp[_i] > bcol) break; \ 162 if (rp[_i] == bcol) { \ 163 bap = ap + bs2*_i + bs*cidx + ridx; \ 164 if (addv == ADD_VALUES) *bap += value; \ 165 else *bap = value; \ 166 goto b_noinsert; \ 167 } \ 168 } \ 169 if (b->nonew == 1) goto b_noinsert; \ 170 if (b->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \ 171 MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \ 172 N = nrow++ - 1; \ 173 /* shift up all the later entries in this row */ \ 174 for (ii=N; ii>=_i; ii--) { \ 175 rp[ii+1] = rp[ii]; \ 176 ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \ 177 } \ 178 if (N>=_i) { ierr = 
#define MatSetValues_SeqBAIJ_A_Private(row,col,value,addv) \
  { \
    brow = row/bs; \
    rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
    rmax = aimax[brow]; nrow = ailen[brow]; \
    bcol = col/bs; \
    ridx = row % bs; cidx = col % bs; \
    low  = 0; high = nrow; \
    while (high-low > 3) { \
      t = (low+high)/2; \
      if (rp[t] > bcol) high = t; \
      else              low  = t; \
    } \
    for (_i=low; _i<high; _i++) { \
      if (rp[_i] > bcol) break; \
      if (rp[_i] == bcol) { \
        bap = ap + bs2*_i + bs*cidx + ridx; \
        if (addv == ADD_VALUES) *bap += value; \
        else                    *bap  = value; \
        goto a_noinsert; \
      } \
    } \
    if (a->nonew == 1) goto a_noinsert; \
    if (a->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
    MatSeqXAIJReallocateAIJ(A,a->mbs,bs2,nrow,brow,bcol,rmax,aa,ai,aj,rp,ap,aimax,a->nonew,MatScalar); \
    N = nrow++ - 1; \
    /* shift up all the later entries in this row */ \
    for (ii=N; ii>=_i; ii--) { \
      rp[ii+1] = rp[ii]; \
      ierr     = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
    } \
    if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); } \
    rp[_i]                      = bcol; \
    ap[bs2*_i + bs*cidx + ridx] = value; \
a_noinsert:; \
    ailen[brow] = nrow; \
  }

#define MatSetValues_SeqBAIJ_B_Private(row,col,value,addv) \
  { \
    brow = row/bs; \
    rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
    rmax = bimax[brow]; nrow = bilen[brow]; \
    bcol = col/bs; \
    ridx = row % bs; cidx = col % bs; \
    low  = 0; high = nrow; \
    while (high-low > 3) { \
      t = (low+high)/2; \
      if (rp[t] > bcol) high = t; \
      else              low  = t; \
    } \
    for (_i=low; _i<high; _i++) { \
      if (rp[_i] > bcol) break; \
      if (rp[_i] == bcol) { \
        bap = ap + bs2*_i + bs*cidx + ridx; \
        if (addv == ADD_VALUES) *bap += value; \
        else                    *bap  = value; \
        goto b_noinsert; \
      } \
    } \
    if (b->nonew == 1) goto b_noinsert; \
    if (b->nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
    MatSeqXAIJReallocateAIJ(B,b->mbs,bs2,nrow,brow,bcol,rmax,ba,bi,bj,rp,ap,bimax,b->nonew,MatScalar); \
    N = nrow++ - 1; \
    /* shift up all the later entries in this row */ \
    for (ii=N; ii>=_i; ii--) { \
      rp[ii+1] = rp[ii]; \
      ierr     = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
    } \
    if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); } \
    rp[_i]                      = bcol; \
    ap[bs2*_i + bs*cidx + ridx] = value; \
b_noinsert:; \
    bilen[brow] = nrow; \
  }

#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPIBAIJ"
PetscErrorCode MatSetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ    *baij       = (Mat_MPIBAIJ*)mat->data;
  MatScalar      value;
  PetscBool      roworiented = baij->roworiented;
  PetscErrorCode ierr;
  PetscInt       i,j,row,col;
  PetscInt       rstart_orig=mat->rmap->rstart;
  PetscInt       rend_orig  =mat->rmap->rend,cstart_orig=mat->cmap->rstart;
  PetscInt       cend_orig  =mat->cmap->rend,bs=mat->rmap->bs;

  /* Some variables required in the macros */
  Mat         A     = baij->A;
  Mat_SeqBAIJ *a    = (Mat_SeqBAIJ*)(A)->data;
  PetscInt    *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
  MatScalar   *aa   =a->a;

  Mat         B     = baij->B;
  Mat_SeqBAIJ *b    = (Mat_SeqBAIJ*)(B)->data;
  PetscInt    *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
  MatScalar   *ba   =b->a;

  PetscInt  *rp,ii,nrow,_i,rmax,N,brow,bcol;
  PetscInt  low,high,t,ridx,cidx,bs2=a->bs2;
  MatScalar *ap,*bap;

  PetscFunctionBegin;
  if (v) PetscValidScalarPointer(v,6);
  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
#endif
    if (im[i] >= rstart_orig && im[i] < rend_orig) {
      row = im[i] - rstart_orig;
      for (j=0; j<n; j++) {
        if (in[j] >= cstart_orig && in[j] < cend_orig) {
          col = in[j] - cstart_orig;
          if (roworiented) value = v[i*n+j];
          else             value = v[i+j*m];
          MatSetValues_SeqBAIJ_A_Private(row,col,value,addv);
          /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
#endif
        else {
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = MatCreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
            }
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
            col  = col - 1;
#else
            col = baij->colmap[in[j]/bs] - 1;
#endif
            if (col < 0 && !((Mat_SeqBAIJ*)(baij->B->data))->nonew) {
              ierr = MatDisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
              /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
              B    = baij->B;
              b    = (Mat_SeqBAIJ*)(B)->data;
              bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
              ba   =b->a;
            } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", im[i], in[j]);
            else col += in[j]%bs;
          } else col = in[j];
          if (roworiented) value = v[i*n+j];
          else             value = v[i+j*m];
          MatSetValues_SeqBAIJ_B_Private(row,col,value,addv);
          /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
        }
      }
    } else {
      if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
      if (!baij->donotstash) {
        mat->assembled = PETSC_FALSE;
        if (roworiented) {
          ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturn(0);
}
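/*
   Usage sketch (illustrative): values are always addressed with global point
   (not block) indices, so inserting the 2x2 block whose upper-left corner is
   global entry (4,6) of a bs = 2 matrix looks like

       PetscInt    rows[2] = {4,5},cols[2] = {6,7};
       PetscScalar vals[4] = {1.0,2.0,3.0,4.0};   row-oriented by default
       ierr = MatSetValues(mat,2,rows,2,cols,vals,INSERT_VALUES);CHKERRQ(ierr);

   Entries falling in the local diagonal block go through
   MatSetValues_SeqBAIJ_A_Private(), local off-diagonal entries through the _B_
   variant, and off-process entries are stashed until MatAssemblyBegin/End().
*/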
#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ"
PetscErrorCode MatSetValuesBlocked_MPIBAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ       *baij = (Mat_MPIBAIJ*)mat->data;
  const PetscScalar *value;
  MatScalar         *barray     = baij->barray;
  PetscBool         roworiented = baij->roworiented;
  PetscErrorCode    ierr;
  PetscInt          i,j,ii,jj,row,col,rstart=baij->rstartbs;
  PetscInt          rend=baij->rendbs,cstart=baij->cstartbs,stepval;
  PetscInt          cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;

  PetscFunctionBegin;
  if (!barray) {
    ierr = PetscMalloc(bs2*sizeof(MatScalar),&barray);CHKERRQ(ierr);
    baij->barray = barray;
  }

  if (roworiented) stepval = (n-1)*bs;
  else             stepval = (m-1)*bs;

  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
#endif
    if (im[i] >= rstart && im[i] < rend) {
      row = im[i] - rstart;
      for (j=0; j<n; j++) {
        /* If there is only one block column, no copy is required */
        if ((roworiented) && (n == 1)) {
          barray = (MatScalar*)v + i*bs2;
        } else if ((!roworiented) && (m == 1)) {
          barray = (MatScalar*)v + j*bs2;
        } else { /* Here a copy is required */
          if (roworiented) {
            value = v + (i*(stepval+bs) + j)*bs;
          } else {
            value = v + (j*(stepval+bs) + i)*bs;
          }
          for (ii=0; ii<bs; ii++,value+=bs+stepval) {
            for (jj=0; jj<bs; jj++) barray[jj] = value[jj];
            barray += bs;
          }
          barray -= bs2;
        }

        if (in[j] >= cstart && in[j] < cend) {
          col  = in[j] - cstart;
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->A,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);
#endif
        else {
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = MatCreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
            }

#if defined(PETSC_USE_DEBUG)
#if defined(PETSC_USE_CTABLE)
            { PetscInt data;
              ierr = PetscTableFind(baij->colmap,in[j]+1,&data);CHKERRQ(ierr);
              if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
            }
#else
            if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
#endif
#endif
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]+1,&col);CHKERRQ(ierr);
            col  = (col - 1)/bs;
#else
            col = (baij->colmap[in[j]] - 1)/bs;
#endif
            if (col < 0 && !((Mat_SeqBAIJ*)(baij->B->data))->nonew) {
              ierr = MatDisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
            } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", bs*im[i], bs*in[j]);
          } else col = in[j];
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        }
      }
    } else {
      if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturn(0);
}
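/*
   Note on the copy in the routine above (descriptive): MatSetValuesBlocked_SeqBAIJ()
   expects each bs-by-bs block as one contiguous array. When several block rows and
   columns are passed in v, the block for column j is strided across the input, so it
   is first gathered into the scratch buffer barray; the single-row or single-column
   cases can pass a pointer into v directly and skip the copy.
*/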
#define HASH_KEY 0.6180339887
#define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(PetscInt)((size)*(tmp-(PetscInt)tmp)))
/* #define HASH(size,key) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
/* #define HASH(size,key,tmp) ((PetscInt)((size)*fmod(((key)*HASH_KEY),1))) */
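/*
   HASH() is multiplicative hashing with the golden-ratio constant (HASH_KEY is
   approximately (sqrt(5)-1)/2): multiply the key by HASH_KEY, keep the fractional
   part, and scale it to a slot in [0,size). Worked example with made-up numbers:
   key = 13 and size = 100 give tmp = 13*0.6180339887 = 8.0344..., fractional part
   0.0344..., hence h1 = 3. Collisions are resolved by the linear probe loops in
   the callers below.
*/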
#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPIBAIJ_HT"
PetscErrorCode MatSetValues_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ    *baij       = (Mat_MPIBAIJ*)mat->data;
  PetscBool      roworiented = baij->roworiented;
  PetscErrorCode ierr;
  PetscInt       i,j,row,col;
  PetscInt       rstart_orig=mat->rmap->rstart;
  PetscInt       rend_orig  =mat->rmap->rend,Nbs=baij->Nbs;
  PetscInt       h1,key,size=baij->ht_size,bs=mat->rmap->bs,*HT=baij->ht,idx;
  PetscReal      tmp;
  MatScalar      **HD = baij->hd,value;
#if defined(PETSC_USE_DEBUG)
  PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
#endif

  PetscFunctionBegin;
  if (v) PetscValidScalarPointer(v,6);
  for (i=0; i<m; i++) {
#if defined(PETSC_USE_DEBUG)
    if (im[i] < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
    if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
#endif
    row = im[i];
    if (row >= rstart_orig && row < rend_orig) {
      for (j=0; j<n; j++) {
        col = in[j];
        if (roworiented) value = v[i*n+j];
        else             value = v[i+j*m];
        /* Look up in the hash table */
        key = (row/bs)*Nbs+(col/bs)+1;
        h1  = HASH(size,key,tmp);

        idx = h1;
#if defined(PETSC_USE_DEBUG)
        insert_ct++;
        total_ct++;
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++) ;
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++) ;
            if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
          }
        }
#else
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++) ;
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++) ;
            if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
          }
        }
#endif
        /* A hash table entry was found, so insert the value at the correct address */
        if (addv == ADD_VALUES) *(HD[idx]+ (col % bs)*bs + (row % bs)) += value;
        else                    *(HD[idx]+ (col % bs)*bs + (row % bs))  = value;
      }
    } else if (!baij->donotstash) {
      if (roworiented) {
        ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,PETSC_FALSE);CHKERRQ(ierr);
      } else {
        ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,PETSC_FALSE);CHKERRQ(ierr);
      }
    }
  }
#if defined(PETSC_USE_DEBUG)
  baij->ht_total_ct  = total_ct;
  baij->ht_insert_ct = insert_ct;
#endif
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ_HT"
PetscErrorCode MatSetValuesBlocked_MPIBAIJ_HT(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIBAIJ       *baij       = (Mat_MPIBAIJ*)mat->data;
  PetscBool         roworiented = baij->roworiented;
  PetscErrorCode    ierr;
  PetscInt          i,j,ii,jj,row,col;
  PetscInt          rstart=baij->rstartbs;
  PetscInt          rend  =mat->rmap->rend,stepval,bs=mat->rmap->bs,bs2=baij->bs2,nbs2=n*bs2;
  PetscInt          h1,key,size=baij->ht_size,idx,*HT=baij->ht,Nbs=baij->Nbs;
  PetscReal         tmp;
  MatScalar         **HD = baij->hd,*baij_a;
  const PetscScalar *v_t,*value;
#if defined(PETSC_USE_DEBUG)
  PetscInt total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
#endif

  PetscFunctionBegin;
  if (roworiented) stepval = (n-1)*bs;
  else             stepval = (m-1)*bs;

  for (i=0; i<m; i++) {
#if defined(PETSC_USE_DEBUG)
    if (im[i] < 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",im[i]);
    if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],baij->Mbs-1);
#endif
    row = im[i];
    v_t = v + i*nbs2;
    if (row >= rstart && row < rend) {
      for (j=0; j<n; j++) {
        col = in[j];

        /* Look up in the hash table */
        key = row*Nbs+col+1;
        h1  = HASH(size,key,tmp);

        idx = h1;
#if defined(PETSC_USE_DEBUG)
        total_ct++;
        insert_ct++;
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++) ;
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++) ;
            if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
          }
        }
#else
        if (HT[idx] != key) {
          for (idx=h1; (idx<size) && (HT[idx]!=key); idx++) ;
          if (idx == size) {
            for (idx=0; (idx<h1) && (HT[idx]!=key); idx++) ;
            if (idx == h1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"(%D,%D) has no entry in the hash table", row, col);
          }
        }
#endif
        baij_a = HD[idx];
        if (roworiented) {
          /*value = v + i*(stepval+bs)*bs + j*bs;*/
          /* value = v + (i*(stepval+bs)+j)*bs; */
          value = v_t;
          v_t  += bs;
          if (addv == ADD_VALUES) {
            for (ii=0; ii<bs; ii++,value+=stepval) {
              for (jj=ii; jj<bs2; jj+=bs) {
                baij_a[jj] += *value++;
              }
            }
          } else {
            for (ii=0; ii<bs; ii++,value+=stepval) {
              for (jj=ii; jj<bs2; jj+=bs) {
                baij_a[jj] = *value++;
              }
            }
          }
        } else {
          value = v + j*(stepval+bs)*bs + i*bs;
          if (addv == ADD_VALUES) {
            for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
              for (jj=0; jj<bs; jj++) {
                baij_a[jj] += *value++;
              }
            }
          } else {
            for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
              for (jj=0; jj<bs; jj++) {
                baij_a[jj] = *value++;
              }
            }
          }
        }
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        }
      }
    }
  }
#if defined(PETSC_USE_DEBUG)
  baij->ht_total_ct  = total_ct;
  baij->ht_insert_ct = insert_ct;
#endif
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatGetValues_MPIBAIJ"
PetscErrorCode MatGetValues_MPIBAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       bs = mat->rmap->bs,i,j,bsrstart = mat->rmap->rstart,bsrend = mat->rmap->rend;
  PetscInt       bscstart = mat->cmap->rstart,bscend = mat->cmap->rend,row,col,data;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]); */
    if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
    if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
      row = idxm[i] - bsrstart;
      for (j=0; j<n; j++) {
        if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
        if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
        if (idxn[j] >= bscstart && idxn[j] < bscend) {
          col  = idxn[j] - bscstart;
          ierr = MatGetValues_SeqBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
        } else {
          if (!baij->colmap) {
            ierr = MatCreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
          }
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
          data--;
#else
          data = baij->colmap[idxn[j]/bs]-1;
#endif
          if ((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
          else {
            col  = data + idxn[j]%bs;
            ierr = MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
          }
        }
      }
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
  }
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatNorm_MPIBAIJ"
PetscErrorCode MatNorm_MPIBAIJ(Mat mat,NormType type,PetscReal *nrm)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *amat = (Mat_SeqBAIJ*)baij->A->data,*bmat = (Mat_SeqBAIJ*)baij->B->data;
  PetscErrorCode ierr;
  PetscInt       i,j,bs2=baij->bs2,bs=baij->A->rmap->bs,nz,row,col;
  PetscReal      sum = 0.0;
  MatScalar      *v;

  PetscFunctionBegin;
  if (baij->size == 1) {
    ierr = MatNorm(baij->A,type,nrm);CHKERRQ(ierr);
  } else {
    if (type == NORM_FROBENIUS) {
      v  = amat->a;
      nz = amat->nz*bs2;
      for (i=0; i<nz; i++) {
        sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
      }
      v  = bmat->a;
      nz = bmat->nz*bs2;
      for (i=0; i<nz; i++) {
        sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
      }
      ierr = MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      *nrm = PetscSqrtReal(*nrm);
    } else if (type == NORM_1) { /* max column sum */
      PetscReal *tmp,*tmp2;
      PetscInt  *jj,*garray=baij->garray,cstart=baij->rstartbs;
      ierr = PetscMalloc2(mat->cmap->N,PetscReal,&tmp,mat->cmap->N,PetscReal,&tmp2);CHKERRQ(ierr);
      ierr = PetscMemzero(tmp,mat->cmap->N*sizeof(PetscReal));CHKERRQ(ierr);
      v    = amat->a; jj = amat->j;
      for (i=0; i<amat->nz; i++) {
        for (j=0; j<bs; j++) {
          col = bs*(cstart + *jj) + j; /* column index */
          for (row=0; row<bs; row++) {
            tmp[col] += PetscAbsScalar(*v); v++;
          }
        }
        jj++;
      }
      v = bmat->a; jj = bmat->j;
      for (i=0; i<bmat->nz; i++) {
        for (j=0; j<bs; j++) {
          col = bs*garray[*jj] + j;
          for (row=0; row<bs; row++) {
            tmp[col] += PetscAbsScalar(*v); v++;
          }
        }
        jj++;
      }
      ierr = MPI_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      *nrm = 0.0;
      for (j=0; j<mat->cmap->N; j++) {
        if (tmp2[j] > *nrm) *nrm = tmp2[j];
      }
      ierr = PetscFree2(tmp,tmp2);CHKERRQ(ierr);
    } else if (type == NORM_INFINITY) { /* max row sum */
      PetscReal *sums;
      ierr = PetscMalloc(bs*sizeof(PetscReal),&sums);CHKERRQ(ierr);
      sum  = 0.0;
      for (j=0; j<amat->mbs; j++) {
        for (row=0; row<bs; row++) sums[row] = 0.0;
        v  = amat->a + bs2*amat->i[j];
        nz = amat->i[j+1]-amat->i[j];
        for (i=0; i<nz; i++) {
          for (col=0; col<bs; col++) {
            for (row=0; row<bs; row++) {
              sums[row] += PetscAbsScalar(*v); v++;
            }
          }
        }
        v  = bmat->a + bs2*bmat->i[j];
        nz = bmat->i[j+1]-bmat->i[j];
        for (i=0; i<nz; i++) {
          for (col=0; col<bs; col++) {
            for (row=0; row<bs; row++) {
              sums[row] += PetscAbsScalar(*v); v++;
            }
          }
        }
        for (row=0; row<bs; row++) {
          if (sums[row] > sum) sum = sums[row];
        }
      }
      ierr = MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      ierr = PetscFree(sums);CHKERRQ(ierr);
    } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for this norm yet");
  }
  PetscFunctionReturn(0);
}
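/*
   Descriptive note on the norms above: NORM_FROBENIUS sums |a_ij|^2 over the local
   A (diagonal) and B (off-diagonal) parts and reduces with MPIU_SUM before taking
   the square root; NORM_1 (max column sum) accumulates into a work array of full
   global column length N, which is why it does not scale; NORM_INFINITY (max row
   sum) needs only bs partial sums per block row followed by an MPIU_MAX reduction.
*/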
/*
  Creates the hash table and fills in its entries.
  This table is created only once. If new entries need to be added to the
  matrix, the hash table has to be destroyed and recreated.
*/
#undef __FUNCT__
#define __FUNCT__ "MatCreateHashTable_MPIBAIJ_Private"
PetscErrorCode MatCreateHashTable_MPIBAIJ_Private(Mat mat,PetscReal factor)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  Mat            A     = baij->A,B=baij->B;
  Mat_SeqBAIJ    *a    = (Mat_SeqBAIJ*)A->data,*b=(Mat_SeqBAIJ*)B->data;
  PetscInt       i,j,k,nz=a->nz+b->nz,h1,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
  PetscErrorCode ierr;
  PetscInt       ht_size,bs2=baij->bs2,rstart=baij->rstartbs;
  PetscInt       cstart=baij->cstartbs,*garray=baij->garray,row,col,Nbs=baij->Nbs;
  PetscInt       *HT,key;
  MatScalar      **HD;
  PetscReal      tmp;
#if defined(PETSC_USE_INFO)
  PetscInt ct=0,max=0;
#endif

  PetscFunctionBegin;
  if (baij->ht) PetscFunctionReturn(0);

  baij->ht_size = (PetscInt)(factor*nz);
  ht_size       = baij->ht_size;

  /* Allocate memory for the hash table */
  ierr = PetscMalloc2(ht_size,MatScalar*,&baij->hd,ht_size,PetscInt,&baij->ht);CHKERRQ(ierr);
  ierr = PetscMemzero(baij->hd,ht_size*sizeof(MatScalar*));CHKERRQ(ierr);
  ierr = PetscMemzero(baij->ht,ht_size*sizeof(PetscInt));CHKERRQ(ierr);
  HD   = baij->hd;
  HT   = baij->ht;

  /* Loop over A */
  for (i=0; i<a->mbs; i++) {
    for (j=ai[i]; j<ai[i+1]; j++) {
      row = i+rstart;
      col = aj[j]+cstart;

      key = row*Nbs + col + 1;
      h1  = HASH(ht_size,key,tmp);
      for (k=0; k<ht_size; k++) {
        if (!HT[(h1+k)%ht_size]) {
          HT[(h1+k)%ht_size] = key;
          HD[(h1+k)%ht_size] = a->a + j*bs2;
          break;
#if defined(PETSC_USE_INFO)
        } else {
          ct++;
#endif
        }
      }
#if defined(PETSC_USE_INFO)
      if (k > max) max = k;
#endif
    }
  }
  /* Loop over B */
  for (i=0; i<b->mbs; i++) {
    for (j=bi[i]; j<bi[i+1]; j++) {
      row = i+rstart;
      col = garray[bj[j]];
      key = row*Nbs + col + 1;
      h1  = HASH(ht_size,key,tmp);
      for (k=0; k<ht_size; k++) {
        if (!HT[(h1+k)%ht_size]) {
          HT[(h1+k)%ht_size] = key;
          HD[(h1+k)%ht_size] = b->a + j*bs2;
          break;
#if defined(PETSC_USE_INFO)
        } else {
          ct++;
#endif
        }
      }
#if defined(PETSC_USE_INFO)
      if (k > max) max = k;
#endif
    }
  }

  /* Print summary */
#if defined(PETSC_USE_INFO)
  for (i=0,j=0; i<ht_size; i++) {
    if (HT[i]) j++;
  }
  ierr = PetscInfo2(mat,"Average search = %5.2f, max search = %D\n",(!j) ? 0.0 : ((PetscReal)(ct+j))/j,max);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatAssemblyBegin_MPIBAIJ"
PetscErrorCode MatAssemblyBegin_MPIBAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       nstash,reallocs;
  InsertMode     addv;

  PetscFunctionBegin;
  if (baij->donotstash || mat->nooffprocentries) PetscFunctionReturn(0);

  /* make sure all processors are in the same (insert or add) mode */
  ierr = MPI_Allreduce((PetscEnum*)&mat->insertmode,(PetscEnum*)&addv,1,MPIU_ENUM,MPI_BOR,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
  if (addv == (ADD_VALUES|INSERT_VALUES)) SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
  mat->insertmode = addv; /* in case this processor had no cache */

  ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);CHKERRQ(ierr);
  ierr = MatStashScatterBegin_Private(mat,&mat->bstash,baij->rangebs);CHKERRQ(ierr);
  ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
  ierr = PetscInfo2(mat,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
  ierr = MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);CHKERRQ(ierr);
  ierr = PetscInfo2(mat,"Block-Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
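/*
   Usage sketch (illustrative): after all MatSetValues() calls, assembly moves the
   stashed off-process entries to their owners and builds the parallel scatter used
   in matrix-vector products:

       ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   MatAssemblyBegin() above starts the stash communication; MatAssemblyEnd() below
   drains both the scalar and the block stash and re-inserts the values locally.
*/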
#undef __FUNCT__
#define __FUNCT__ "MatAssemblyEnd_MPIBAIJ"
PetscErrorCode MatAssemblyEnd_MPIBAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *a    = (Mat_SeqBAIJ*)baij->A->data;
  PetscErrorCode ierr;
  PetscInt       i,j,rstart,ncols,flg,bs2=baij->bs2;
  PetscInt       *row,*col;
  PetscBool      r1,r2,r3,other_disassembled;
  MatScalar      *val;
  InsertMode     addv = mat->insertmode;
  PetscMPIInt    n;

  PetscFunctionBegin;
  /* do not use 'b=(Mat_SeqBAIJ*)baij->B->data' as B can be reset in disassembly */
  if (!baij->donotstash && !mat->nooffprocentries) {
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      if (!flg) break;

      for (i=0; i<n;) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) {
          if (row[j] != rstart) break;
        }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        /* Now assemble all these values with a single function call */
        ierr = MatSetValues_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
        i    = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
    /*
       Now process the block-stash. Since the values are stashed column-oriented,
       clear the row-oriented flags before MatSetValues() and restore the
       original flags afterwards.
    */
    r1 = baij->roworiented;
    r2 = a->roworiented;
    r3 = ((Mat_SeqBAIJ*)baij->B->data)->roworiented;

    baij->roworiented = PETSC_FALSE;
    a->roworiented    = PETSC_FALSE;

    (((Mat_SeqBAIJ*)baij->B->data))->roworiented = PETSC_FALSE; /* b->roworiented */
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      if (!flg) break;

      for (i=0; i<n;) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) {
          if (row[j] != rstart) break;
        }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        ierr = MatSetValuesBlocked_MPIBAIJ(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
        i    = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);

    baij->roworiented = r1;
    a->roworiented    = r2;

    ((Mat_SeqBAIJ*)baij->B->data)->roworiented = r3; /* b->roworiented */
  }

  ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);

  /* determine if any processor has disassembled; if so we must
     also disassemble ourselves, in order that we may reassemble. */
  /*
     if the nonzero structure of submatrix B cannot change then we know that
     no processor disassembled, thus we can skip this stuff
  */
  if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
    ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    if (mat->was_assembled && !other_disassembled) {
      ierr = MatDisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
    }
  }

  if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatSetUpMultiply_MPIBAIJ(mat);CHKERRQ(ierr);
  }
  ierr = MatSetOption(baij->B,MAT_CHECK_COMPRESSED_ROW,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (baij->ht && mode == MAT_FINAL_ASSEMBLY) {
    ierr = PetscInfo1(mat,"Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);CHKERRQ(ierr);

    baij->ht_total_ct  = 0;
    baij->ht_insert_ct = 0;
  }
#endif
  if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatCreateHashTable_MPIBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);

    mat->ops->setvalues        = MatSetValues_MPIBAIJ_HT;
    mat->ops->setvaluesblocked = MatSetValuesBlocked_MPIBAIJ_HT;
  }

  ierr = PetscFree2(baij->rowvalues,baij->rowindices);CHKERRQ(ierr);

  baij->rowvalues = 0;
  PetscFunctionReturn(0);
}
#include <petscdraw.h>
#undef __FUNCT__
#define __FUNCT__ "MatView_MPIBAIJ_ASCIIorDraworSocket"
static PetscErrorCode MatView_MPIBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
{
  Mat_MPIBAIJ       *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode    ierr;
  PetscMPIInt       size = baij->size,rank = baij->rank;
  PetscInt          bs   = mat->rmap->bs;
  PetscBool         iascii,isdraw;
  PetscViewer       sviewer;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      MatInfo info;
      ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
      ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_TRUE);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D bs %D mem %D\n",rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,mat->rmap->bs,(PetscInt)info.memory);CHKERRQ(ierr);
      ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
      ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
      ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedAllow(viewer,PETSC_FALSE);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");CHKERRQ(ierr);
      ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else if (format == PETSC_VIEWER_ASCII_INFO) {
      ierr = PetscViewerASCIIPrintf(viewer,"  block size is %D\n",bs);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
      PetscFunctionReturn(0);
    }
  }

  if (isdraw) {
    PetscDraw draw;
    PetscBool isnull;
    ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
    ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr);
    if (isnull) PetscFunctionReturn(0);
  }

  if (size == 1) {
    ierr = PetscObjectSetName((PetscObject)baij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
    ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
  } else {
    /* assemble the entire matrix onto first processor. */
    Mat         A;
    Mat_SeqBAIJ *Aloc;
    PetscInt    M = mat->rmap->N,N = mat->cmap->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
    MatScalar   *a;

    /* Here we are creating a temporary matrix, so will assume MPIBAIJ is acceptable */
    /* Perhaps this should be the type of mat? */
    ierr = MatCreate(PetscObjectComm((PetscObject)mat),&A);CHKERRQ(ierr);
    if (!rank) {
      ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
    } else {
      ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
    }
    ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(A,mat->rmap->bs,0,NULL,0,NULL);CHKERRQ(ierr);
    ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
    ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)A);CHKERRQ(ierr);

    /* copy over the A part */
    Aloc = (Mat_SeqBAIJ*)baij->A->data;
    ai   = Aloc->i; aj = Aloc->j; a = Aloc->a;
    ierr = PetscMalloc(bs*sizeof(PetscInt),&rvals);CHKERRQ(ierr);

    for (i=0; i<mbs; i++) {
      rvals[0] = bs*(baij->rstartbs + i);
      for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1;
      for (j=ai[i]; j<ai[i+1]; j++) {
        col = (baij->cstartbs+aj[j])*bs;
        for (k=0; k<bs; k++) {
          ierr = MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
          col++; a += bs;
        }
      }
    }
    /* copy over the B part */
    Aloc = (Mat_SeqBAIJ*)baij->B->data;
    ai   = Aloc->i; aj = Aloc->j; a = Aloc->a;
    for (i=0; i<mbs; i++) {
      rvals[0] = bs*(baij->rstartbs + i);
      for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1;
      for (j=ai[i]; j<ai[i+1]; j++) {
        col = baij->garray[aj[j]]*bs;
        for (k=0; k<bs; k++) {
          ierr = MatSetValues_MPIBAIJ(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
          col++; a += bs;
        }
      }
    }
    ierr = PetscFree(rvals);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    /*
       Everyone has to call to draw the matrix since the graphics waits are
       synchronized across all processors that share the PetscDraw object
    */
    ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
    if (!rank) {
      ierr = PetscObjectSetName((PetscObject)((Mat_MPIBAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
      /* Set the type name to MATMPIBAIJ so that the correct type can be printed out by PetscObjectPrintClassNamePrefixType() in MatView_SeqBAIJ_ASCII() */
      PetscStrcpy(((PetscObject)((Mat_MPIBAIJ*)(A->data))->A)->type_name,MATMPIBAIJ);
      ierr = MatView(((Mat_MPIBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
    }
    ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
    ierr = MatDestroy(&A);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatView_MPIBAIJ_Binary"
static PetscErrorCode MatView_MPIBAIJ_Binary(Mat mat,PetscViewer viewer)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)mat->data;
  Mat_SeqBAIJ    *A = (Mat_SeqBAIJ*)a->A->data;
  Mat_SeqBAIJ    *B = (Mat_SeqBAIJ*)a->B->data;
  PetscErrorCode ierr;
  PetscInt       i,*row_lens,*crow_lens,bs = mat->rmap->bs,j,k,bs2=a->bs2,header[4],nz,rlen;
  PetscInt       *range=0,nzmax,*column_indices,cnt,col,*garray = a->garray,cstart = mat->cmap->rstart/bs,len,pcnt,l,ll;
  int            fd;
  PetscScalar    *column_values;
  FILE           *file;
  PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
  PetscInt       message_count,flowcontrolcount;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);CHKERRQ(ierr);
  nz   = bs2*(A->nz + B->nz);
  rlen = mat->rmap->n;
  if (!rank) {
    header[0] = MAT_FILE_CLASSID;
    header[1] = mat->rmap->N;
    header[2] = mat->cmap->N;

    ierr = MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
    ierr = PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    /* get largest number of rows any processor has */
    range = mat->rmap->range;
    for (i=1; i<size; i++) {
      rlen = PetscMax(rlen,range[i+1] - range[i]);
    }
  } else {
    ierr = MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
  }

  ierr = PetscMalloc((rlen/bs)*sizeof(PetscInt),&crow_lens);CHKERRQ(ierr);
  /* compute lengths of each block row */
  for (i=0; i<a->mbs; i++) {
    crow_lens[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
  }
  /* store the row lengths to the file */
  ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
  if (!rank) {
    MPI_Status status;
    ierr = PetscMalloc(rlen*sizeof(PetscInt),&row_lens);CHKERRQ(ierr);
    rlen = (range[1] - range[0])/bs;
    for (i=0; i<rlen; i++) {
      for (j=0; j<bs; j++) {
        row_lens[i*bs+j] = bs*crow_lens[i];
      }
    }
    ierr = PetscBinaryWrite(fd,row_lens,bs*rlen,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    for (i=1; i<size; i++) {
      rlen = (range[i+1] - range[i])/bs;
      ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
      ierr = MPI_Recv(crow_lens,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
      for (k=0; k<rlen; k++) {
        for (j=0; j<bs; j++) {
          row_lens[k*bs+j] = bs*crow_lens[k];
        }
      }
      ierr = PetscBinaryWrite(fd,row_lens,bs*rlen,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
    ierr = PetscFree(row_lens);CHKERRQ(ierr);
  } else {
    ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
    ierr = MPI_Send(crow_lens,mat->rmap->n/bs,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
  }
  ierr = PetscFree(crow_lens);CHKERRQ(ierr);
  /*
     Load up the local column indices. Include them for all rows, not just one per
     block row, since process 0 does not have the information needed to rebuild them
     for each row from a block row. This does require more communication, but still
     no more than the communication needed for the nonzero values.
  */
  nzmax = nz; /* space the largest processor needs */
  ierr  = MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
  ierr  = PetscMalloc(nzmax*sizeof(PetscInt),&column_indices);CHKERRQ(ierr);
  cnt   = 0;
  for (i=0; i<a->mbs; i++) {
    pcnt = cnt;
    for (j=B->i[i]; j<B->i[i+1]; j++) {
      if ((col = garray[B->j[j]]) > cstart) break;
      for (l=0; l<bs; l++) {
        column_indices[cnt++] = bs*col+l;
      }
    }
    for (k=A->i[i]; k<A->i[i+1]; k++) {
      for (l=0; l<bs; l++) {
        column_indices[cnt++] = bs*(A->j[k] + cstart)+l;
      }
    }
    for (; j<B->i[i+1]; j++) {
      for (l=0; l<bs; l++) {
        column_indices[cnt++] = bs*garray[B->j[j]]+l;
      }
    }
    len = cnt - pcnt;
    for (k=1; k<bs; k++) {
      ierr = PetscMemcpy(&column_indices[cnt],&column_indices[pcnt],len*sizeof(PetscInt));CHKERRQ(ierr);
      cnt += len;
    }
  }
  if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);

  /* store the columns to the file */
  ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
  if (!rank) {
    MPI_Status status;
    ierr = PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    for (i=1; i<size; i++) {
      ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
      ierr = MPI_Recv(&cnt,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
      ierr = MPI_Recv(column_indices,cnt,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
      ierr = PetscBinaryWrite(fd,column_indices,cnt,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
  } else {
    ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
    ierr = MPI_Send(&cnt,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = MPI_Send(column_indices,cnt,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
  }
  ierr = PetscFree(column_indices);CHKERRQ(ierr);
  /* load up the numerical values */
  ierr = PetscMalloc(nzmax*sizeof(PetscScalar),&column_values);CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<a->mbs; i++) {
    rlen = bs*(B->i[i+1] - B->i[i] + A->i[i+1] - A->i[i]);
    for (j=B->i[i]; j<B->i[i+1]; j++) {
      if (garray[B->j[j]] > cstart) break;
      for (l=0; l<bs; l++) {
        for (ll=0; ll<bs; ll++) {
          column_values[cnt + l*rlen + ll] = B->a[bs2*j+l+bs*ll];
        }
      }
      cnt += bs;
    }
    for (k=A->i[i]; k<A->i[i+1]; k++) {
      for (l=0; l<bs; l++) {
        for (ll=0; ll<bs; ll++) {
          column_values[cnt + l*rlen + ll] = A->a[bs2*k+l+bs*ll];
        }
      }
      cnt += bs;
    }
    for (; j<B->i[i+1]; j++) {
      for (l=0; l<bs; l++) {
        for (ll=0; ll<bs; ll++) {
          column_values[cnt + l*rlen + ll] = B->a[bs2*j+l+bs*ll];
        }
      }
      cnt += bs;
    }
    cnt += (bs-1)*rlen;
  }
  if (cnt != nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,nz);

  /* store the column values to the file */
  ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
  if (!rank) {
    MPI_Status status;
    ierr = PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
    for (i=1; i<size; i++) {
      ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
      ierr = MPI_Recv(&cnt,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
      ierr = MPI_Recv(column_values,cnt,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
      ierr = PetscBinaryWrite(fd,column_values,cnt,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
  } else {
    ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
    ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = MPI_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
  }
  ierr = PetscFree(column_values);CHKERRQ(ierr);

  ierr = PetscViewerBinaryGetInfoPointer(viewer,&file);CHKERRQ(ierr);
  if (file) {
    fprintf(file,"-matload_block_size %d\n",(int)mat->rmap->bs);
  }
  PetscFunctionReturn(0);
}
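/*
   Descriptive note: the binary file written above follows the generic PETSc Mat
   format: a 4-entry header (MAT_FILE_CLASSID, M, N, total nonzeros), the per-row
   nonzero counts, all global column indices, then all values. The block structure
   itself is not stored, so -matload_block_size is written to the companion .info
   file to allow reloading with the same bs.
*/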
#undef __FUNCT__
#define __FUNCT__ "MatView_MPIBAIJ"
PetscErrorCode MatView_MPIBAIJ(Mat mat,PetscViewer viewer)
{
  PetscErrorCode ierr;
  PetscBool      iascii,isdraw,issocket,isbinary;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
  if (iascii || isdraw || issocket) {
    ierr = MatView_MPIBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
  } else if (isbinary) {
    ierr = MatView_MPIBAIJ_Binary(mat,viewer);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIBAIJ"
PetscErrorCode MatDestroy_MPIBAIJ(Mat mat)
{
  Mat_MPIBAIJ    *baij = (Mat_MPIBAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
#if defined(PETSC_USE_LOG)
  PetscLogObjectState((PetscObject)mat,"Rows=%D,Cols=%D",mat->rmap->N,mat->cmap->N);
#endif
  ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
  ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
  ierr = MatDestroy(&baij->A);CHKERRQ(ierr);
  ierr = MatDestroy(&baij->B);CHKERRQ(ierr);
#if defined(PETSC_USE_CTABLE)
  ierr = PetscTableDestroy(&baij->colmap);CHKERRQ(ierr);
#else
  ierr = PetscFree(baij->colmap);CHKERRQ(ierr);
#endif
  ierr = PetscFree(baij->garray);CHKERRQ(ierr);
  ierr = VecDestroy(&baij->lvec);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&baij->Mvctx);CHKERRQ(ierr);
  ierr = PetscFree2(baij->rowvalues,baij->rowindices);CHKERRQ(ierr);
  ierr = PetscFree(baij->barray);CHKERRQ(ierr);
  ierr = PetscFree2(baij->hd,baij->ht);CHKERRQ(ierr);
  ierr = PetscFree(baij->rangebs);CHKERRQ(ierr);
  ierr = PetscFree(mat->data);CHKERRQ(ierr);

  ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocation_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIBAIJSetPreallocationCSR_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatSetHashTableFactor_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpibaij_mpisbaij_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpibaij_mpibstrm_C",NULL);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatMult_MPIBAIJ"
PetscErrorCode MatMult_MPIBAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       nt;

  PetscFunctionBegin;
  ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
  if (nt != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
  ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
  if (nt != A->rmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_MPIBAIJ"
PetscErrorCode MatMultAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultTranspose_MPIBAIJ"
PetscErrorCode MatMultTranspose_MPIBAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;
  PetscBool      merged;

  PetscFunctionBegin;
  ierr = VecScatterGetMerged(a->Mvctx,&merged);CHKERRQ(ierr);
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  if (!merged) {
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* receive remote parts: note this assumes the values are not actually */
    /* inserted in yy until the next line */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  } else {
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* values were actually received in the Begin(), but we still need to call this no-op End() */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
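/*
   Descriptive note: with the usual PETSc splitting of the local rows into a
   diagonal block A_d and an off-diagonal block B_od, MatMult computes
   y = A_d*x_local + B_od*x_ghost after a forward scatter of x, while
   MatMultTranspose computes y = A_d^T*x_local locally and reverse-scatters the
   contributions B_od^T*x_local into their owning processes with ADD_VALUES.
*/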
#undef __FUNCT__
#define __FUNCT__ "MatMultTransposeAdd_MPIBAIJ"
PetscErrorCode MatMultTransposeAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  /* send it on its way */
  ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  /* do local part */
  ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  /* receive remote parts: note this assumes the values are not actually */
  /* inserted in zz until the next line, which is true for this implementation */
  /* but perhaps not for all implementations. */
  ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  This only works correctly for square matrices where the subblock A->A is the
  diagonal block
*/
#undef __FUNCT__
#define __FUNCT__ "MatGetDiagonal_MPIBAIJ"
PetscErrorCode MatGetDiagonal_MPIBAIJ(Mat A,Vec v)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
  ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatScale_MPIBAIJ"
PetscErrorCode MatScale_MPIBAIJ(Mat A,PetscScalar aa)
{
  Mat_MPIBAIJ    *a = (Mat_MPIBAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatScale(a->A,aa);CHKERRQ(ierr);
  ierr = MatScale(a->B,aa);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#undef __FUNCT__
#define __FUNCT__ "MatGetRow_MPIBAIJ"
PetscErrorCode MatGetRow_MPIBAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  Mat_MPIBAIJ    *mat = (Mat_MPIBAIJ*)matin->data;
  PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
  PetscErrorCode ierr;
  PetscInt       bs = matin->rmap->bs,bs2 = mat->bs2,i,*cworkA,*cworkB,**pcA,**pcB;
  PetscInt       nztot,nzA,nzB,lrow,brstart = matin->rmap->rstart,brend = matin->rmap->rend;
  PetscInt       *cmap,*idx_p,cstart = mat->cstartbs;

  PetscFunctionBegin;
  if (row < brstart || row >= brend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local rows");
  if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
  mat->getrowactive = PETSC_TRUE;

  if (!mat->rowvalues && (idx || v)) {
    /*
        allocate enough space to hold information from the longest row.
    */
    Mat_SeqBAIJ *Aa = (Mat_SeqBAIJ*)mat->A->data,*Ba = (Mat_SeqBAIJ*)mat->B->data;
    PetscInt    max = 1,mbs = mat->mbs,tmp;
    for (i=0; i<mbs; i++) {
      tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
      if (max < tmp) max = tmp;
    }
    ierr = PetscMalloc2(max*bs2,PetscScalar,&mat->rowvalues,max*bs2,PetscInt,&mat->rowindices);CHKERRQ(ierr);
  }
  lrow = row - brstart;

  pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
  if (!v)   {pvA = 0; pvB = 0;}
  if (!idx) {pcA = 0; if (!v) pcB = 0;}
  ierr  = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr  = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  nztot = nzA + nzB;

  cmap = mat->garray;
  if (v || idx) {
    if (nztot) {
      /* Sort by increasing column numbers, assuming A and B already sorted */
      PetscInt imark = -1;
      if (v) {
        *v = v_p = mat->rowvalues;
        for (i=0; i<nzB; i++) {
          if (cmap[cworkB[i]/bs] < cstart) v_p[i] = vworkB[i];
          else break;
        }
        imark = i;
        for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
        for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
      }
      if (idx) {
        *idx = idx_p = mat->rowindices;
        if (imark > -1) {
          for (i=0; i<imark; i++) {
            idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
          }
        } else {
          for (i=0; i<nzB; i++) {
            if (cmap[cworkB[i]/bs] < cstart) idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
            else break;
          }
          imark = i;
        }
        for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
        for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
      }
    } else {
      if (idx) *idx = 0;
      if (v)   *v   = 0;
    }
  }
  *nz  = nztot;
  ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatRestoreRow_MPIBAIJ"
PetscErrorCode MatRestoreRow_MPIBAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
{
  Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;

  PetscFunctionBegin;
  if (!baij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
  baij->getrowactive = PETSC_FALSE;
  PetscFunctionReturn(0);
}
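/*
   Descriptive note on MatGetRow above: one local row is merged from the diagonal
   part (columns shifted by cstart*bs) and the off-diagonal part (columns mapped
   through garray), keeping the merged list sorted by global column; imark counts
   how many off-diagonal entries precede the diagonal block.
*/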
isend[3] += info->memory; isend[4] += info->mallocs; 1549 1550 if (flag == MAT_LOCAL) { 1551 info->nz_used = isend[0]; 1552 info->nz_allocated = isend[1]; 1553 info->nz_unneeded = isend[2]; 1554 info->memory = isend[3]; 1555 info->mallocs = isend[4]; 1556 } else if (flag == MAT_GLOBAL_MAX) { 1557 ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr); 1558 1559 info->nz_used = irecv[0]; 1560 info->nz_allocated = irecv[1]; 1561 info->nz_unneeded = irecv[2]; 1562 info->memory = irecv[3]; 1563 info->mallocs = irecv[4]; 1564 } else if (flag == MAT_GLOBAL_SUM) { 1565 ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr); 1566 1567 info->nz_used = irecv[0]; 1568 info->nz_allocated = irecv[1]; 1569 info->nz_unneeded = irecv[2]; 1570 info->memory = irecv[3]; 1571 info->mallocs = irecv[4]; 1572 } else SETERRQ1(PetscObjectComm((PetscObject)matin),PETSC_ERR_ARG_WRONG,"Unknown MatInfoType argument %d",(int)flag); 1573 info->fill_ratio_given = 0; /* no parallel LU/ILU/Cholesky */ 1574 info->fill_ratio_needed = 0; 1575 info->factor_mallocs = 0; 1576 PetscFunctionReturn(0); 1577 } 1578 1579 #undef __FUNCT__ 1580 #define __FUNCT__ "MatSetOption_MPIBAIJ" 1581 PetscErrorCode MatSetOption_MPIBAIJ(Mat A,MatOption op,PetscBool flg) 1582 { 1583 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data; 1584 PetscErrorCode ierr; 1585 1586 PetscFunctionBegin; 1587 switch (op) { 1588 case MAT_NEW_NONZERO_LOCATIONS: 1589 case MAT_NEW_NONZERO_ALLOCATION_ERR: 1590 case MAT_UNUSED_NONZERO_LOCATION_ERR: 1591 case MAT_KEEP_NONZERO_PATTERN: 1592 case MAT_NEW_NONZERO_LOCATION_ERR: 1593 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 1594 ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr); 1595 break; 1596 case MAT_ROW_ORIENTED: 1597 a->roworiented = flg; 1598 1599 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 1600 ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr); 1601 break; 1602 case MAT_NEW_DIAGONALS: 1603 ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr); 1604 break; 1605 case MAT_IGNORE_OFF_PROC_ENTRIES: 1606 a->donotstash = flg; 1607 break; 1608 case MAT_USE_HASH_TABLE: 1609 a->ht_flag = flg; 1610 break; 1611 case MAT_SYMMETRIC: 1612 case MAT_STRUCTURALLY_SYMMETRIC: 1613 case MAT_HERMITIAN: 1614 case MAT_SYMMETRY_ETERNAL: 1615 ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr); 1616 break; 1617 default: 1618 SETERRQ1(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"unknown option %d",op); 1619 } 1620 PetscFunctionReturn(0); 1621 } 1622 1623 #undef __FUNCT__ 1624 #define __FUNCT__ "MatTranspose_MPIBAIJ" 1625 PetscErrorCode MatTranspose_MPIBAIJ(Mat A,MatReuse reuse,Mat *matout) 1626 { 1627 Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)A->data; 1628 Mat_SeqBAIJ *Aloc; 1629 Mat B; 1630 PetscErrorCode ierr; 1631 PetscInt M =A->rmap->N,N=A->cmap->N,*ai,*aj,i,*rvals,j,k,col; 1632 PetscInt bs=A->rmap->bs,mbs=baij->mbs; 1633 MatScalar *a; 1634 1635 PetscFunctionBegin; 1636 if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Square matrix only for in-place"); 1637 if (reuse == MAT_INITIAL_MATRIX || *matout == A) { 1638 ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr); 1639 ierr = MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);CHKERRQ(ierr); 1640 ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr); 1641 /* Do not know preallocation information, but must set block size */ 1642 ierr = 
MatMPIBAIJSetPreallocation(B,A->rmap->bs,PETSC_DECIDE,NULL,PETSC_DECIDE,NULL);CHKERRQ(ierr); 1643 } else { 1644 B = *matout; 1645 } 1646 1647 /* copy over the A part */ 1648 Aloc = (Mat_SeqBAIJ*)baij->A->data; 1649 ai = Aloc->i; aj = Aloc->j; a = Aloc->a; 1650 ierr = PetscMalloc(bs*sizeof(PetscInt),&rvals);CHKERRQ(ierr); 1651 1652 for (i=0; i<mbs; i++) { 1653 rvals[0] = bs*(baij->rstartbs + i); 1654 for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1; 1655 for (j=ai[i]; j<ai[i+1]; j++) { 1656 col = (baij->cstartbs+aj[j])*bs; 1657 for (k=0; k<bs; k++) { 1658 ierr = MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);CHKERRQ(ierr); 1659 1660 col++; a += bs; 1661 } 1662 } 1663 } 1664 /* copy over the B part */ 1665 Aloc = (Mat_SeqBAIJ*)baij->B->data; 1666 ai = Aloc->i; aj = Aloc->j; a = Aloc->a; 1667 for (i=0; i<mbs; i++) { 1668 rvals[0] = bs*(baij->rstartbs + i); 1669 for (j=1; j<bs; j++) rvals[j] = rvals[j-1] + 1; 1670 for (j=ai[i]; j<ai[i+1]; j++) { 1671 col = baij->garray[aj[j]]*bs; 1672 for (k=0; k<bs; k++) { 1673 ierr = MatSetValues_MPIBAIJ(B,1,&col,bs,rvals,a,INSERT_VALUES);CHKERRQ(ierr); 1674 col++; 1675 a += bs; 1676 } 1677 } 1678 } 1679 ierr = PetscFree(rvals);CHKERRQ(ierr); 1680 ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1681 ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1682 1683 if (reuse == MAT_INITIAL_MATRIX || *matout != A) *matout = B; 1684 else { 1685 ierr = MatHeaderMerge(A,B);CHKERRQ(ierr); 1686 } 1687 PetscFunctionReturn(0); 1688 } 1689 1690 #undef __FUNCT__ 1691 #define __FUNCT__ "MatDiagonalScale_MPIBAIJ" 1692 PetscErrorCode MatDiagonalScale_MPIBAIJ(Mat mat,Vec ll,Vec rr) 1693 { 1694 Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data; 1695 Mat a = baij->A,b = baij->B; 1696 PetscErrorCode ierr; 1697 PetscInt s1,s2,s3; 1698 1699 PetscFunctionBegin; 1700 ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr); 1701 if (rr) { 1702 ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr); 1703 if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size"); 1704 /* Overlap communication with computation. 
*/ 1705 ierr = VecScatterBegin(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 1706 } 1707 if (ll) { 1708 ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr); 1709 if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size"); 1710 ierr = (*b->ops->diagonalscale)(b,ll,NULL);CHKERRQ(ierr); 1711 } 1712 /* scale the diagonal block */ 1713 ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr); 1714 1715 if (rr) { 1716 /* Do a scatter end and then right scale the off-diagonal block */ 1717 ierr = VecScatterEnd(baij->Mvctx,rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 1718 ierr = (*b->ops->diagonalscale)(b,NULL,baij->lvec);CHKERRQ(ierr); 1719 } 1720 PetscFunctionReturn(0); 1721 } 1722 1723 #undef __FUNCT__ 1724 #define __FUNCT__ "MatZeroRows_MPIBAIJ" 1725 PetscErrorCode MatZeroRows_MPIBAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b) 1726 { 1727 Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data; 1728 PetscErrorCode ierr; 1729 PetscMPIInt imdex,size = l->size,n,rank = l->rank; 1730 PetscInt i,*owners = A->rmap->range; 1731 PetscInt *nprocs,j,idx,nsends,row; 1732 PetscInt nmax,*svalues,*starts,*owner,nrecvs; 1733 PetscInt *rvalues,tag = ((PetscObject)A)->tag,count,base,slen,*source,lastidx = -1; 1734 PetscInt *lens,*lrows,*values,rstart_bs=A->rmap->rstart; 1735 MPI_Comm comm; 1736 MPI_Request *send_waits,*recv_waits; 1737 MPI_Status recv_status,*send_status; 1738 const PetscScalar *xx; 1739 PetscScalar *bb; 1740 #if defined(PETSC_DEBUG) 1741 PetscBool found = PETSC_FALSE; 1742 #endif 1743 1744 PetscFunctionBegin; 1745 ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr); 1746 /* first count number of contributors to each processor */ 1747 ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr); 1748 ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr); 1749 ierr = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr); /* see note*/ 1750 j = 0; 1751 for (i=0; i<N; i++) { 1752 if (lastidx > (idx = rows[i])) j = 0; 1753 lastidx = idx; 1754 for (; j<size; j++) { 1755 if (idx >= owners[j] && idx < owners[j+1]) { 1756 nprocs[2*j]++; 1757 nprocs[2*j+1] = 1; 1758 owner[i] = j; 1759 #if defined(PETSC_DEBUG) 1760 found = PETSC_TRUE; 1761 #endif 1762 break; 1763 } 1764 } 1765 #if defined(PETSC_DEBUG) 1766 if (!found) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index out of range"); 1767 found = PETSC_FALSE; 1768 #endif 1769 } 1770 nsends = 0; for (i=0; i<size; i++) nsends += nprocs[2*i+1]; 1771 1772 if (A->nooffproczerorows) { 1773 if (nsends > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"You called MatSetOption(,MAT_NO_OFF_PROC_ZERO_ROWS,PETSC_TRUE) but set an off process zero row"); 1774 nrecvs = nsends; 1775 nmax = N; 1776 } else { 1777 /* inform other processors of number of messages and max length*/ 1778 ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr); 1779 } 1780 1781 /* post receives: */ 1782 ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr); 1783 ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr); 1784 for (i=0; i<nrecvs; i++) { 1785 ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr); 1786 } 1787 1788 /* do sends: 1789 1) starts[i] gives the starting index in svalues for stuff going to 1790 the ith processor 1791 */ 1792 ierr = PetscMalloc((N+1)*sizeof(PetscInt),&svalues);CHKERRQ(ierr); 1793 ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr); 
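  /* starts[i] will be the offset in svalues of the first row index destined for process i */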
  ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);

  starts[0] = 0;
  for (i=1; i<size; i++) starts[i] = starts[i-1] + nprocs[2*i-2];
  for (i=0; i<N; i++) {
    svalues[starts[owner[i]]++] = rows[i];
  }

  starts[0] = 0;
  for (i=1; i<size+1; i++) starts[i] = starts[i-1] + nprocs[2*i-2];
  count = 0;
  for (i=0; i<size; i++) {
    if (nprocs[2*i+1]) {
      ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(starts);CHKERRQ(ierr);

  base = owners[rank];

  /* wait on receives */
  ierr  = PetscMalloc2(nrecvs+1,PetscInt,&lens,nrecvs+1,PetscInt,&source);CHKERRQ(ierr);
  count = nrecvs;
  slen  = 0;
  while (count) {
    ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
    /* unpack receives into our local space */
    ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);

    source[imdex] = recv_status.MPI_SOURCE;
    lens[imdex]   = n;
    slen         += n;
    count--;
  }
  ierr = PetscFree(recv_waits);CHKERRQ(ierr);

  /* move the data into the send scatter */
  ierr  = PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);CHKERRQ(ierr);
  count = 0;
  for (i=0; i<nrecvs; i++) {
    values = rvalues + i*nmax;
    for (j=0; j<lens[i]; j++) {
      lrows[count++] = values[j] - base;
    }
  }
  ierr = PetscFree(rvalues);CHKERRQ(ierr);
  ierr = PetscFree2(lens,source);CHKERRQ(ierr);
  ierr = PetscFree(owner);CHKERRQ(ierr);
  ierr = PetscFree(nprocs);CHKERRQ(ierr);

  /* fix right hand side if needed */
  if (x && b) {
    ierr = VecGetArrayRead(x,&xx);CHKERRQ(ierr);
    ierr = VecGetArray(b,&bb);CHKERRQ(ierr);
    for (i=0; i<slen; i++) {
      bb[lrows[i]] = diag*xx[lrows[i]];
    }
    ierr = VecRestoreArrayRead(x,&xx);CHKERRQ(ierr);
    ierr = VecRestoreArray(b,&bb);CHKERRQ(ierr);
  }

  /* actually zap the local rows */
  /*
     Zero the required rows. If the "diagonal block" of the matrix
     is square and the user wishes to set the diagonal, we use separate
     code so that MatSetValues() is not called once per diagonal entry;
     each such call could allocate new memory, triggering many mallocs
     and slowing things down.
1860 1861 */ 1862 /* must zero l->B before l->A because the (diag) case below may put values into l->B*/ 1863 ierr = MatZeroRows_SeqBAIJ(l->B,slen,lrows,0.0,0,0);CHKERRQ(ierr); 1864 if ((diag != 0.0) && (l->A->rmap->N == l->A->cmap->N)) { 1865 ierr = MatZeroRows_SeqBAIJ(l->A,slen,lrows,diag,0,0);CHKERRQ(ierr); 1866 } else if (diag != 0.0) { 1867 ierr = MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0,0,0);CHKERRQ(ierr); 1868 if (((Mat_SeqBAIJ*)l->A->data)->nonew) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\ 1869 MAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR"); 1870 for (i=0; i<slen; i++) { 1871 row = lrows[i] + rstart_bs; 1872 ierr = MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);CHKERRQ(ierr); 1873 } 1874 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1875 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1876 } else { 1877 ierr = MatZeroRows_SeqBAIJ(l->A,slen,lrows,0.0,0,0);CHKERRQ(ierr); 1878 } 1879 1880 ierr = PetscFree(lrows);CHKERRQ(ierr); 1881 1882 /* wait on sends */ 1883 if (nsends) { 1884 ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr); 1885 ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr); 1886 ierr = PetscFree(send_status);CHKERRQ(ierr); 1887 } 1888 ierr = PetscFree(send_waits);CHKERRQ(ierr); 1889 ierr = PetscFree(svalues);CHKERRQ(ierr); 1890 PetscFunctionReturn(0); 1891 } 1892 1893 #undef __FUNCT__ 1894 #define __FUNCT__ "MatSetUnfactored_MPIBAIJ" 1895 PetscErrorCode MatSetUnfactored_MPIBAIJ(Mat A) 1896 { 1897 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data; 1898 PetscErrorCode ierr; 1899 1900 PetscFunctionBegin; 1901 ierr = MatSetUnfactored(a->A);CHKERRQ(ierr); 1902 PetscFunctionReturn(0); 1903 } 1904 1905 static PetscErrorCode MatDuplicate_MPIBAIJ(Mat,MatDuplicateOption,Mat*); 1906 1907 #undef __FUNCT__ 1908 #define __FUNCT__ "MatEqual_MPIBAIJ" 1909 PetscErrorCode MatEqual_MPIBAIJ(Mat A,Mat B,PetscBool *flag) 1910 { 1911 Mat_MPIBAIJ *matB = (Mat_MPIBAIJ*)B->data,*matA = (Mat_MPIBAIJ*)A->data; 1912 Mat a,b,c,d; 1913 PetscBool flg; 1914 PetscErrorCode ierr; 1915 1916 PetscFunctionBegin; 1917 a = matA->A; b = matA->B; 1918 c = matB->A; d = matB->B; 1919 1920 ierr = MatEqual(a,c,&flg);CHKERRQ(ierr); 1921 if (flg) { 1922 ierr = MatEqual(b,d,&flg);CHKERRQ(ierr); 1923 } 1924 ierr = MPI_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 1925 PetscFunctionReturn(0); 1926 } 1927 1928 #undef __FUNCT__ 1929 #define __FUNCT__ "MatCopy_MPIBAIJ" 1930 PetscErrorCode MatCopy_MPIBAIJ(Mat A,Mat B,MatStructure str) 1931 { 1932 PetscErrorCode ierr; 1933 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data; 1934 Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)B->data; 1935 1936 PetscFunctionBegin; 1937 /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. 
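     In that case we fall back to MatCopy_Basic(), which copies one row at a time
     with MatGetRow()/MatSetValues().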
*/ 1938 if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) { 1939 ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr); 1940 } else { 1941 ierr = MatCopy(a->A,b->A,str);CHKERRQ(ierr); 1942 ierr = MatCopy(a->B,b->B,str);CHKERRQ(ierr); 1943 } 1944 PetscFunctionReturn(0); 1945 } 1946 1947 #undef __FUNCT__ 1948 #define __FUNCT__ "MatSetUp_MPIBAIJ" 1949 PetscErrorCode MatSetUp_MPIBAIJ(Mat A) 1950 { 1951 PetscErrorCode ierr; 1952 1953 PetscFunctionBegin; 1954 ierr = MatMPIBAIJSetPreallocation(A,A->rmap->bs,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr); 1955 PetscFunctionReturn(0); 1956 } 1957 1958 #undef __FUNCT__ 1959 #define __FUNCT__ "MatAXPY_MPIBAIJ" 1960 PetscErrorCode MatAXPY_MPIBAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str) 1961 { 1962 PetscErrorCode ierr; 1963 Mat_MPIBAIJ *xx=(Mat_MPIBAIJ*)X->data,*yy=(Mat_MPIBAIJ*)Y->data; 1964 PetscBLASInt bnz,one=1; 1965 Mat_SeqBAIJ *x,*y; 1966 1967 PetscFunctionBegin; 1968 if (str == SAME_NONZERO_PATTERN) { 1969 PetscScalar alpha = a; 1970 x = (Mat_SeqBAIJ*)xx->A->data; 1971 y = (Mat_SeqBAIJ*)yy->A->data; 1972 ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr); 1973 PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one)); 1974 x = (Mat_SeqBAIJ*)xx->B->data; 1975 y = (Mat_SeqBAIJ*)yy->B->data; 1976 ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr); 1977 PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one)); 1978 } else { 1979 ierr = MatAXPY_Basic(Y,a,X,str);CHKERRQ(ierr); 1980 } 1981 PetscFunctionReturn(0); 1982 } 1983 1984 #undef __FUNCT__ 1985 #define __FUNCT__ "MatRealPart_MPIBAIJ" 1986 PetscErrorCode MatRealPart_MPIBAIJ(Mat A) 1987 { 1988 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data; 1989 PetscErrorCode ierr; 1990 1991 PetscFunctionBegin; 1992 ierr = MatRealPart(a->A);CHKERRQ(ierr); 1993 ierr = MatRealPart(a->B);CHKERRQ(ierr); 1994 PetscFunctionReturn(0); 1995 } 1996 1997 #undef __FUNCT__ 1998 #define __FUNCT__ "MatImaginaryPart_MPIBAIJ" 1999 PetscErrorCode MatImaginaryPart_MPIBAIJ(Mat A) 2000 { 2001 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data; 2002 PetscErrorCode ierr; 2003 2004 PetscFunctionBegin; 2005 ierr = MatImaginaryPart(a->A);CHKERRQ(ierr); 2006 ierr = MatImaginaryPart(a->B);CHKERRQ(ierr); 2007 PetscFunctionReturn(0); 2008 } 2009 2010 #undef __FUNCT__ 2011 #define __FUNCT__ "MatGetSubMatrix_MPIBAIJ" 2012 PetscErrorCode MatGetSubMatrix_MPIBAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat) 2013 { 2014 PetscErrorCode ierr; 2015 IS iscol_local; 2016 PetscInt csize; 2017 2018 PetscFunctionBegin; 2019 ierr = ISGetLocalSize(iscol,&csize);CHKERRQ(ierr); 2020 if (call == MAT_REUSE_MATRIX) { 2021 ierr = PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);CHKERRQ(ierr); 2022 if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse"); 2023 } else { 2024 ierr = ISAllGather(iscol,&iscol_local);CHKERRQ(ierr); 2025 } 2026 ierr = MatGetSubMatrix_MPIBAIJ_Private(mat,isrow,iscol_local,csize,call,newmat);CHKERRQ(ierr); 2027 if (call == MAT_INITIAL_MATRIX) { 2028 ierr = PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);CHKERRQ(ierr); 2029 ierr = ISDestroy(&iscol_local);CHKERRQ(ierr); 2030 } 2031 PetscFunctionReturn(0); 2032 } 2033 extern PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool*,PetscBool*,Mat*); 2034 #undef __FUNCT__ 2035 #define __FUNCT__ "MatGetSubMatrix_MPIBAIJ_Private" 2036 /* 2037 Not great since it makes two copies of the 
submatrix: first as a SeqBAIJ in local storage, and then by concatenating the
    local matrices into the end result. Writing it directly would be much like
    MatGetSubMatrices_MPIBAIJ()
*/
PetscErrorCode MatGetSubMatrix_MPIBAIJ_Private(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank,size;
  PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs;
  PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal,ncol,nrow;
  Mat            M,Mreuse;
  MatScalar      *vwork,*aa;
  MPI_Comm       comm;
  IS             isrow_new,iscol_new;
  PetscBool      idflag,allrows,allcols;
  Mat_SeqBAIJ    *aij;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* The compression and expansion should be avoided: it does not flag errors
     and it may change the indices, hence it is bug-prone */
  ierr = ISCompressIndicesGeneral(mat->rmap->N,mat->rmap->n,mat->rmap->bs,1,&isrow,&isrow_new);CHKERRQ(ierr);
  ierr = ISCompressIndicesGeneral(mat->cmap->N,mat->cmap->n,mat->cmap->bs,1,&iscol,&iscol_new);CHKERRQ(ierr);

  /* Check for special case: each processor gets entire matrix columns */
  ierr = ISIdentity(iscol,&idflag);CHKERRQ(ierr);
  ierr = ISGetLocalSize(iscol,&ncol);CHKERRQ(ierr);
  if (idflag && ncol == mat->cmap->N) allcols = PETSC_TRUE;
  else allcols = PETSC_FALSE;

  ierr = ISIdentity(isrow,&idflag);CHKERRQ(ierr);
  ierr = ISGetLocalSize(isrow,&nrow);CHKERRQ(ierr);
  if (idflag && nrow == mat->rmap->N) allrows = PETSC_TRUE;
  else allrows = PETSC_FALSE;

  if (call == MAT_REUSE_MATRIX) {
    ierr = PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);CHKERRQ(ierr);
    if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
    ierr = MatGetSubMatrices_MPIBAIJ_local(mat,1,&isrow_new,&iscol_new,MAT_REUSE_MATRIX,&allrows,&allcols,&Mreuse);CHKERRQ(ierr);
  } else {
    ierr = MatGetSubMatrices_MPIBAIJ_local(mat,1,&isrow_new,&iscol_new,MAT_INITIAL_MATRIX,&allrows,&allcols,&Mreuse);CHKERRQ(ierr);
  }
  ierr = ISDestroy(&isrow_new);CHKERRQ(ierr);
  ierr = ISDestroy(&iscol_new);CHKERRQ(ierr);
  /*
    m - number of local rows
    n - number of columns (same on all processors)
    rstart - first row in new global matrix generated
  */
  ierr = MatGetBlockSize(mat,&bs);CHKERRQ(ierr);
  ierr = MatGetSize(Mreuse,&m,&n);CHKERRQ(ierr);
  m    = m/bs;
  n    = n/bs;

  if (call == MAT_INITIAL_MATRIX) {
    aij = (Mat_SeqBAIJ*)(Mreuse)->data;
    ii  = aij->i;
    jj  = aij->j;

    /*
      Determine the number of non-zeros in the diagonal and off-diagonal
      portions of the matrix in order to do correct preallocation
    */

    /* first get start and end of "diagonal" columns */
    if (csize == PETSC_DECIDE) {
      ierr = ISGetSize(isrow,&mglobal);CHKERRQ(ierr);
      if (mglobal == n*bs) { /* square matrix */
        nlocal = m;
      } else {
        nlocal = n/size + ((n % size) > rank);
      }
    } else {
      nlocal = csize/bs;
    }
    ierr   = MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
    rstart = rend - nlocal;
    if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);

    /* next, compute all the
lengths */
    ierr = PetscMalloc2(m+1,PetscInt,&dlens,m+1,PetscInt,&olens);CHKERRQ(ierr);
    for (i=0; i<m; i++) {
      jend = ii[i+1] - ii[i];
      olen = 0;
      dlen = 0;
      for (j=0; j<jend; j++) {
        if (*jj < rstart || *jj >= rend) olen++;
        else dlen++;
        jj++;
      }
      olens[i] = olen;
      dlens[i] = dlen;
    }
    ierr = MatCreate(comm,&M);CHKERRQ(ierr);
    ierr = MatSetSizes(M,bs*m,bs*nlocal,PETSC_DECIDE,bs*n);CHKERRQ(ierr);
    ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(M,bs,0,dlens,0,olens);CHKERRQ(ierr);
    ierr = PetscFree2(dlens,olens);CHKERRQ(ierr);
  } else {
    PetscInt ml,nl;

    M    = *newmat;
    ierr = MatGetLocalSize(M,&ml,&nl);CHKERRQ(ierr);
    if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
    ierr = MatZeroEntries(M);CHKERRQ(ierr);
    /*
       The next two lines are needed so we may call MatSetValuesBlocked_MPIBAIJ() below directly,
       rather than the slower MatSetValues().
    */
    M->was_assembled = PETSC_TRUE;
    M->assembled     = PETSC_FALSE;
  }
  ierr = MatSetOption(M,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(M,&rstart,&rend);CHKERRQ(ierr);
  aij  = (Mat_SeqBAIJ*)(Mreuse)->data;
  ii   = aij->i;
  jj   = aij->j;
  aa   = aij->a;
  for (i=0; i<m; i++) {
    row   = rstart/bs + i;
    nz    = ii[i+1] - ii[i];
    cwork = jj; jj += nz;
    vwork = aa; aa += nz*bs*bs;
    ierr  = MatSetValuesBlocked_MPIBAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
  }

  ierr    = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr    = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  *newmat = M;

  /* save submatrix used in processor for next request */
  if (call == MAT_INITIAL_MATRIX) {
    ierr = PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);CHKERRQ(ierr);
    ierr = PetscObjectDereference((PetscObject)Mreuse);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPermute_MPIBAIJ"
PetscErrorCode MatPermute_MPIBAIJ(Mat A,IS rowp,IS colp,Mat *B)
{
  MPI_Comm       comm,pcomm;
  PetscInt       first,rlocal_size,clocal_size,nrows;
  const PetscInt *rows;
  PetscMPIInt    size;
  IS             crowp,growp,irowp,lrowp,lcolp;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  /* make a collective version of 'rowp' */
  ierr = PetscObjectGetComm((PetscObject)rowp,&pcomm);CHKERRQ(ierr);
  if (pcomm==comm) {
    crowp = rowp;
  } else {
    ierr = ISGetSize(rowp,&nrows);CHKERRQ(ierr);
    ierr = ISGetIndices(rowp,&rows);CHKERRQ(ierr);
    ierr = ISCreateGeneral(comm,nrows,rows,PETSC_COPY_VALUES,&crowp);CHKERRQ(ierr);
    ierr = ISRestoreIndices(rowp,&rows);CHKERRQ(ierr);
  }
  /* collect the global row permutation and invert it */
  ierr = ISAllGather(crowp,&growp);CHKERRQ(ierr);
  ierr = ISSetPermutation(growp);CHKERRQ(ierr);
  if (pcomm!=comm) {
    ierr = ISDestroy(&crowp);CHKERRQ(ierr);
  }
  ierr = ISInvertPermutation(growp,PETSC_DECIDE,&irowp);CHKERRQ(ierr);
  ierr = ISDestroy(&growp);CHKERRQ(ierr);
  /* get the local target indices */
  ierr = MatGetOwnershipRange(A,&first,NULL);CHKERRQ(ierr);
  ierr = MatGetLocalSize(A,&rlocal_size,&clocal_size);CHKERRQ(ierr);
  ierr = ISGetIndices(irowp,&rows);CHKERRQ(ierr);
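  /* each process keeps the slice rows[first .. first+rlocal_size) of the inverted permutation as its local row targets */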
ierr = ISCreateGeneral(MPI_COMM_SELF,rlocal_size,rows+first,PETSC_COPY_VALUES,&lrowp);CHKERRQ(ierr); 2213 ierr = ISRestoreIndices(irowp,&rows);CHKERRQ(ierr); 2214 ierr = ISDestroy(&irowp);CHKERRQ(ierr); 2215 /* the column permutation is so much easier; 2216 make a local version of 'colp' and invert it */ 2217 ierr = PetscObjectGetComm((PetscObject)colp,&pcomm);CHKERRQ(ierr); 2218 ierr = MPI_Comm_size(pcomm,&size);CHKERRQ(ierr); 2219 if (size==1) { 2220 lcolp = colp; 2221 } else { 2222 ierr = ISAllGather(colp,&lcolp);CHKERRQ(ierr); 2223 } 2224 ierr = ISSetPermutation(lcolp);CHKERRQ(ierr); 2225 /* now we just get the submatrix */ 2226 ierr = MatGetSubMatrix_MPIBAIJ_Private(A,lrowp,lcolp,clocal_size,MAT_INITIAL_MATRIX,B);CHKERRQ(ierr); 2227 if (size>1) { 2228 ierr = ISDestroy(&lcolp);CHKERRQ(ierr); 2229 } 2230 /* clean up */ 2231 ierr = ISDestroy(&lrowp);CHKERRQ(ierr); 2232 PetscFunctionReturn(0); 2233 } 2234 2235 #undef __FUNCT__ 2236 #define __FUNCT__ "MatGetGhosts_MPIBAIJ" 2237 PetscErrorCode MatGetGhosts_MPIBAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[]) 2238 { 2239 Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*) mat->data; 2240 Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data; 2241 2242 PetscFunctionBegin; 2243 if (nghosts) *nghosts = B->nbs; 2244 if (ghosts) *ghosts = baij->garray; 2245 PetscFunctionReturn(0); 2246 } 2247 2248 #undef __FUNCT__ 2249 #define __FUNCT__ "MatGetSeqNonzeroStructure_MPIBAIJ" 2250 PetscErrorCode MatGetSeqNonzeroStructure_MPIBAIJ(Mat A,Mat *newmat) 2251 { 2252 Mat B; 2253 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data; 2254 Mat_SeqBAIJ *ad = (Mat_SeqBAIJ*)a->A->data,*bd = (Mat_SeqBAIJ*)a->B->data; 2255 Mat_SeqAIJ *b; 2256 PetscErrorCode ierr; 2257 PetscMPIInt size,rank,*recvcounts = 0,*displs = 0; 2258 PetscInt sendcount,i,*rstarts = A->rmap->range,n,cnt,j,bs = A->rmap->bs; 2259 PetscInt m,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf; 2260 2261 PetscFunctionBegin; 2262 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr); 2263 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);CHKERRQ(ierr); 2264 2265 /* ---------------------------------------------------------------- 2266 Tell every processor the number of nonzeros per row 2267 */ 2268 ierr = PetscMalloc((A->rmap->N/bs)*sizeof(PetscInt),&lens);CHKERRQ(ierr); 2269 for (i=A->rmap->rstart/bs; i<A->rmap->rend/bs; i++) { 2270 lens[i] = ad->i[i-A->rmap->rstart/bs+1] - ad->i[i-A->rmap->rstart/bs] + bd->i[i-A->rmap->rstart/bs+1] - bd->i[i-A->rmap->rstart/bs]; 2271 } 2272 sendcount = A->rmap->rend/bs - A->rmap->rstart/bs; 2273 ierr = PetscMalloc(2*size*sizeof(PetscMPIInt),&recvcounts);CHKERRQ(ierr); 2274 displs = recvcounts + size; 2275 for (i=0; i<size; i++) { 2276 recvcounts[i] = A->rmap->range[i+1]/bs - A->rmap->range[i]/bs; 2277 displs[i] = A->rmap->range[i]/bs; 2278 } 2279 #if defined(PETSC_HAVE_MPI_IN_PLACE) 2280 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 2281 #else 2282 ierr = MPI_Allgatherv(lens+A->rmap->rstart/bs,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 2283 #endif 2284 /* --------------------------------------------------------------- 2285 Create the sequential matrix of the same type as the local block diagonal 2286 */ 2287 ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr); 2288 ierr = MatSetSizes(B,A->rmap->N/bs,A->cmap->N/bs,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 2289 ierr = MatSetType(B,MATSEQAIJ);CHKERRQ(ierr); 2290 
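  /* preallocate using the global row lengths just gathered onto every process */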
ierr = MatSeqAIJSetPreallocation(B,0,lens);CHKERRQ(ierr); 2291 b = (Mat_SeqAIJ*)B->data; 2292 2293 /*-------------------------------------------------------------------- 2294 Copy my part of matrix column indices over 2295 */ 2296 sendcount = ad->nz + bd->nz; 2297 jsendbuf = b->j + b->i[rstarts[rank]/bs]; 2298 a_jsendbuf = ad->j; 2299 b_jsendbuf = bd->j; 2300 n = A->rmap->rend/bs - A->rmap->rstart/bs; 2301 cnt = 0; 2302 for (i=0; i<n; i++) { 2303 2304 /* put in lower diagonal portion */ 2305 m = bd->i[i+1] - bd->i[i]; 2306 while (m > 0) { 2307 /* is it above diagonal (in bd (compressed) numbering) */ 2308 if (garray[*b_jsendbuf] > A->rmap->rstart/bs + i) break; 2309 jsendbuf[cnt++] = garray[*b_jsendbuf++]; 2310 m--; 2311 } 2312 2313 /* put in diagonal portion */ 2314 for (j=ad->i[i]; j<ad->i[i+1]; j++) { 2315 jsendbuf[cnt++] = A->rmap->rstart/bs + *a_jsendbuf++; 2316 } 2317 2318 /* put in upper diagonal portion */ 2319 while (m-- > 0) { 2320 jsendbuf[cnt++] = garray[*b_jsendbuf++]; 2321 } 2322 } 2323 if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt); 2324 2325 /*-------------------------------------------------------------------- 2326 Gather all column indices to all processors 2327 */ 2328 for (i=0; i<size; i++) { 2329 recvcounts[i] = 0; 2330 for (j=A->rmap->range[i]/bs; j<A->rmap->range[i+1]/bs; j++) { 2331 recvcounts[i] += lens[j]; 2332 } 2333 } 2334 displs[0] = 0; 2335 for (i=1; i<size; i++) { 2336 displs[i] = displs[i-1] + recvcounts[i-1]; 2337 } 2338 #if defined(PETSC_HAVE_MPI_IN_PLACE) 2339 ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 2340 #else 2341 ierr = MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr); 2342 #endif 2343 /*-------------------------------------------------------------------- 2344 Assemble the matrix into useable form (note numerical values not yet set) 2345 */ 2346 /* set the b->ilen (length of each row) values */ 2347 ierr = PetscMemcpy(b->ilen,lens,(A->rmap->N/bs)*sizeof(PetscInt));CHKERRQ(ierr); 2348 /* set the b->i indices */ 2349 b->i[0] = 0; 2350 for (i=1; i<=A->rmap->N/bs; i++) { 2351 b->i[i] = b->i[i-1] + lens[i-1]; 2352 } 2353 ierr = PetscFree(lens);CHKERRQ(ierr); 2354 ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2355 ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2356 ierr = PetscFree(recvcounts);CHKERRQ(ierr); 2357 2358 if (A->symmetric) { 2359 ierr = MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr); 2360 } else if (A->hermitian) { 2361 ierr = MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr); 2362 } else if (A->structurally_symmetric) { 2363 ierr = MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr); 2364 } 2365 *newmat = B; 2366 PetscFunctionReturn(0); 2367 } 2368 2369 #undef __FUNCT__ 2370 #define __FUNCT__ "MatSOR_MPIBAIJ" 2371 PetscErrorCode MatSOR_MPIBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx) 2372 { 2373 Mat_MPIBAIJ *mat = (Mat_MPIBAIJ*)matin->data; 2374 PetscErrorCode ierr; 2375 Vec bb1 = 0; 2376 2377 PetscFunctionBegin; 2378 if (flag == SOR_APPLY_UPPER) { 2379 ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr); 2380 PetscFunctionReturn(0); 2381 } 2382 2383 if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS) { 2384 ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr); 2385 } 2386 
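  /* bb1 will hold the locally updated right-hand side bb - B*x(off-process) used by each local sweep below */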
2387 if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) { 2388 if (flag & SOR_ZERO_INITIAL_GUESS) { 2389 ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr); 2390 its--; 2391 } 2392 2393 while (its--) { 2394 ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 2395 ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 2396 2397 /* update rhs: bb1 = bb - B*x */ 2398 ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr); 2399 ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr); 2400 2401 /* local sweep */ 2402 ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr); 2403 } 2404 } else if (flag & SOR_LOCAL_FORWARD_SWEEP) { 2405 if (flag & SOR_ZERO_INITIAL_GUESS) { 2406 ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr); 2407 its--; 2408 } 2409 while (its--) { 2410 ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 2411 ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 2412 2413 /* update rhs: bb1 = bb - B*x */ 2414 ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr); 2415 ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr); 2416 2417 /* local sweep */ 2418 ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr); 2419 } 2420 } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) { 2421 if (flag & SOR_ZERO_INITIAL_GUESS) { 2422 ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr); 2423 its--; 2424 } 2425 while (its--) { 2426 ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 2427 ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr); 2428 2429 /* update rhs: bb1 = bb - B*x */ 2430 ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr); 2431 ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr); 2432 2433 /* local sweep */ 2434 ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr); 2435 } 2436 } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel version of SOR requested not supported"); 2437 2438 ierr = VecDestroy(&bb1);CHKERRQ(ierr); 2439 PetscFunctionReturn(0); 2440 } 2441 2442 #undef __FUNCT__ 2443 #define __FUNCT__ "MatInvertBlockDiagonal_MPIBAIJ" 2444 PetscErrorCode MatInvertBlockDiagonal_MPIBAIJ(Mat A,const PetscScalar **values) 2445 { 2446 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*) A->data; 2447 PetscErrorCode ierr; 2448 2449 PetscFunctionBegin; 2450 ierr = MatInvertBlockDiagonal(a->A,values);CHKERRQ(ierr); 2451 PetscFunctionReturn(0); 2452 } 2453 2454 2455 /* -------------------------------------------------------------------*/ 2456 static struct _MatOps MatOps_Values = {MatSetValues_MPIBAIJ, 2457 MatGetRow_MPIBAIJ, 2458 MatRestoreRow_MPIBAIJ, 2459 MatMult_MPIBAIJ, 2460 /* 4*/ MatMultAdd_MPIBAIJ, 2461 MatMultTranspose_MPIBAIJ, 2462 MatMultTransposeAdd_MPIBAIJ, 2463 0, 2464 0, 2465 0, 2466 /*10*/ 0, 2467 0, 2468 0, 2469 MatSOR_MPIBAIJ, 2470 MatTranspose_MPIBAIJ, 2471 /*15*/ MatGetInfo_MPIBAIJ, 2472 MatEqual_MPIBAIJ, 2473 MatGetDiagonal_MPIBAIJ, 2474 MatDiagonalScale_MPIBAIJ, 2475 MatNorm_MPIBAIJ, 2476 /*20*/ MatAssemblyBegin_MPIBAIJ, 2477 MatAssemblyEnd_MPIBAIJ, 2478 MatSetOption_MPIBAIJ, 2479 MatZeroEntries_MPIBAIJ, 2480 /*24*/ MatZeroRows_MPIBAIJ, 2481 0, 2482 0, 2483 0, 2484 0, 2485 /*29*/ MatSetUp_MPIBAIJ, 
2486 0, 2487 0, 2488 0, 2489 0, 2490 /*34*/ MatDuplicate_MPIBAIJ, 2491 0, 2492 0, 2493 0, 2494 0, 2495 /*39*/ MatAXPY_MPIBAIJ, 2496 MatGetSubMatrices_MPIBAIJ, 2497 MatIncreaseOverlap_MPIBAIJ, 2498 MatGetValues_MPIBAIJ, 2499 MatCopy_MPIBAIJ, 2500 /*44*/ 0, 2501 MatScale_MPIBAIJ, 2502 0, 2503 0, 2504 0, 2505 /*49*/ 0, 2506 0, 2507 0, 2508 0, 2509 0, 2510 /*54*/ MatFDColoringCreate_MPIXAIJ, 2511 0, 2512 MatSetUnfactored_MPIBAIJ, 2513 MatPermute_MPIBAIJ, 2514 MatSetValuesBlocked_MPIBAIJ, 2515 /*59*/ MatGetSubMatrix_MPIBAIJ, 2516 MatDestroy_MPIBAIJ, 2517 MatView_MPIBAIJ, 2518 0, 2519 0, 2520 /*64*/ 0, 2521 0, 2522 0, 2523 0, 2524 0, 2525 /*69*/ MatGetRowMaxAbs_MPIBAIJ, 2526 0, 2527 0, 2528 0, 2529 0, 2530 /*74*/ 0, 2531 MatFDColoringApply_BAIJ, 2532 0, 2533 0, 2534 0, 2535 /*79*/ 0, 2536 0, 2537 0, 2538 0, 2539 MatLoad_MPIBAIJ, 2540 /*84*/ 0, 2541 0, 2542 0, 2543 0, 2544 0, 2545 /*89*/ 0, 2546 0, 2547 0, 2548 0, 2549 0, 2550 /*94*/ 0, 2551 0, 2552 0, 2553 0, 2554 0, 2555 /*99*/ 0, 2556 0, 2557 0, 2558 0, 2559 0, 2560 /*104*/0, 2561 MatRealPart_MPIBAIJ, 2562 MatImaginaryPart_MPIBAIJ, 2563 0, 2564 0, 2565 /*109*/0, 2566 0, 2567 0, 2568 0, 2569 0, 2570 /*114*/MatGetSeqNonzeroStructure_MPIBAIJ, 2571 0, 2572 MatGetGhosts_MPIBAIJ, 2573 0, 2574 0, 2575 /*119*/0, 2576 0, 2577 0, 2578 0, 2579 MatGetMultiProcBlock_MPIBAIJ, 2580 /*124*/0, 2581 0, 2582 MatInvertBlockDiagonal_MPIBAIJ, 2583 0, 2584 0, 2585 /*129*/ 0, 2586 0, 2587 0, 2588 0, 2589 0, 2590 /*134*/ 0, 2591 0, 2592 0, 2593 0, 2594 0, 2595 /*139*/ 0, 2596 0 2597 }; 2598 2599 #undef __FUNCT__ 2600 #define __FUNCT__ "MatGetDiagonalBlock_MPIBAIJ" 2601 PetscErrorCode MatGetDiagonalBlock_MPIBAIJ(Mat A,Mat *a) 2602 { 2603 PetscFunctionBegin; 2604 *a = ((Mat_MPIBAIJ*)A->data)->A; 2605 PetscFunctionReturn(0); 2606 } 2607 2608 PETSC_EXTERN PetscErrorCode MatConvert_MPIBAIJ_MPISBAIJ(Mat, MatType,MatReuse,Mat*); 2609 2610 #undef __FUNCT__ 2611 #define __FUNCT__ "MatMPIBAIJSetPreallocationCSR_MPIBAIJ" 2612 PetscErrorCode MatMPIBAIJSetPreallocationCSR_MPIBAIJ(Mat B,PetscInt bs,const PetscInt ii[],const PetscInt jj[],const PetscScalar V[]) 2613 { 2614 PetscInt m,rstart,cstart,cend; 2615 PetscInt i,j,d,nz,nz_max=0,*d_nnz=0,*o_nnz=0; 2616 const PetscInt *JJ =0; 2617 PetscScalar *values=0; 2618 PetscErrorCode ierr; 2619 2620 PetscFunctionBegin; 2621 ierr = PetscLayoutSetBlockSize(B->rmap,bs);CHKERRQ(ierr); 2622 ierr = PetscLayoutSetBlockSize(B->cmap,bs);CHKERRQ(ierr); 2623 ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr); 2624 ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr); 2625 ierr = PetscLayoutGetBlockSize(B->rmap,&bs);CHKERRQ(ierr); 2626 m = B->rmap->n/bs; 2627 rstart = B->rmap->rstart/bs; 2628 cstart = B->cmap->rstart/bs; 2629 cend = B->cmap->rend/bs; 2630 2631 if (ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"ii[0] must be 0 but it is %D",ii[0]); 2632 ierr = PetscMalloc2(m,PetscInt,&d_nnz,m,PetscInt,&o_nnz);CHKERRQ(ierr); 2633 for (i=0; i<m; i++) { 2634 nz = ii[i+1] - ii[i]; 2635 if (nz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative number of columns %D",i,nz); 2636 nz_max = PetscMax(nz_max,nz); 2637 JJ = jj + ii[i]; 2638 for (j=0; j<nz; j++) { 2639 if (*JJ >= cstart) break; 2640 JJ++; 2641 } 2642 d = 0; 2643 for (; j<nz; j++) { 2644 if (*JJ++ >= cend) break; 2645 d++; 2646 } 2647 d_nnz[i] = d; 2648 o_nnz[i] = nz - d; 2649 } 2650 ierr = MatMPIBAIJSetPreallocation(B,bs,0,d_nnz,0,o_nnz);CHKERRQ(ierr); 2651 ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr); 2652 2653 values = (PetscScalar*)V; 2654 if (!values) { 2655 
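    /* no values supplied: use a zeroed buffer sized for the longest block row so the nonzero pattern can still be inserted */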
    ierr = PetscMalloc(bs*bs*nz_max*sizeof(PetscScalar),&values);CHKERRQ(ierr);
    ierr = PetscMemzero(values,bs*bs*nz_max*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  for (i=0; i<m; i++) {
    PetscInt          row    = i + rstart;
    PetscInt          ncols  = ii[i+1] - ii[i];
    const PetscInt    *icols = jj + ii[i];
    const PetscScalar *svals = values + (V ? (bs*bs*ii[i]) : 0);
    ierr = MatSetValuesBlocked_MPIBAIJ(B,1,&row,ncols,icols,svals,INSERT_VALUES);CHKERRQ(ierr);
  }

  if (!V) { ierr = PetscFree(values);CHKERRQ(ierr); }
  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetPreallocationCSR"
/*@C
   MatMPIBAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in BAIJ format
   (block compressed row).

   Collective on MPI_Comm

   Input Parameters:
+  A - the matrix
.  bs - the block size
.  i - the indices into j for the start of each local row (starts with zero)
.  j - the column indices for each local row (starts with zero); these must be sorted within each row
-  v - optional values in the matrix

   Level: developer

.keywords: matrix, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation(), MatCreateAIJ(), MPIAIJ
@*/
PetscErrorCode MatMPIBAIJSetPreallocationCSR(Mat B,PetscInt bs,const PetscInt i[],const PetscInt j[],const PetscScalar v[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(B,MAT_CLASSID,1);
  PetscValidType(B,1);
  PetscValidLogicalCollectiveInt(B,bs,2);
  ierr = PetscTryMethod(B,"MatMPIBAIJSetPreallocationCSR_C",(Mat,PetscInt,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,bs,i,j,v));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetPreallocation_MPIBAIJ"
PetscErrorCode MatMPIBAIJSetPreallocation_MPIBAIJ(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt *d_nnz,PetscInt o_nz,const PetscInt *o_nnz)
{
  Mat_MPIBAIJ    *b;
  PetscErrorCode ierr;
  PetscInt       i;

  PetscFunctionBegin;
  ierr = PetscLayoutSetBlockSize(B->rmap,bs);CHKERRQ(ierr);
  ierr = PetscLayoutSetBlockSize(B->cmap,bs);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
  ierr = PetscLayoutGetBlockSize(B->rmap,&bs);CHKERRQ(ierr);

  if (d_nnz) {
    for (i=0; i<B->rmap->n/bs; i++) {
      if (d_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
    }
  }
  if (o_nnz) {
    for (i=0; i<B->rmap->n/bs; i++) {
      if (o_nnz[i] < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
    }
  }

  b      = (Mat_MPIBAIJ*)B->data;
  b->bs2 = bs*bs;
  b->mbs = B->rmap->n/bs;
  b->nbs = B->cmap->n/bs;
  b->Mbs = B->rmap->N/bs;
  b->Nbs = B->cmap->N/bs;

  for (i=0; i<=b->size; i++) {
    b->rangebs[i] = B->rmap->range[i]/bs;
  }
  b->rstartbs = B->rmap->rstart/bs;
  b->rendbs   = B->rmap->rend/bs;
  b->cstartbs = B->cmap->rstart/bs;
  b->cendbs   = B->cmap->rend/bs;
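  /* on first preallocation, create the sequential diagonal (A) and off-diagonal (B) blocks and the block-row stash */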
2746 2747 if (!B->preallocated) { 2748 ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr); 2749 ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr); 2750 ierr = MatSetType(b->A,MATSEQBAIJ);CHKERRQ(ierr); 2751 ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr); 2752 ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr); 2753 ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr); 2754 ierr = MatSetType(b->B,MATSEQBAIJ);CHKERRQ(ierr); 2755 ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr); 2756 ierr = MatStashCreate_Private(PetscObjectComm((PetscObject)B),bs,&B->bstash);CHKERRQ(ierr); 2757 } 2758 2759 ierr = MatSeqBAIJSetPreallocation(b->A,bs,d_nz,d_nnz);CHKERRQ(ierr); 2760 ierr = MatSeqBAIJSetPreallocation(b->B,bs,o_nz,o_nnz);CHKERRQ(ierr); 2761 B->preallocated = PETSC_TRUE; 2762 PetscFunctionReturn(0); 2763 } 2764 2765 extern PetscErrorCode MatDiagonalScaleLocal_MPIBAIJ(Mat,Vec); 2766 extern PetscErrorCode MatSetHashTableFactor_MPIBAIJ(Mat,PetscReal); 2767 2768 #undef __FUNCT__ 2769 #define __FUNCT__ "MatConvert_MPIBAIJ_MPIAdj" 2770 PETSC_EXTERN PetscErrorCode MatConvert_MPIBAIJ_MPIAdj(Mat B, MatType newtype,MatReuse reuse,Mat *adj) 2771 { 2772 Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)B->data; 2773 PetscErrorCode ierr; 2774 Mat_SeqBAIJ *d = (Mat_SeqBAIJ*) b->A->data,*o = (Mat_SeqBAIJ*) b->B->data; 2775 PetscInt M = B->rmap->n/B->rmap->bs,i,*ii,*jj,cnt,j,k,rstart = B->rmap->rstart/B->rmap->bs; 2776 const PetscInt *id = d->i, *jd = d->j, *io = o->i, *jo = o->j, *garray = b->garray; 2777 2778 PetscFunctionBegin; 2779 ierr = PetscMalloc((M+1)*sizeof(PetscInt),&ii);CHKERRQ(ierr); 2780 ii[0] = 0; 2781 for (i=0; i<M; i++) { 2782 if ((id[i+1] - id[i]) < 0) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,id[i],id[i+1]); 2783 if ((io[i+1] - io[i]) < 0) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Indices wrong %D %D %D",i,io[i],io[i+1]); 2784 ii[i+1] = ii[i] + id[i+1] - id[i] + io[i+1] - io[i]; 2785 /* remove one from count of matrix has diagonal */ 2786 for (j=id[i]; j<id[i+1]; j++) { 2787 if (jd[j] == i) {ii[i+1]--;break;} 2788 } 2789 } 2790 ierr = PetscMalloc(ii[M]*sizeof(PetscInt),&jj);CHKERRQ(ierr); 2791 cnt = 0; 2792 for (i=0; i<M; i++) { 2793 for (j=io[i]; j<io[i+1]; j++) { 2794 if (garray[jo[j]] > rstart) break; 2795 jj[cnt++] = garray[jo[j]]; 2796 } 2797 for (k=id[i]; k<id[i+1]; k++) { 2798 if (jd[k] != i) { 2799 jj[cnt++] = rstart + jd[k]; 2800 } 2801 } 2802 for (; j<io[i+1]; j++) { 2803 jj[cnt++] = garray[jo[j]]; 2804 } 2805 } 2806 ierr = MatCreateMPIAdj(PetscObjectComm((PetscObject)B),M,B->cmap->N/B->rmap->bs,ii,jj,NULL,adj);CHKERRQ(ierr); 2807 PetscFunctionReturn(0); 2808 } 2809 2810 #include <../src/mat/impls/aij/mpi/mpiaij.h> 2811 2812 PETSC_EXTERN PetscErrorCode MatConvert_SeqBAIJ_SeqAIJ(Mat,MatType,MatReuse,Mat*); 2813 2814 #undef __FUNCT__ 2815 #define __FUNCT__ "MatConvert_MPIBAIJ_MPIAIJ" 2816 PETSC_EXTERN PetscErrorCode MatConvert_MPIBAIJ_MPIAIJ(Mat A,MatType newtype,MatReuse reuse,Mat *newmat) 2817 { 2818 PetscErrorCode ierr; 2819 Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data; 2820 Mat B; 2821 Mat_MPIAIJ *b; 2822 2823 PetscFunctionBegin; 2824 if (!A->assembled) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Matrix must be assembled"); 2825 2826 ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr); 2827 ierr = MatSetSizes(B,A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N);CHKERRQ(ierr); 2828 ierr = MatSetType(B,MATMPIAIJ);CHKERRQ(ierr); 2829 ierr = 
MatSeqAIJSetPreallocation(B,0,NULL);CHKERRQ(ierr); 2830 ierr = MatMPIAIJSetPreallocation(B,0,NULL,0,NULL);CHKERRQ(ierr); 2831 b = (Mat_MPIAIJ*) B->data; 2832 2833 ierr = MatDestroy(&b->A);CHKERRQ(ierr); 2834 ierr = MatDestroy(&b->B);CHKERRQ(ierr); 2835 ierr = MatDisAssemble_MPIBAIJ(A);CHKERRQ(ierr); 2836 ierr = MatConvert_SeqBAIJ_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A);CHKERRQ(ierr); 2837 ierr = MatConvert_SeqBAIJ_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B);CHKERRQ(ierr); 2838 ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2839 ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2840 ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2841 ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2842 if (reuse == MAT_REUSE_MATRIX) { 2843 ierr = MatHeaderReplace(A,B);CHKERRQ(ierr); 2844 } else { 2845 *newmat = B; 2846 } 2847 PetscFunctionReturn(0); 2848 } 2849 2850 #if defined(PETSC_HAVE_MUMPS) 2851 PETSC_EXTERN PetscErrorCode MatGetFactor_baij_mumps(Mat,MatFactorType,Mat*); 2852 #endif 2853 2854 /*MC 2855 MATMPIBAIJ - MATMPIBAIJ = "mpibaij" - A matrix type to be used for distributed block sparse matrices. 2856 2857 Options Database Keys: 2858 + -mat_type mpibaij - sets the matrix type to "mpibaij" during a call to MatSetFromOptions() 2859 . -mat_block_size <bs> - set the blocksize used to store the matrix 2860 - -mat_use_hash_table <fact> 2861 2862 Level: beginner 2863 2864 .seealso: MatCreateMPIBAIJ 2865 M*/ 2866 2867 PETSC_EXTERN PetscErrorCode MatConvert_MPIBAIJ_MPIBSTRM(Mat,MatType,MatReuse,Mat*); 2868 2869 #undef __FUNCT__ 2870 #define __FUNCT__ "MatCreate_MPIBAIJ" 2871 PETSC_EXTERN PetscErrorCode MatCreate_MPIBAIJ(Mat B) 2872 { 2873 Mat_MPIBAIJ *b; 2874 PetscErrorCode ierr; 2875 PetscBool flg; 2876 2877 PetscFunctionBegin; 2878 ierr = PetscNewLog(B,Mat_MPIBAIJ,&b);CHKERRQ(ierr); 2879 B->data = (void*)b; 2880 2881 ierr = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr); 2882 B->assembled = PETSC_FALSE; 2883 2884 B->insertmode = NOT_SET_VALUES; 2885 ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);CHKERRQ(ierr); 2886 ierr = MPI_Comm_size(PetscObjectComm((PetscObject)B),&b->size);CHKERRQ(ierr); 2887 2888 /* build local table of row and column ownerships */ 2889 ierr = PetscMalloc((b->size+1)*sizeof(PetscInt),&b->rangebs);CHKERRQ(ierr); 2890 2891 /* build cache for off array entries formed */ 2892 ierr = MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);CHKERRQ(ierr); 2893 2894 b->donotstash = PETSC_FALSE; 2895 b->colmap = NULL; 2896 b->garray = NULL; 2897 b->roworiented = PETSC_TRUE; 2898 2899 /* stuff used in block assembly */ 2900 b->barray = 0; 2901 2902 /* stuff used for matrix vector multiply */ 2903 b->lvec = 0; 2904 b->Mvctx = 0; 2905 2906 /* stuff for MatGetRow() */ 2907 b->rowindices = 0; 2908 b->rowvalues = 0; 2909 b->getrowactive = PETSC_FALSE; 2910 2911 /* hash table stuff */ 2912 b->ht = 0; 2913 b->hd = 0; 2914 b->ht_size = 0; 2915 b->ht_flag = PETSC_FALSE; 2916 b->ht_fact = 0; 2917 b->ht_total_ct = 0; 2918 b->ht_insert_ct = 0; 2919 2920 /* stuff for MatGetSubMatrices_MPIBAIJ_local() */ 2921 b->ijonly = PETSC_FALSE; 2922 2923 ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)B),NULL,"Options for loading MPIBAIJ matrix 1","Mat");CHKERRQ(ierr); 2924 ierr = PetscOptionsBool("-mat_use_hash_table","Use hash table to save memory in constructing matrix","MatSetOption",PETSC_FALSE,&flg,NULL);CHKERRQ(ierr); 2925 if (flg) { 2926 PetscReal fact = 1.39; 2927 ierr = 
MatSetOption(B,MAT_USE_HASH_TABLE,PETSC_TRUE);CHKERRQ(ierr);
    ierr = PetscOptionsReal("-mat_use_hash_table","Use hash table factor","MatMPIBAIJSetHashTableFactor",fact,&fact,NULL);CHKERRQ(ierr);
    if (fact <= 1.0) fact = 1.39;
    ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
    ierr = PetscInfo1(B,"Hash table Factor used %5.2f\n",fact);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

#if defined(PETSC_HAVE_MUMPS)
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatGetFactor_mumps_C",MatGetFactor_baij_mumps);CHKERRQ(ierr);
#endif
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpiadj_C",MatConvert_MPIBAIJ_MPIAdj);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpiaij_C",MatConvert_MPIBAIJ_MPIAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpisbaij_C",MatConvert_MPIBAIJ_MPISBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatGetDiagonalBlock_C",MatGetDiagonalBlock_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIBAIJSetPreallocation_C",MatMPIBAIJSetPreallocation_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIBAIJSetPreallocationCSR_C",MatMPIBAIJSetPreallocationCSR_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatSetHashTableFactor_C",MatSetHashTableFactor_MPIBAIJ);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpibaij_mpibstrm_C",MatConvert_MPIBAIJ_MPIBSTRM);CHKERRQ(ierr);
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIBAIJ);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATBAIJ - MATBAIJ = "baij" - A matrix type to be used for block sparse matrices.

   This matrix type is identical to MATSEQBAIJ when constructed with a single process communicator,
   and MATMPIBAIJ otherwise.

   Options Database Keys:
.  -mat_type baij - sets the matrix type to "baij" during a call to MatSetFromOptions()

   Level: beginner

.seealso: MatCreateBAIJ(), MATSEQBAIJ, MATMPIBAIJ, MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
M*/

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetPreallocation"
/*@C
   MatMPIBAIJSetPreallocation - Allocates memory for a sparse parallel matrix in block AIJ format
   (block compressed row). For good matrix assembly performance
   the user should preallocate the matrix storage by setting the parameters
   d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
   performance can be increased by more than a factor of 50.

   Collective on Mat

   Input Parameters:
+  A - the matrix
.  bs - size of block
.  d_nz - number of block nonzeros per block row in diagonal portion of local
          submatrix (same for all local rows)
.  d_nnz - array containing the number of block nonzeros in the various block rows
           in the diagonal portion of the local submatrix (possibly different for each block
           row) or NULL.
If you plan to factor the matrix you must leave room for the diagonal entry and
           set it even if it is zero.
.  o_nz - number of block nonzeros per block row in the off-diagonal portion of local
          submatrix (same for all local rows).
-  o_nnz - array containing the number of nonzeros in the various block rows of the
           off-diagonal portion of the local submatrix (possibly different for
           each block row) or NULL.

   If the *_nnz parameter is given then the *_nz parameter is ignored

   Options Database Keys:
+   -mat_block_size - size of the blocks to use
-   -mat_use_hash_table <fact>

   Notes:
   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
   then it must be used on all processors that share the object for that argument.

   Storage Information:
   For a square global matrix we define each processor's diagonal portion
   to be its local rows and the corresponding columns (a square submatrix);
   each processor's off-diagonal portion encompasses the remainder of the
   local matrix (a rectangular submatrix).

   The user can specify preallocated storage for the diagonal part of
   the local submatrix with either d_nz or d_nnz (not both). Set
   d_nz=PETSC_DEFAULT and d_nnz=NULL for PETSc to control dynamic
   memory allocation. Likewise, specify preallocated storage for the
   off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

   Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
   the figure below we depict these three local rows and all columns (0-11).

.vb
           0 1 2 3 4 5 6 7 8 9 10 11
          --------------------------
   row 3  |o o o d d d o o o o  o  o
   row 4  |o o o d d d o o o o  o  o
   row 5  |o o o d d d o o o o  o  o
          --------------------------
.ve

   Thus, any entries in the d locations are stored in the d (diagonal)
   submatrix, and any entries in the o locations are stored in the
   o (off-diagonal) submatrix. Note that the d and the o submatrices are
   stored simply in the MATSEQBAIJ format for compressed row storage.

   Now d_nz should indicate the number of block nonzeros per row in the d matrix,
   and o_nz should indicate the number of block nonzeros per row in the o matrix.
   In general, for PDE problems in which most nonzeros are near the diagonal,
   one expects d_nz >> o_nz. For large problems you MUST preallocate memory
   or you will get TERRIBLE performance; see the users' manual chapter on
   matrices.

   You can call MatGetInfo() to get information on how effective the preallocation was;
   for example the fields mallocs, nz_allocated, nz_used, nz_unneeded.
   You can also run with the option -info and look for messages with the string
   malloc in them to see if additional memory allocation was needed.
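
   Example: a minimal call sequence (a sketch only; the block size and per-row
   counts below are illustrative, and comm/M stand for a user-supplied
   communicator and a global dimension divisible by bs):
.vb
   Mat A;
   MatCreate(comm,&A);
   MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,M);
   MatSetType(A,MATMPIBAIJ);
   MatSetFromOptions(A);
   /* bs = 2; roughly 5 diagonal and 2 off-diagonal block nonzeros per block row */
   MatMPIBAIJSetPreallocation(A,2,5,NULL,2,NULL);
.ve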
   Level: intermediate

.keywords: matrix, block, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateBAIJ(), MatMPIBAIJSetPreallocationCSR(), PetscSplitOwnership()
@*/
PetscErrorCode MatMPIBAIJSetPreallocation(Mat B,PetscInt bs,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(B,MAT_CLASSID,1);
  PetscValidType(B,1);
  PetscValidLogicalCollectiveInt(B,bs,2);
  ierr = PetscTryMethod(B,"MatMPIBAIJSetPreallocation_C",(Mat,PetscInt,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,bs,d_nz,d_nnz,o_nz,o_nnz));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCreateBAIJ"
/*@C
   MatCreateBAIJ - Creates a sparse parallel matrix in block AIJ format
   (block compressed row). For good matrix assembly performance
   the user should preallocate the matrix storage by setting the parameters
   d_nz (or d_nnz) and o_nz (or o_nnz). By setting these parameters accurately,
   performance can be increased by more than a factor of 50.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  bs - size of block
.  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
       This value should be the same as the local size used in creating the
       y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
       This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax.
.  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
.  d_nz - number of nonzero blocks per block row in diagonal portion of local
          submatrix (same for all local rows)
.  d_nnz - array containing the number of nonzero blocks in the various block rows
           in the diagonal portion of the local submatrix (possibly different for each block
           row) or NULL. If you plan to factor the matrix you must leave room for the diagonal entry
           and set it even if it is zero.
.  o_nz - number of nonzero blocks per block row in the off-diagonal portion of local
          submatrix (same for all local rows).
-  o_nnz - array containing the number of nonzero blocks in the various block rows of the
           off-diagonal portion of the local submatrix (possibly different for
           each block row) or NULL.

   Output Parameter:
.  A - the matrix

   Options Database Keys:
+   -mat_block_size - size of the blocks to use
-   -mat_use_hash_table <fact>

   It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
   MatXXXXSetPreallocation() paradigm instead of this routine directly.
   [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]

   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored

   A nonzero block is any block that has 1 or more nonzeros in it

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
   then it must be used on all processors that share the object for that argument.
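
   Example: a one-call sketch (sizes and counts are illustrative; comm and M are
   user-supplied, with M divisible by bs):
.vb
   Mat A;
   MatCreateBAIJ(comm,2,PETSC_DECIDE,PETSC_DECIDE,M,M,5,NULL,2,NULL,&A);
.ve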
   Storage Information:
   For a square global matrix we define each processor's diagonal portion
   to be its local rows and the corresponding columns (a square submatrix);
   each processor's off-diagonal portion encompasses the remainder of the
   local matrix (a rectangular submatrix).

   The user can specify preallocated storage for the diagonal part of
   the local submatrix with either d_nz or d_nnz (not both). Set
   d_nz=PETSC_DEFAULT and d_nnz=NULL for PETSc to control dynamic
   memory allocation. Likewise, specify preallocated storage for the
   off-diagonal part of the local submatrix with o_nz or o_nnz (not both).

   Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
   the figure below we depict these three local rows and all columns (0-11).

.vb
           0 1 2 3 4 5 6 7 8 9 10 11
          --------------------------
   row 3  |o o o d d d o o o o  o  o
   row 4  |o o o d d d o o o o  o  o
   row 5  |o o o d d d o o o o  o  o
          --------------------------
.ve

   Thus, any entries in the d locations are stored in the d (diagonal)
   submatrix, and any entries in the o locations are stored in the
   o (off-diagonal) submatrix. Note that the d and the o submatrices are
   stored simply in the MATSEQBAIJ format for compressed row storage.

   Now d_nz should indicate the number of block nonzeros per row in the d matrix,
   and o_nz should indicate the number of block nonzeros per row in the o matrix.
   In general, for PDE problems in which most nonzeros are near the diagonal,
   one expects d_nz >> o_nz. For large problems you MUST preallocate memory
   or you will get TERRIBLE performance; see the users' manual chapter on
   matrices.

   Level: intermediate

.keywords: matrix, block, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateBAIJ(), MatMPIBAIJSetPreallocation(), MatMPIBAIJSetPreallocationCSR()
@*/
PetscErrorCode MatCreateBAIJ(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    ierr = MatSetType(*A,MATMPIBAIJ);CHKERRQ(ierr);
    ierr = MatMPIBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(*A,MATSEQBAIJ);CHKERRQ(ierr);
    ierr = MatSeqBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIBAIJ"
static PetscErrorCode MatDuplicate_MPIBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
{
  Mat            mat;
  Mat_MPIBAIJ    *a,*oldmat = (Mat_MPIBAIJ*)matin->data;
  PetscErrorCode ierr;
  PetscInt       len=0;

  PetscFunctionBegin;
  *newmat = 0;
  ierr = MatCreate(PetscObjectComm((PetscObject)matin),&mat);CHKERRQ(ierr);
  ierr = MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);CHKERRQ(ierr);
  ierr = MatSetType(mat,((PetscObject)matin)->type_name);CHKERRQ(ierr);
  ierr = PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));CHKERRQ(ierr);

  mat->factortype   = matin->factortype;
  mat->preallocated = PETSC_TRUE;
  mat->assembled    = PETSC_TRUE;
  mat->insertmode   = NOT_SET_VALUES;

  a             = (Mat_MPIBAIJ*)mat->data;
  mat->rmap->bs = matin->rmap->bs;
  a->bs2        = oldmat->bs2;
  a->mbs        = oldmat->mbs;
  a->nbs        = oldmat->nbs;
  a->Mbs        = oldmat->Mbs;
  a->Nbs        = oldmat->Nbs;

  ierr = PetscLayoutReference(matin->rmap,&mat->rmap);CHKERRQ(ierr);
  ierr = PetscLayoutReference(matin->cmap,&mat->cmap);CHKERRQ(ierr);

  a->size         = oldmat->size;
  a->rank         = oldmat->rank;
  a->donotstash   = oldmat->donotstash;
  a->roworiented  = oldmat->roworiented;
  a->rowindices   = 0;
  a->rowvalues    = 0;
  a->getrowactive = PETSC_FALSE;
  a->barray       = 0;
  a->rstartbs     = oldmat->rstartbs;
  a->rendbs       = oldmat->rendbs;
  a->cstartbs     = oldmat->cstartbs;
  a->cendbs       = oldmat->cendbs;

  /* hash table stuff */
  a->ht           = 0;
  a->hd           = 0;
  a->ht_size      = 0;
  a->ht_flag      = oldmat->ht_flag;
  a->ht_fact      = oldmat->ht_fact;
  a->ht_total_ct  = 0;
  a->ht_insert_ct = 0;

  ierr = PetscMemcpy(a->rangebs,oldmat->rangebs,(a->size+1)*sizeof(PetscInt));CHKERRQ(ierr);
  if (oldmat->colmap) {
#if defined(PETSC_USE_CTABLE)
    ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
#else
    ierr = PetscMalloc((a->Nbs)*sizeof(PetscInt),&a->colmap);CHKERRQ(ierr);
    ierr = PetscLogObjectMemory((PetscObject)mat,(a->Nbs)*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(PetscInt));CHKERRQ(ierr);
#endif
  } else a->colmap = 0;

  if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
    ierr = PetscMalloc(len*sizeof(PetscInt),&a->garray);CHKERRQ(ierr);
    ierr = PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));CHKERRQ(ierr);
  } else a->garray = 0;

  ierr = MatStashCreate_Private(PetscObjectComm((PetscObject)matin),matin->rmap->bs,&mat->bstash);CHKERRQ(ierr);
  ierr = VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);CHKERRQ(ierr);
  ierr = VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);CHKERRQ(ierr);

  ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);CHKERRQ(ierr);
  ierr = MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);CHKERRQ(ierr);
  ierr = PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);CHKERRQ(ierr);

  *newmat = mat;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatLoad_MPIBAIJ"
PetscErrorCode MatLoad_MPIBAIJ(Mat newmat,PetscViewer viewer)
{
  PetscErrorCode ierr;
  int            fd;
  PetscInt       i,nz,j,rstart,rend;
  PetscScalar    *vals,*buf;
  MPI_Comm       comm;
  MPI_Status     status;
  PetscMPIInt    rank,size,maxnz;
  PetscInt       header[4],*rowlengths = 0,M,N,m,*rowners,*cols;
  PetscInt       *locrowlens = NULL,*procsnz = NULL,*browners = NULL;
  PetscInt       jj,*mycols,*ibuf,bs = 1,Mbs,mbs,extra_rows,mmax;
  PetscMPIInt    tag = ((PetscObject)viewer)->tag;
  PetscInt       *dlens = NULL,*odlens = NULL,*mask = NULL,*masked1 = NULL,*masked2 = NULL,rowcount,odcount;
  PetscInt       dcount,kmax,k,nzcount,tmp,mend,sizesset = 1,grows,gcols;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)viewer,&comm);CHKERRQ(ierr);
  ierr = PetscOptionsBegin(comm,NULL,"Options for loading MPIBAIJ matrix 2","Mat");CHKERRQ(ierr);
  ierr = PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);CHKERRQ(ierr);
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
    ierr = PetscBinaryRead(fd,(char*)header,4,PETSC_INT);CHKERRQ(ierr);
    if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
  }

  if (newmat->rmap->n < 0 && newmat->rmap->N < 0 && newmat->cmap->n < 0 && newmat->cmap->N < 0) sizesset = 0;

  ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
  M = header[1]; N = header[2];

  /* If global rows/cols are set to PETSC_DECIDE, set them to the sizes given in the file */
  if (sizesset && newmat->rmap->N < 0) newmat->rmap->N = M;
  if (sizesset && newmat->cmap->N < 0) newmat->cmap->N = N;

  /* If global sizes are set, check that they are consistent with those given in the file */
  if (sizesset) {
    ierr = MatGetSize(newmat,&grows,&gcols);CHKERRQ(ierr);
  }
  if (sizesset && newmat->rmap->N != grows) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows: Matrix in file has (%D) and input matrix has (%D)",M,grows);
  if (sizesset && newmat->cmap->N != gcols) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols: Matrix in file has (%D) and input matrix has (%D)",N,gcols);

  if (M != N) SETERRQ(PetscObjectComm((PetscObject)viewer),PETSC_ERR_SUP,"Can only do square matrices");

  /*
     This code adds extra rows to make sure the number of rows is
     divisible by the blocksize
  */
  Mbs        = M/bs;
  extra_rows = bs - M + bs*Mbs;
  if (extra_rows == bs) extra_rows = 0;
  else Mbs++;
  if (extra_rows && !rank) {
    ierr = PetscInfo(viewer,"Padding loaded matrix to match blocksize\n");CHKERRQ(ierr);
  }

  /* determine ownership of all rows */
  if (newmat->rmap->n < 0) { /* PETSC_DECIDE */
    mbs = Mbs/size + ((Mbs % size) > rank);
    m   = mbs*bs;
  } else { /* User set */
    m   = newmat->rmap->n;
    mbs = m/bs;
  }
  ierr = PetscMalloc2(size+1,PetscInt,&rowners,size+1,PetscInt,&browners);CHKERRQ(ierr);
  ierr = MPI_Allgather(&mbs,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);

  /* process 0 needs enough room for the process with the most rows */
  if (!rank) {
    mmax = rowners[1];
    for (i=2; i<=size; i++) {
      mmax = PetscMax(mmax,rowners[i]);
    }
    mmax *= bs;
  } else mmax = -1; /* unused, but the compiler warns anyway */

  rowners[0] = 0;
  for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
  for (i=0; i<=size; i++) browners[i] = rowners[i]*bs;
  rstart = rowners[rank];
  rend   = rowners[rank+1];

  /* distribute row lengths to all processors */
  ierr = PetscMalloc(m*sizeof(PetscInt),&locrowlens);CHKERRQ(ierr);
  if (!rank) {
    mend = m;
    if (size == 1) mend = mend - extra_rows;
    ierr = PetscBinaryRead(fd,locrowlens,mend,PETSC_INT);CHKERRQ(ierr);
    for (j=mend; j<m; j++) locrowlens[j] = 1;
    ierr = PetscMalloc(mmax*sizeof(PetscInt),&rowlengths);CHKERRQ(ierr);
    ierr = PetscMalloc(size*sizeof(PetscInt),&procsnz);CHKERRQ(ierr);
    ierr = PetscMemzero(procsnz,size*sizeof(PetscInt));CHKERRQ(ierr);
    for (j=0; j<m; j++) {
      procsnz[0] += locrowlens[j];
    }
    for (i=1; i<size; i++) {
      mend = browners[i+1] - browners[i];
      if (i == size-1) mend = mend - extra_rows;
      ierr = PetscBinaryRead(fd,rowlengths,mend,PETSC_INT);CHKERRQ(ierr);
      for (j=mend; j<browners[i+1] - browners[i]; j++) rowlengths[j] = 1;
      /* calculate the number of nonzeros on each processor */
      for (j=0; j<browners[i+1]-browners[i]; j++) {
        procsnz[i] += rowlengths[j];
      }
      ierr = MPI_Send(rowlengths,browners[i+1]-browners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(rowlengths);CHKERRQ(ierr);
  } else {
    ierr = MPI_Recv(locrowlens,m,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
  }

  if (!rank) {
    /* determine max buffer needed and allocate it */
    maxnz = procsnz[0];
    for (i=1; i<size; i++) {
      maxnz = PetscMax(maxnz,procsnz[i]);
    }
    ierr = PetscMalloc(maxnz*sizeof(PetscInt),&cols);CHKERRQ(ierr);

    /* read in my part of the matrix column indices */
    nz     = procsnz[0];
    ierr   = PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);CHKERRQ(ierr);
    mycols = ibuf;
    if (size == 1) nz -= extra_rows;
    ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
    if (size == 1) {
      for (i=0; i<extra_rows; i++) mycols[nz+i] = M+i;
    }

    /* read in everyone else's part (except the last proc's) and ship it off */
    for (i=1; i<size-1; i++) {
      nz   = procsnz[i];
      ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
      ierr = MPI_Send(cols,nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
    }
    /* read in the stuff for the last proc */
    if (size != 1) {
      nz   = procsnz[size-1] - extra_rows; /* the extra rows are not on the disk */
      ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
      for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
      ierr = MPI_Send(cols,nz+extra_rows,MPIU_INT,size-1,tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(cols);CHKERRQ(ierr);
  } else {
    /* determine buffer space needed for message */
    nz = 0;
    for (i=0; i<m; i++) {
      nz += locrowlens[i];
    }
    ierr   = PetscMalloc((nz+1)*sizeof(PetscInt),&ibuf);CHKERRQ(ierr);
    mycols = ibuf;
    /* receive message of column indices */
    ierr = MPI_Recv(mycols,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
    ierr = MPI_Get_count(&status,MPIU_INT,&maxnz);CHKERRQ(ierr);
    if (maxnz != nz) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
  }

  /* loop over local rows, determining number of off-diagonal entries */
  ierr = PetscMalloc2(rend-rstart,PetscInt,&dlens,rend-rstart,PetscInt,&odlens);CHKERRQ(ierr);
  ierr = PetscMalloc3(Mbs,PetscInt,&mask,Mbs,PetscInt,&masked1,Mbs,PetscInt,&masked2);CHKERRQ(ierr);
  ierr = PetscMemzero(mask,Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(masked1,Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemzero(masked2,Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  rowcount = 0; nzcount = 0;
  for (i=0; i<mbs; i++) {
    dcount  = 0;
    odcount = 0;
    for (j=0; j<bs; j++) {
      kmax = locrowlens[rowcount];
      for (k=0; k<kmax; k++) {
        tmp = mycols[nzcount++]/bs;
        if (!mask[tmp]) {
          mask[tmp] = 1;
          if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp;
          else masked1[dcount++] = tmp;
        }
      }
      rowcount++;
    }

    dlens[i]  = dcount;
    odlens[i] = odcount;

    /* zero out the mask elements we set */
    for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
    for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
  }

  if (!sizesset) {
    ierr = MatSetSizes(newmat,m,m,M+extra_rows,N+extra_rows);CHKERRQ(ierr);
  }
  ierr = MatMPIBAIJSetPreallocation(newmat,bs,0,dlens,0,odlens);CHKERRQ(ierr);

  if (!rank) {
    ierr = PetscMalloc((maxnz+1)*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
    /* read in my part of the matrix numerical values */
    nz     = procsnz[0];
    vals   = buf;
    mycols = ibuf;
    if (size == 1) nz -= extra_rows;
    ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
    if (size == 1) {
      for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
    }

    /* insert into matrix */
    jj = rstart*bs;
    for (i=0; i<m; i++) {
      ierr    = MatSetValues_MPIBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
      mycols += locrowlens[i];
      vals   += locrowlens[i];
      jj++;
    }
    /* read in other processors' parts (except the last one's) and ship them out */
    for (i=1; i<size-1; i++) {
      nz   = procsnz[i];
      vals = buf;
      ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
      ierr = MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newmat)->tag,comm);CHKERRQ(ierr);
    }
    /* the last proc */
    if (size != 1) {
      nz   = procsnz[size-1] - extra_rows;
      vals = buf;
      ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
      for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
      ierr = MPIULong_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,((PetscObject)newmat)->tag,comm);CHKERRQ(ierr);
    }
    ierr = PetscFree(procsnz);CHKERRQ(ierr);
  } else {
    /* receive numeric values */
    ierr = PetscMalloc((nz+1)*sizeof(PetscScalar),&buf);CHKERRQ(ierr);

    /* receive message of values */
    vals   = buf;
    mycols = ibuf;
    ierr   = MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newmat)->tag,comm);CHKERRQ(ierr);

    /* insert into matrix */
    jj = rstart*bs;
    for (i=0; i<m; i++) {
      ierr    = MatSetValues_MPIBAIJ(newmat,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
      mycols += locrowlens[i];
      vals   += locrowlens[i];
      jj++;
    }
  }
  ierr = PetscFree(locrowlens);CHKERRQ(ierr);
  ierr = PetscFree(buf);CHKERRQ(ierr);
  ierr = PetscFree(ibuf);CHKERRQ(ierr);
  ierr = PetscFree2(rowners,browners);CHKERRQ(ierr);
  ierr = PetscFree2(dlens,odlens);CHKERRQ(ierr);
  ierr = PetscFree3(mask,masked1,masked2);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(newmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJSetHashTableFactor"
/*@
   MatMPIBAIJSetHashTableFactor - Sets the factor required to compute the size of the matrix assembly hash table.

   Not Collective; each process can use a different factor

   Input Parameters:
+  mat  - the matrix
-  fact - the factor used to compute the hash table size

   Level: advanced

   Notes:
   This can also be set by the command line option: -mat_use_hash_table <fact>

.keywords: matrix, hashtable, factor, HT

.seealso: MatSetOption()
@*/
PetscErrorCode MatMPIBAIJSetHashTableFactor(Mat mat,PetscReal fact)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscTryMethod(mat,"MatSetHashTableFactor_C",(Mat,PetscReal),(mat,fact));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetHashTableFactor_MPIBAIJ"
PetscErrorCode MatSetHashTableFactor_MPIBAIJ(Mat mat,PetscReal fact)
{
  Mat_MPIBAIJ *baij;

  PetscFunctionBegin;
  baij          = (Mat_MPIBAIJ*)mat->data;
  baij->ht_fact = fact;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMPIBAIJGetSeqBAIJ"
PetscErrorCode MatMPIBAIJGetSeqBAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
{
  Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;

  PetscFunctionBegin;
  *Ad     = a->A;
  *Ao     = a->B;
  *colmap = a->garray;
  PetscFunctionReturn(0);
}

/*
    Special version for direct calls from Fortran (to eliminate two function call overheads)
*/
#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define matmpibaijsetvaluesblocked_ MATMPIBAIJSETVALUESBLOCKED
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define matmpibaijsetvaluesblocked_ matmpibaijsetvaluesblocked
#endif

#undef __FUNCT__
#define __FUNCT__ "matmpibaijsetvaluesblocked_"
/*@C
   MatMPIBAIJSetValuesBlocked - Direct Fortran call to replace a call to MatSetValuesBlocked()

   Collective on Mat

   Input Parameters:
+  mat    - the matrix
.  min    - number of input rows
.  im     - input rows
.  nin    - number of input columns
.  in     - input columns
.  v      - numerical values input
-  addvin - INSERT_VALUES or ADD_VALUES

   Notes: This has a complete copy of MatSetValuesBlocked_MPIBAIJ(), which is terrible code un-reuse.
   Level: advanced

.seealso: MatSetValuesBlocked()
@*/
PetscErrorCode matmpibaijsetvaluesblocked_(Mat *matin,PetscInt *min,const PetscInt im[],PetscInt *nin,const PetscInt in[],const MatScalar v[],InsertMode *addvin)
{
  /* convert input arguments to C version */
  Mat        mat  = *matin;
  PetscInt   m    = *min, n = *nin;
  InsertMode addv = *addvin;

  Mat_MPIBAIJ     *baij = (Mat_MPIBAIJ*)mat->data;
  const MatScalar *value;
  MatScalar       *barray      = baij->barray;
  PetscBool       roworiented  = baij->roworiented;
  PetscErrorCode  ierr;
  PetscInt        i,j,ii,jj,row,col,rstart=baij->rstartbs;
  PetscInt        rend=baij->rendbs,cstart=baij->cstartbs,stepval;
  PetscInt        cend=baij->cendbs,bs=mat->rmap->bs,bs2=baij->bs2;

  PetscFunctionBegin;
  /* tasks normally handled by MatSetValuesBlocked() */
  if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
#if defined(PETSC_USE_DEBUG)
  else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
  if (mat->factortype) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Not for factored matrix");
#endif
  if (mat->assembled) {
    mat->was_assembled = PETSC_TRUE;
    mat->assembled     = PETSC_FALSE;
  }
  ierr = PetscLogEventBegin(MAT_SetValues,mat,0,0,0);CHKERRQ(ierr);

  if (!barray) {
    ierr = PetscMalloc(bs2*sizeof(MatScalar),&barray);CHKERRQ(ierr);
    baij->barray = barray;
  }

  if (roworiented) stepval = (n-1)*bs;
  else stepval = (m-1)*bs;

  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= baij->Mbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %D max %D",im[i],baij->Mbs-1);
#endif
    if (im[i] >= rstart && im[i] < rend) {
      row = im[i] - rstart;
      for (j=0; j<n; j++) {
        /* if the block is already contiguous (n == 1 row-oriented, or m == 1 column-oriented) no copy is required */
        if ((roworiented) && (n == 1)) {
          barray = (MatScalar*)v + i*bs2;
        } else if ((!roworiented) && (m == 1)) {
          barray = (MatScalar*)v + j*bs2;
        } else { /* here a copy is required */
          if (roworiented) {
            value = v + i*(stepval+bs)*bs + j*bs;
          } else {
            value = v + j*(stepval+bs)*bs + i*bs;
          }
          for (ii=0; ii<bs; ii++,value+=stepval) {
            for (jj=0; jj<bs; jj++) {
              *barray++ = *value++;
            }
          }
          barray -= bs2;
        }

        if (in[j] >= cstart && in[j] < cend) {
          col  = in[j] - cstart;
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->A,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= baij->Nbs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %D max %D",in[j],baij->Nbs-1);
#endif
        else {
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = MatCreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
            }

#if defined(PETSC_USE_DEBUG)
#if defined(PETSC_USE_CTABLE)
            { PetscInt data;
              ierr = PetscTableFind(baij->colmap,in[j]+1,&data);CHKERRQ(ierr);
              if ((data - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
            }
#else
            if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Incorrect colmap");
#endif
#endif
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]+1,&col);CHKERRQ(ierr);
            col  = (col - 1)/bs;
#else
            col = (baij->colmap[in[j]] - 1)/bs;
#endif
            if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
              ierr = MatDisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
            }
          } else col = in[j];
          ierr = MatSetValuesBlocked_SeqBAIJ(baij->B,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        }
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        }
      }
    }
  }

  /* task normally handled by MatSetValuesBlocked() */
  ierr = PetscLogEventEnd(MAT_SetValues,mat,0,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatCreateMPIBAIJWithArrays"
/*@
   MatCreateMPIBAIJWithArrays - creates an MPI BAIJ matrix using arrays that contain
   the local rows in standard CSR format.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  bs - the block size, only a block size of 1 is supported
.  m - number of local rows (cannot be PETSC_DECIDE)
.  n - This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
       calculated if N is given) For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
.  i - row indices
.  j - column indices
-  a - matrix values

   Output Parameter:
.  mat - the matrix

   Level: intermediate

   Notes:
   The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
   thus you CANNOT change the matrix entries by changing the values of a[] after you have
   called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.

   The i and j indices are 0-based; the entries of i are offsets into the local j (and a)
   arrays, so i has length m+1 with i[0] = 0.

.keywords: matrix, aij, compressed row, sparse, parallel

.seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
          MPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
@*/
PetscErrorCode MatCreateMPIBAIJWithArrays(MPI_Comm comm,PetscInt bs,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
  if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
  ierr = MatCreate(comm,mat);CHKERRQ(ierr);
  ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
  ierr = MatSetType(*mat,MATMPIBAIJ);CHKERRQ(ierr);
  ierr = MatMPIBAIJSetPreallocationCSR(*mat,bs,i,j,a);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
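
/*
   A usage sketch for MatCreateMPIBAIJWithArrays(), not part of the library source;
   all sizes, indices, and values below are invented for illustration. Shown for a
   single process (in parallel each process would pass the CSR arrays describing
   its own local rows); here bs = 1, two local rows, six local columns:

      PetscInt    i[] = {0,2,3};        (row offsets: row 0 has 2 entries, row 1 has 1)
      PetscInt    j[] = {0,5,3};        (0-based global column indices)
      PetscScalar a[] = {1.0,2.0,3.0};  (the corresponding values)
      Mat         A;

      ierr = MatCreateMPIBAIJWithArrays(PETSC_COMM_WORLD,1,2,6,PETSC_DETERMINE,PETSC_DETERMINE,i,j,a,&A);CHKERRQ(ierr);
      ierr = MatDestroy(&A);CHKERRQ(ierr);
*/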