/*
   Routines to compute overlapping regions of a parallel MPI matrix
   and to find submatrices that are shared across processors.
*/
#include "src/mat/impls/baij/mpi/mpibaij.h"
#include "petscbt.h"

static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat,int,IS *);
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,int,char **,int*,int**);
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,int,int **,int**,int*);
EXTERN PetscErrorCode MatGetRow_MPIBAIJ(Mat,int,int*,int**,PetscScalar**);
EXTERN PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,int,int*,int**,PetscScalar**);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ"
PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,int imax,IS is[],int ov)
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  PetscErrorCode ierr;
  int            i,N=C->N,bs=c->bs;
  IS             *is_new;

  PetscFunctionBegin;
  ierr = PetscMalloc(imax*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,bs,imax,is,is_new);CHKERRQ(ierr);
  if (ov < 0) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n"); }
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<imax; i++) { ierr = ISDestroy(is[i]);CHKERRQ(ierr); }
  ierr = ISExpandIndicesGeneral(N,bs,imax,is_new,is);CHKERRQ(ierr);
  for (i=0; i<imax; i++) { ierr = ISDestroy(is_new[i]);CHKERRQ(ierr); }
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  Sample message format:
  If a processor A wants processor B to process some elements corresponding
  to index sets is[1] and is[5]
  mesg [0] = 2   (no of index sets in the mesg)
  -----------
  mesg [1] = 1   => is[1]
  mesg [2] = sizeof(is[1]);
  -----------
  mesg [3] = 5   => is[5]
  mesg [4] = sizeof(is[5]);
  -----------
  mesg [5]
  mesg [n]   data(is[1])
  -----------
  mesg[n+1]
  mesg[m]    data(is[5])
  -----------

  Notes:
  nrqs - no of requests sent (or to be sent out)
  nrqr - no of requests received (which have to be, or which have been, processed)
*/
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Once"
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,int imax,IS is[])
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  int            **idx,*n,*w1,*w2,*w3,*w4,*rtable,**data,len,*idx_i;
  PetscErrorCode ierr;
  int            size,rank,Mbs,i,j,k,**rbuf,row,proc,nrqs,msz,**outdat,**ptr;
  int            *ctr,*pa,*tmp,nrqr,*isz,*isz1,**xdata,**rbuf2;
  int            *onodes1,*olengths1,tag1,tag2,*onodes2,*olengths2;
  PetscBT        *table;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *s_status,*recv_status;

  PetscFunctionBegin;
  comm = C->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  len  = (imax+1)*sizeof(int*) + (imax + Mbs)*sizeof(int);
  ierr = PetscMalloc(len,&idx);CHKERRQ(ierr);
  n      = (int*)(idx + imax);
  rtable = n + imax;

  for (i=0; i<imax; i++) {
    ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
  }

  /* Create hash table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    len = c->rowners[i+1];
    for (; j<len; j++) {
      rtable[j] = i;
    }
  }

  /* evaluate communication - mesg to whom, length of mesg, and buffer space
     required.
     Based on this, buffers are allocated, and data copied into them */
  ierr = PetscMalloc(size*4*sizeof(int),&w1);CHKERRQ(ierr); /* mesg size */
  w2   = w1 + size;  /* if w2[i] marked, then there is a message to proc i */
  w3   = w2 + size;  /* no of IS that need to be sent to proc i */
  w4   = w3 + size;  /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialise work vector */
  for (i=0; i<imax; i++) {
    ierr  = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialise work vector */
    idx_i = idx[i];
    len   = n[i];
    for (j=0; j<len; j++) {
      row = idx_i[j];
      if (row < 0) {
        SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
      }
      proc = rtable[row];
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0; /* no of outgoing messages */
  msz      = 0; /* total mesg length (for all procs) */
  w1[rank] = 0; /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; } /* there exists a message to proc i */
  }
  /* pa - is a list of processors to communicate with */
  ierr = PetscMalloc((nrqs+1)*sizeof(int),&pa);CHKERRQ(ierr);
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and the from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  len  = 2*size*sizeof(int*) + (size+msz)*sizeof(int);
  ierr = PetscMalloc(len,&outdat);CHKERRQ(ierr);
  ptr  = outdat + size; /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(outdat,2*size*sizeof(int*));CHKERRQ(ierr);
  tmp  = (int*)(outdat + 2*size);
  ctr  = tmp + msz;

  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      outdat[j] = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* plug in the headers */
  for (i=0; i<nrqs; i++) {
    j            = pa[i];
    outdat[j][0] = 0;
    ierr         = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]       = outdat[j] + 2*w3[j] + 1;
  }

  /* Memory for doing the local proc's work */
  {
    int  *d_p;
    char *t_p;

    len  = (imax)*(sizeof(PetscBT) + sizeof(int*) + sizeof(int)) +
           (Mbs)*imax*sizeof(int) + (Mbs/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char) + 1;
    ierr = PetscMalloc(len,&table);CHKERRQ(ierr);
    ierr = PetscMemzero(table,len);CHKERRQ(ierr);
    data = (int**)(table + imax);
    isz  = (int*)(data + imax);
    d_p  = (int*)(isz + imax);
    t_p  = (char*)(d_p + Mbs*imax);
    for (i=0; i<imax; i++) {
      table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
      data[i]  = d_p + (Mbs)*i;
    }
  }

  /* Parse the IS and update the local tables and the outgoing buf with the data */
  {
    int     n_i,*data_i,isz_i,*outdat_j,ctr_j;
    PetscBT table_i;

    for (i=0; i<imax; i++) {
      ierr    = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
      n_i     = n[i];
      table_i = table[i];
      idx_i   = idx[i];
      data_i  = data[i];
      isz_i   = isz[i];
      for (j=0; j<n_i; j++) {  /* parse the indices of each IS */
        row  = idx_i[j];
        proc = rtable[row];
        if (proc != rank) { /* copy to the outgoing buffer */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        } else { /* Update the local table */
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
      }
      /* Update the headers for the current IS */
      for (j=0; j<size; j++) { /* Can optimise this loop by using pa[] */
        if ((ctr_j = ctr[j])) {
          outdat_j        = outdat[j];
          k               = ++outdat_j[0];
          outdat_j[2*k]   = ctr_j;
          outdat_j[2*k-1] = i;
        }
      }
      isz[i] = isz_i;
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(outdat[j],w1[j],MPI_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* No longer need the original indices */
  for (i=0; i<imax; ++i) {
    ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
  }
  ierr = PetscFree(idx);CHKERRQ(ierr);

  for (i=0; i<imax; ++i) {
    ierr = ISDestroy(is[i]);CHKERRQ(ierr);
  }

  /* Do the local work */
  ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);

  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);

  /* Phase 1 sends are complete - deallocate buffers */
  ierr = PetscFree(outdat);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);

  ierr = PetscMalloc((nrqr+1)*sizeof(int*),&xdata);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(int),&isz1);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf);CHKERRQ(ierr);

  /* Send the data back */
  /* Do a global reduction to know the buffer space required for incoming messages */
  {
    int *rw1;

    ierr = PetscMalloc(size*sizeof(int),&rw1);CHKERRQ(ierr);
    ierr = PetscMemzero(rw1,size*sizeof(int));CHKERRQ(ierr);

    for (i=0; i<nrqr; ++i) {
      proc = recv_status[i].MPI_SOURCE;
      if (proc != onodes1[i]) SETERRQ(PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
      rw1[proc] = isz1[i];
    }

    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree(olengths1);CHKERRQ(ierr);

    /* Determine the number of messages to expect, their lengths, and the from-ids */
    ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
    ierr = PetscFree(rw1);CHKERRQ(ierr);
  }
  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    j    = recv_status[i].MPI_SOURCE;
    ierr = MPI_Isend(xdata[i],isz1[i],MPI_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
  }

  /* receive the work done on other processors */
  {
    int        idex,is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
    PetscBT    table_i;
    MPI_Status *status2;

    ierr = PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr);

    for (i=0; i<nrqs; ++i) {
      ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
      /* Process the message */
      rbuf2_i = rbuf2[idex];
      ct1     = 2*rbuf2_i[0]+1;
      jmax    = rbuf2[idex][0];
      for (j=1; j<=jmax; j++) {
        max     = rbuf2_i[2*j];
        is_no   = rbuf2_i[2*j-1];
        isz_i   = isz[is_no];
        data_i  = data[is_no];
        table_i = table[is_no];
        for (k=0; k<max; k++,ct1++) {
          row = rbuf2_i[ct1];
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
        isz[is_no] = isz_i;
      }
    }
    ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);
    ierr = PetscFree(status2);CHKERRQ(ierr);
  }

  for (i=0; i<imax; ++i) {
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],is+i);CHKERRQ(ierr);
  }

  ierr = PetscFree(onodes2);CHKERRQ(ierr);
  ierr = PetscFree(olengths2);CHKERRQ(ierr);

  ierr = PetscFree(pa);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscFree(table);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);
  ierr = PetscFree(recv_status);CHKERRQ(ierr);
  ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
  ierr = PetscFree(xdata);CHKERRQ(ierr);
  ierr = PetscFree(isz1);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Local"
/*
   MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
   the work on the local processor.

   Inputs:
     C     - MAT_MPIBAIJ;
     imax  - total no of index sets processed at a time;
     table - an array of bit tables (PetscBT), each of size Mbs bits.

   Output:
     isz   - array containing the count of the solution elements corresponding
             to each index set;
     data  - pointer to the solutions
*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,int imax,PetscBT *table,int *isz,int **data)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
  Mat         A = c->A,B = c->B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
  int         start,end,val,max,rstart,cstart,*ai,*aj;
  int         *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
  PetscBT     table_i;

  PetscFunctionBegin;
  rstart = c->rstart;
  cstart = c->cstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0; i<imax; i++) {
    data_i  = data[i];
    table_i = table[i];
    isz_i   = isz[i];
    for (j=0,max=isz[i]; j<max; j++) {
      row   = data_i[j] - rstart;
      start = ai[row];
      end   = ai[row+1];
      for (k=start; k<end; k++) { /* Amat */
        val = aj[k] + cstart;
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
      start = bi[row];
      end   = bi[row+1];
      for (k=start; k<end; k++) { /* Bmat */
        val = garray[bj[k]];
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
    }
    isz[i] = isz_i;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Receive"
/*
   MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
   and return the output

   Input:
     C    - the matrix
     nrqr - no of messages being processed.
     rbuf - an array of pointers to the received requests

   Output:
     xdata - array of messages to be sent back
     isz1  - size of each message

   For better efficiency perhaps we should malloc each xdata[i] separately;
   then if a realloc is required we need only copy the data for that one row
   rather than for all previous rows, as is done now where a single large chunk
   of memory is used.

*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,int nrqr,int **rbuf,int **xdata,int *isz1)
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  Mat            A = c->A,B = c->B;
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
  PetscErrorCode ierr;
  int            rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
  int            row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
  int            val,max1,max2,rank,Mbs,no_malloc=0,*tmp,new_estimate,ctr;
  int            *rbuf_i,kmax,rbuf_0;
  PetscBT        xtable;

  PetscFunctionBegin;
  rank   = c->rank;
  Mbs    = c->Mbs;
  rstart = c->rstart;
  cstart = c->cstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct    += rbuf_0;
    for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
  }

  if (c->Mbs) max1 = ct*(a->nz + b->nz)/c->Mbs;
  else        max1 = 1;
  mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
  ierr = PetscMalloc(mem_estimate*sizeof(int),&xdata[0]);CHKERRQ(ierr);
  ++no_malloc;
  ierr = PetscBTCreate(Mbs,xtable);CHKERRQ(ierr);
  ierr = PetscMemzero(isz1,nrqr*sizeof(int));CHKERRQ(ierr);

  ct3 = 0;
  for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct1    = 2*rbuf_0+1;
    ct2    = ct1;
    ct3   += ct1;
    for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i */
      ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr);
      oct2 = ct2;
      kmax = rbuf_i[2*j];
      for (k=0; k<kmax; k++,ct1++) {
        row = rbuf_i[ct1];
        if (!PetscBTLookupSet(xtable,row)) {
          if (!(ct3 < mem_estimate)) {
            new_estimate = (int)(1.5*mem_estimate)+1;
            ierr = PetscMalloc(new_estimate*sizeof(int),&tmp);CHKERRQ(ierr);
            ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
            ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
            xdata[0] = tmp;
            mem_estimate = new_estimate; ++no_malloc;
            for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
          }
          xdata[i][ct2++] = row;
          ct3++;
        }
      }
      for (k=oct2,max2=ct2; k<max2; k++) {
        row   = xdata[i][k] - rstart;
        start = ai[row];
        end   = ai[row+1];
        for (l=start; l<end; l++) {
          val = aj[l] + cstart;
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (int)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(int),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0] = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
        start = bi[row];
        end   = bi[row+1];
        for (l=start; l<end; l++) {
          val = garray[bj[l]];
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (int)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(int),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0] = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header */
      xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var */
      xdata[i][2*j-1] = rbuf_i[2*j-1];
    }
    xdata[i][0] = rbuf_0;
    xdata[i+1]  = xdata[i] + ct2;
    isz1[i]     = ct2; /* size of each message */
  }
  ierr = PetscBTDestroy(xtable);CHKERRQ(ierr);
  PetscLogInfo(0,"MatIncreaseOverlap_MPIBAIJ:[%d] Allocated %d bytes, required %d, no of mallocs = %d\n",rank,mem_estimate,ct3,no_malloc);
  PetscFunctionReturn(0);
}

static PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat,int,const IS[],const IS[],MatReuse,Mat *);

#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIBAIJ"
PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  IS             *isrow_new,*iscol_new;
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  PetscErrorCode ierr;
  int            nmax,nstages_local,nstages,i,pos,max_no,N=C->N,bs=c->bs;

  PetscFunctionBegin;
  /* The compression and expansion should be avoided: it doesn't flag errors
     and it might change the indices, hence it is buggy */

  ierr = PetscMalloc(2*(ismax+1)*sizeof(IS),&isrow_new);CHKERRQ(ierr);
  iscol_new = isrow_new + ismax;
  ierr = ISCompressIndicesSorted(N,bs,ismax,isrow,isrow_new);CHKERRQ(ierr);
  ierr = ISCompressIndicesSorted(N,bs,ismax,iscol,iscol_new);CHKERRQ(ierr);

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (c->Nbs * sizeof(int));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);
  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }

  for (i=0; i<ismax; i++) {
    ierr = ISDestroy(isrow_new[i]);CHKERRQ(ierr);
    ierr = ISDestroy(iscol_new[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(isrow_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#if defined (PETSC_USE_CTABLE)
#undef __FUNCT__
#define __FUNCT__ "PetscGetProc"
PetscErrorCode PetscGetProc(const PetscInt row,const PetscMPIInt size,const PetscInt proc_gnode[],PetscMPIInt *rank)
{
  PetscInt    nGlobalNd = proc_gnode[size];
  PetscMPIInt fproc = (int)((float)row * (float)size / (float)nGlobalNd + 0.5);

  PetscFunctionBegin;
  if (fproc > size) fproc = size;
  while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) {
    if (row < proc_gnode[fproc]) fproc--;
    else                         fproc++;
  }
  *rank = fproc;
  PetscFunctionReturn(0);
}
#endif

/* -------------------------------------------------------------------------*/
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIBAIJ_local"
static PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,int ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  Mat            A = c->A;
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
  int            **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,start,end,size;
  PetscErrorCode ierr;
  int            **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,**rbuf1,row,proc;
  int            nrqs,msz,**ptr,idex,*req_size,*ctr,*pa,*tmp,tcol,nrqr;
  int            **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
  int            **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i;
  int            len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
  int            bs=c->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
  int            cstart = c->cstart,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
  int            *bmap = c->garray,ctmp,rstart=c->rstart,tag0,tag1,tag2,tag3;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
  MPI_Request    *r_waits4,*s_waits3,*s_waits4;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
  MPI_Status     *r_status3,*r_status4,*s_status4;
  MPI_Comm       comm;
  MatScalar      **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i,*vworkA,*vworkB;
  MatScalar      *a_a=a->a,*b_a=b->a;
  PetscTruth     flag;
  int            *onodes1,*olengths1;

#if defined (PETSC_USE_CTABLE)
  int        tt;
  PetscTable *rowmaps,*colmaps,lrow1_grow1,lcol1_gcol1;
#else
  int        **cmap,*cmap_i,*rtable,*rmap_i,**rmap,Mbs = c->Mbs;
#endif

  PetscFunctionBegin;
  comm = C->comm;
  tag0 = C->tag;
  size = c->size;
  rank = c->rank;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);

  /* Check if the col indices are sorted */
  for (i=0; i<ismax; i++) {
    ierr = ISSorted(iscol[i],(PetscTruth*)&j);CHKERRQ(ierr);
    if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"IS is not sorted");
  }

  len = (2*ismax+1)*(sizeof(int*) + sizeof(int));
#if !defined (PETSC_USE_CTABLE)
  len += (Mbs+1)*sizeof(int);
#endif
  ierr = PetscMalloc(len,&irow);CHKERRQ(ierr);
  icol = irow + ismax;
  nrow = (int*)(icol + ismax);
  ncol = nrow + ismax;
#if !defined (PETSC_USE_CTABLE)
  rtable = ncol + ismax;
  /* Create hash table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = c->rowners[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }
#endif

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* evaluate communication - mesg to whom, length of mesg, and buffer space
     required.
     Based on this, buffers are allocated, and data copied into them */
  ierr = PetscMalloc(size*4*sizeof(int),&w1);CHKERRQ(ierr); /* mesg size */
  w2   = w1 + size;  /* if w2[i] marked, then there is a message to proc i */
  w3   = w2 + size;  /* no of IS that need to be sent to proc i */
  w4   = w3 + size;  /* temp work space used in determining w1, w2, w3 */
  ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialise work vector */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialise work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row = irow_i[j];
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0; /* no of outgoing messages */
  msz      = 0; /* total mesg length for all procs */
  w1[rank] = 0; /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; } /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(int),&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and the from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);

  ierr = PetscFree(onodes1);CHKERRQ(ierr);
  ierr = PetscFree(olengths1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  len  = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
  ierr = PetscMalloc(len,&sbuf1);CHKERRQ(ierr);
  ptr  = sbuf1 + size; /* Pointers to the data in outgoing buffers */
  ierr = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
  /* allocate memory for outgoing data + buf to receive the first reply */
  tmp = (int*)(ptr + size);
  ctr = tmp + 2*msz;

  {
    int *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialise the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row = irow_i[j];
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j = sbuf1[j];
        k = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the buffer size */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(int*),&rbuf2);CHKERRQ(ierr);
  rbuf2[0] = tmp + msz;
  for (i=1; i<nrqs; ++i) {
    j        = pa[i];
    rbuf2[i] = rbuf2[i-1] + w1[pa[i-1]];
  }
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Irecv(rbuf2[i],w1[j],MPI_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Send to other procs the buf size they should allocate */

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  len  = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*);
  ierr = PetscMalloc(len,&sbuf2);CHKERRQ(ierr);
  req_size   = (int*)(sbuf2 + nrqr);
  req_source = req_size + nrqr;

  {
    Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
    int         *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;

    for (i=0; i<nrqr; ++i) {
      ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      req_size[idex] = 0;
      rbuf1_i        = rbuf1[idex];
      start          = 2*rbuf1_i[0] + 1;
      ierr = MPI_Get_count(r_status1+i,MPI_INT,&end);CHKERRQ(ierr);
      ierr = PetscMalloc(end*sizeof(int),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i        = sbuf2[idex];
      for (j=start; j<end; j++) {
        id              = rbuf1_i[j] - rstart;
        ncols           = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
        sbuf2_i[j]      = ncols;
        req_size[idex] += ncols;
      }
      req_source[idex] = r_status1[i].MPI_SOURCE;
      /* form the header */
      sbuf2_i[0] = req_size[idex];
      for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
      ierr = MPI_Isend(sbuf2_i,end,MPI_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);

  /* recv buffer sizes */
  /* Receive messages */

  ierr = PetscMalloc((nrqs+1)*sizeof(int*),&rbuf3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);

  for (i=0; i<nrqs; ++i) {
    ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr);
    ierr = PetscMalloc(rbuf2[idex][0]*sizeof(int),&rbuf3[idex]);CHKERRQ(ierr);
    ierr = PetscMalloc(rbuf2[idex][0]*bs2*sizeof(MatScalar),&rbuf4[idex]);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPI_INT,
                     r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,
                     r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr);
  }
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);

  /* Wait on sends1 and sends2 */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);

  ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Now allocate buffers for a->j, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(int*),&sbuf_aj);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*sizeof(int),&sbuf_aj[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr);
  {
    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aj_i = sbuf_aj[i];
      ct1       = 2*rbuf1_i[0] + 1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1[i][2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];

          /* load the column indices for this row into cols */
          cols = sbuf_aj_i + ct2;
          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
            else break;
          }
          imark = l;
          for (l=0; l<nzA; l++)     cols[imark+l] = cstart + cworkA[l];
          for (l=imark; l<nzB; l++) cols[nzA+l]   = bmap[cworkB[l]];
          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr);

  /* Allocate buffers for a->a, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(MatScalar*),&sbuf_aa);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr);
  {
    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aa_i = sbuf_aa[i];
      ct1       = 2*rbuf1_i[0]+1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1_i[2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
          vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;

          /* load the column values for this row into vals */
          vals = sbuf_aa_i + ct2*bs2;
          for (l=0; l<nzB; l++) {
            if ((bmap[cworkB[l]]) < cstart) {
              ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
            } else break;
          }
          imark = l;
          for (l=0; l<nzA; l++) {
            ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
          }
          for (l=imark; l<nzB; l++) {
            ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
          }
          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

  /* Form the matrix */
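  /*
     The remainder of this routine assembles the requested submatrices:
       1) build, for each IS, a map from global block column to local submatrix
          column (a PetscTable when PETSC_USE_CTABLE is defined, otherwise the
          dense array cmap);
       2) count the nonzeros per submatrix row (lens), first from the locally
          owned rows and then from the row data received from other processes;
       3) build the corresponding row maps (rowmaps or rmap);
       4) create the sequential BAIJ submatrices, or check and reset the existing
          ones when scall == MAT_REUSE_MATRIX;
       5) insert the locally owned rows, then the rows received off-process, and
          finally assemble each submatrix.
  */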
  /* create col map */
  {
    int *icol_i;
#if defined (PETSC_USE_CTABLE)
    /* Create col map */
    ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&colmaps);CHKERRQ(ierr);
    for (i=0; i<ismax; i++) {
      ierr = PetscTableCreate(ncol[i]+1,&colmaps[i]);CHKERRQ(ierr);
    }
#else
    len = (1+ismax)*sizeof(int*) + ismax*c->Nbs*sizeof(int);
    ierr = PetscMalloc(len,&cmap);CHKERRQ(ierr);
    cmap[0] = (int*)(cmap + ismax);
    ierr = PetscMemzero(cmap[0],(1+ismax*c->Nbs)*sizeof(int));CHKERRQ(ierr);
    for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + c->Nbs; }
#endif
    for (i=0; i<ismax; i++) {
      jmax   = ncol[i];
      icol_i = icol[i];
#if defined (PETSC_USE_CTABLE)
      lcol1_gcol1 = colmaps[i];
      for (j=0; j<jmax; j++) {
        ierr = PetscTableAdd(lcol1_gcol1,icol_i[j]+1,j+1);CHKERRQ(ierr);
      }
#else
      cmap_i = cmap[i];
      for (j=0; j<jmax; j++) {
        cmap_i[icol_i[j]] = j+1;
      }
#endif
    }
  }

  /* Create lens which is required for MatCreate... */
  for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
  len  = (1+ismax)*sizeof(int*) + j*sizeof(int);
  ierr = PetscMalloc(len,&lens);CHKERRQ(ierr);
  lens[0] = (int*)(lens + ismax);
  ierr = PetscMemzero(lens[0],j*sizeof(int));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }

  /* Update lens from local data */
  for (i=0; i<ismax; i++) {
    jmax = nrow[i];
#if defined (PETSC_USE_CTABLE)
    lcol1_gcol1 = colmaps[i];
#else
    cmap_i = cmap[i];
#endif
    irow_i = irow[i];
    lens_i = lens[i];
    for (j=0; j<jmax; j++) {
      row = irow_i[j];
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      if (proc == rank) {
        /* Get indices from matA and then from matB */
        row    = row - rstart;
        nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
        cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
#if defined (PETSC_USE_CTABLE)
        for (k=0; k<nzA; k++) {
          ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr);
          if (tt) { lens_i[j]++; }
        }
        for (k=0; k<nzB; k++) {
          ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr);
          if (tt) { lens_i[j]++; }
        }
#else
        for (k=0; k<nzA; k++) {
          if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; }
        }
        for (k=0; k<nzB; k++) {
          if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; }
        }
#endif
      }
    }
  }
#if defined (PETSC_USE_CTABLE)
  /* Create row map */
  ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&rowmaps);CHKERRQ(ierr);
  for (i=0; i<ismax; i++){
    ierr = PetscTableCreate(nrow[i]+1,&rowmaps[i]);CHKERRQ(ierr);
  }
#else
  /* Create row map */
  len  = (1+ismax)*sizeof(int*) + ismax*Mbs*sizeof(int);
  ierr = PetscMalloc(len,&rmap);CHKERRQ(ierr);
  rmap[0] = (int*)(rmap + ismax);
  ierr = PetscMemzero(rmap[0],ismax*Mbs*sizeof(int));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs; }
#endif
  for (i=0; i<ismax; i++) {
    irow_i = irow[i];
    jmax   = nrow[i];
#if defined (PETSC_USE_CTABLE)
    lrow1_grow1 = rowmaps[i];
    for (j=0; j<jmax; j++) {
      ierr = PetscTableAdd(lrow1_grow1,irow_i[j]+1,j+1);CHKERRQ(ierr);
    }
#else
    rmap_i = rmap[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
#endif
  }

  /* Update lens from offproc data */
  {
    int *rbuf2_i,*rbuf3_i,*sbuf1_i;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      ierr = MPI_Waitany(nrqs,r_waits3,&i,r_status3+tmp2);CHKERRQ(ierr);
      idex    = pa[i];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax+1;
      ct2     = 0;
      rbuf2_i = rbuf2[i];
      rbuf3_i = rbuf3[i];
      for (j=1; j<=jmax; j++) {
        is_no  = sbuf1_i[2*j-1];
        max1   = sbuf1_i[2*j];
        lens_i = lens[is_no];
#if defined (PETSC_USE_CTABLE)
        lcol1_gcol1 = colmaps[is_no];
        lrow1_grow1 = rowmaps[is_no];
#else
        cmap_i = cmap[is_no];
        rmap_i = rmap[is_no];
#endif
        for (k=0; k<max1; k++,ct1++) {
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(lrow1_grow1,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) { SETERRQ(PETSC_ERR_PLIB,"row not found in table"); }
#else
          row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
#endif
          max2 = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr);
            if (tt) {
              lens_i[row]++;
            }
#else
            if (cmap_i[rbuf3_i[ct2]]) {
              lens_i[row]++;
            }
#endif
          }
        }
      }
    }
  }
  ierr = PetscFree(r_status3);CHKERRQ(ierr);
  ierr = PetscFree(r_waits3);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);
  ierr = PetscFree(s_status3);CHKERRQ(ierr);
  ierr = PetscFree(s_waits3);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    /*
       Assumes new rows are same length as the old rows, hence bug!
    */
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqBAIJ*)(submats[i]->data);
      if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || mat->bs != bs)) {
        SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      }
      ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs*sizeof(int),&flag);CHKERRQ(ierr);
      if (flag == PETSC_FALSE) {
        SETERRQ(PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. wrong no of nonzeros");
      }
      /* Initial matrix as if empty */
      ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(int));CHKERRQ(ierr);
      submats[i]->factor = C->factor;
    }
  } else {
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,nrow[i]*bs,ncol[i]*bs,nrow[i]*bs,ncol[i]*bs,submats+i);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],A->type_name);CHKERRQ(ierr);
      ierr = MatSeqBAIJSetPreallocation(submats[i],a->bs,0,lens[i]);CHKERRQ(ierr);
      ierr = MatSeqSBAIJSetPreallocation(submats[i],a->bs,0,lens[i]);CHKERRQ(ierr);
    }
  }

  /* Assemble the matrices */
  /* First assemble the local rows */
  {
    int       ilen_row,*imat_ilen,*imat_j,*imat_i;
    MatScalar *imat_a;

    for (i=0; i<ismax; i++) {
      mat       = (Mat_SeqBAIJ*)submats[i]->data;
      imat_ilen = mat->ilen;
      imat_j    = mat->j;
      imat_i    = mat->i;
      imat_a    = mat->a;

#if defined (PETSC_USE_CTABLE)
      lcol1_gcol1 = colmaps[i];
      lrow1_grow1 = rowmaps[i];
#else
      cmap_i = cmap[i];
      rmap_i = rmap[i];
#endif
      irow_i = irow[i];
      jmax   = nrow[i];
      for (j=0; j<jmax; j++) {
        row = irow_i[j];
#if defined (PETSC_USE_CTABLE)
        ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
#else
        proc = rtable[row];
#endif
        if (proc == rank) {
          row    = row - rstart;
          nzA    = a_i[row+1] - a_i[row];
          nzB    = b_i[row+1] - b_i[row];
          cworkA = a_j + a_i[row];
          cworkB = b_j + b_i[row];
          vworkA = a_a + a_i[row]*bs2;
          vworkB = b_a + b_i[row]*bs2;
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(lrow1_grow1,row+rstart+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) { SETERRQ(PETSC_ERR_PLIB,"row not found in table"); }
#else
          row = rmap_i[row + rstart];
#endif
          mat_i    = imat_i[row];
          mat_a    = imat_a + mat_i*bs2;
          mat_j    = imat_j + mat_i;
          ilen_row = imat_ilen[row];

          /* load the column indices for this row into cols */
          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) {
#if defined (PETSC_USE_CTABLE)
              ierr = PetscTableFind(lcol1_gcol1,ctmp+1,&tcol);CHKERRQ(ierr);
              if (tcol) {
#else
              if ((tcol = cmap_i[ctmp])) {
#endif
                *mat_j++ = tcol - 1;
                ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                mat_a += bs2;
                ilen_row++;
              }
            } else break;
          }
          imark = l;
          for (l=0; l<nzA; l++) {
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr);
            if (tcol) {
#else
            if ((tcol = cmap_i[cstart + cworkA[l]])) {
#endif
              *mat_j++ = tcol - 1;
              ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
              mat_a += bs2;
              ilen_row++;
            }
          }
          for (l=imark; l<nzB; l++) {
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr);
            if (tcol) {
#else
            if ((tcol = cmap_i[bmap[cworkB[l]]])) {
#endif
              *mat_j++ = tcol - 1;
              ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
              mat_a += bs2;
              ilen_row++;
            }
          }
          imat_ilen[row] = ilen_row;
        }
      }
    }
  }

  /* Now assemble the off proc rows */
  {
    int       *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
    int       *imat_j,*imat_i;
    MatScalar *imat_a,*rbuf4_i;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      ierr = MPI_Waitany(nrqs,r_waits4,&i,r_status4+tmp2);CHKERRQ(ierr);
      idex    = pa[i];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax + 1;
      ct2     = 0;
      rbuf2_i = rbuf2[i];
      rbuf3_i = rbuf3[i];
      rbuf4_i = rbuf4[i];
      for (j=1; j<=jmax; j++) {
        is_no = sbuf1_i[2*j-1];
#if defined (PETSC_USE_CTABLE)
        lrow1_grow1 = rowmaps[is_no];
        lcol1_gcol1 = colmaps[is_no];
#else
        rmap_i = rmap[is_no];
        cmap_i = cmap[is_no];
#endif
        mat       = (Mat_SeqBAIJ*)submats[is_no]->data;
        imat_ilen = mat->ilen;
        imat_j    = mat->j;
        imat_i    = mat->i;
        imat_a    = mat->a;
        max1      = sbuf1_i[2*j];
        for (k=0; k<max1; k++,ct1++) {
          row = sbuf1_i[ct1];
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(lrow1_grow1,row+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) { SETERRQ(PETSC_ERR_PLIB,"row not found in table"); }
#else
          row = rmap_i[row];
#endif
          ilen  = imat_ilen[row];
          mat_i = imat_i[row];
          mat_a = imat_a + mat_i*bs2;
          mat_j = imat_j + mat_i;
          max2  = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
            if (tcol) {
#else
            if ((tcol = cmap_i[rbuf3_i[ct2]])) {
#endif
              *mat_j++ = tcol - 1;
              /* *mat_a++ = rbuf4_i[ct2]; */
              ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
              mat_a += bs2;
              ilen++;
            }
          }
          imat_ilen[row] = ilen;
        }
      }
    }
  }
  ierr = PetscFree(r_status4);CHKERRQ(ierr);
  ierr = PetscFree(r_waits4);CHKERRQ(ierr);
  ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);
  ierr = PetscFree(s_waits4);CHKERRQ(ierr);
  ierr = PetscFree(s_status4);CHKERRQ(ierr);

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
  ierr = PetscFree(irow);CHKERRQ(ierr);
  ierr = PetscFree(w1);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  ierr = PetscFree(sbuf1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }
  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
    ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(sbuf2);CHKERRQ(ierr);
  ierr = PetscFree(rbuf3);CHKERRQ(ierr);
  ierr = PetscFree(rbuf4);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);

#if defined (PETSC_USE_CTABLE)
  for (i=0; i<ismax; i++){
    ierr = PetscTableDelete(rowmaps[i]);CHKERRQ(ierr);
    ierr = PetscTableDelete(colmaps[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(colmaps);CHKERRQ(ierr);
  ierr = PetscFree(rowmaps);CHKERRQ(ierr);
#else
  ierr = PetscFree(rmap);CHKERRQ(ierr);
  ierr = PetscFree(cmap);CHKERRQ(ierr);
#endif
  ierr = PetscFree(lens);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}
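
/*
   A minimal usage sketch: it shows how the routines in this file are normally
   reached through the public PETSc interface, assuming a matrix A of type
   MATMPIBAIJ and the calling conventions of the same PETSc vintage as this
   file (MatGetSubMatrices, ISDestroy taking an IS). The variable names
   A, is, submats, rstart, and rend are illustrative only.

     PetscInt rstart,rend;
     IS       is;
     Mat      *submats;

     ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
     ierr = ISCreateStride(PETSC_COMM_SELF,rend-rstart,rstart,1,&is);CHKERRQ(ierr);

     Grow the index set by two levels of overlap; this dispatches to
     MatIncreaseOverlap_MPIBAIJ() above through the operations table.
     ierr = MatIncreaseOverlap(A,1,&is,2);CHKERRQ(ierr);

     MatGetSubMatrices_MPIBAIJ_local() requires sorted column indices.
     ierr = ISSort(is);CHKERRQ(ierr);
     ierr = MatGetSubMatrices(A,1,&is,&is,MAT_INITIAL_MATRIX,&submats);CHKERRQ(ierr);

     ... use submats[0], a sequential BAIJ matrix, on each process ...

     ierr = MatDestroyMatrices(1,&submats);CHKERRQ(ierr);
     ierr = ISDestroy(is);CHKERRQ(ierr);
*/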