1 2 /* 3 Routines to compute overlapping regions of a parallel MPI matrix 4 and to find submatrices that were shared across processors. 5 */ 6 #include <../src/mat/impls/baij/mpi/mpibaij.h> 7 #include <petscbt.h> 8 9 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**); 10 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*); 11 extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 12 extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 13 14 PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov) 15 { 16 PetscErrorCode ierr; 17 PetscInt i,N=C->cmap->N, bs=C->rmap->bs; 18 IS *is_new; 19 20 PetscFunctionBegin; 21 ierr = PetscMalloc1(imax,&is_new);CHKERRQ(ierr); 22 /* Convert the indices into block format */ 23 ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,imax,is,is_new);CHKERRQ(ierr); 24 if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n"); 25 for (i=0; i<ov; ++i) { 26 ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr); 27 } 28 for (i=0; i<imax; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);} 29 ierr = ISExpandIndicesGeneral(N,N,bs,imax,is_new,is);CHKERRQ(ierr); 30 for (i=0; i<imax; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);} 31 ierr = PetscFree(is_new);CHKERRQ(ierr); 32 PetscFunctionReturn(0); 33 } 34 35 /* 36 Sample message format: 37 If a processor A wants processor B to process some elements corresponding 38 to index sets is[1], is[5] 39 mesg [0] = 2 (no of index sets in the mesg) 40 ----------- 41 mesg [1] = 1 => is[1] 42 mesg [2] = sizeof(is[1]); 43 ----------- 44 mesg [5] = 5 => is[5] 45 mesg [6] = sizeof(is[5]); 46 ----------- 47 mesg [7] 48 mesg [n] data(is[1]) 49 ----------- 50 mesg[n+1] 51 mesg[m] data(is[5]) 52 ----------- 53 54 Notes: 55 nrqs - no of requests sent (or to be sent out) 56 nrqr - no of requests recieved (which have to be or which have been processed 57 */ 58 PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[]) 59 { 60 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 61 const PetscInt **idx,*idx_i; 62 PetscInt *n,*w3,*w4,**data,len; 63 PetscErrorCode ierr; 64 PetscMPIInt size,rank,tag1,tag2,*w2,*w1,nrqr; 65 PetscInt Mbs,i,j,k,**rbuf,row,proc=-1,nrqs,msz,**outdat,**ptr; 66 PetscInt *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p; 67 PetscMPIInt *onodes1,*olengths1,*onodes2,*olengths2; 68 PetscBT *table; 69 MPI_Comm comm; 70 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2; 71 MPI_Status *s_status,*recv_status; 72 char *t_p; 73 74 PetscFunctionBegin; 75 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 76 size = c->size; 77 rank = c->rank; 78 Mbs = c->Mbs; 79 80 ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr); 81 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 82 83 ierr = PetscMalloc2(imax+1,&idx,imax,&n);CHKERRQ(ierr); 84 85 for (i=0; i<imax; i++) { 86 ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr); 87 ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr); 88 } 89 90 /* evaluate communication - mesg to who,length of mesg, and buffer space 91 required. Based on this, buffers are allocated, and data copied into them*/ 92 ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); 93 for (i=0; i<imax; i++) { 94 ierr = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector*/ 95 idx_i = idx[i]; 96 len = n[i]; 97 for (j=0; j<len; j++) { 98 row = idx_i[j]; 99 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries"); 100 ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr); 101 w4[proc]++; 102 } 103 for (j=0; j<size; j++) { 104 if (w4[j]) { w1[j] += w4[j]; w3[j]++;} 105 } 106 } 107 108 nrqs = 0; /* no of outgoing messages */ 109 msz = 0; /* total mesg length (for all proc */ 110 w1[rank] = 0; /* no mesg sent to itself */ 111 w3[rank] = 0; 112 for (i=0; i<size; i++) { 113 if (w1[i]) {w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 114 } 115 /* pa - is list of processors to communicate with */ 116 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); 117 for (i=0,j=0; i<size; i++) { 118 if (w1[i]) {pa[j] = i; j++;} 119 } 120 121 /* Each message would have a header = 1 + 2*(no of IS) + data */ 122 for (i=0; i<nrqs; i++) { 123 j = pa[i]; 124 w1[j] += w2[j] + 2*w3[j]; 125 msz += w1[j]; 126 } 127 128 /* Determine the number of messages to expect, their lengths, from from-ids */ 129 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 130 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 131 132 /* Now post the Irecvs corresponding to these messages */ 133 ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr); 134 135 /* Allocate Memory for outgoing messages */ 136 ierr = PetscMalloc4(size,&outdat,size,&ptr,msz,&tmp,size,&ctr);CHKERRQ(ierr); 137 ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr); 138 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 139 { 140 PetscInt *iptr = tmp,ict = 0; 141 for (i=0; i<nrqs; i++) { 142 j = pa[i]; 143 iptr += ict; 144 outdat[j] = iptr; 145 ict = w1[j]; 146 } 147 } 148 149 /* Form the outgoing messages */ 150 /*plug in the headers*/ 151 for (i=0; i<nrqs; i++) { 152 j = pa[i]; 153 outdat[j][0] = 0; 154 ierr = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 155 ptr[j] = outdat[j] + 2*w3[j] + 1; 156 } 157 158 /* Memory for doing local proc's work*/ 159 { 160 ierr = PetscCalloc5(imax,&table, imax,&data, imax,&isz, Mbs*imax,&d_p, (Mbs/PETSC_BITS_PER_BYTE+1)*imax,&t_p);CHKERRQ(ierr); 161 162 for (i=0; i<imax; i++) { 163 table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i; 164 data[i] = d_p + (Mbs)*i; 165 } 166 } 167 168 /* Parse the IS and update local tables and the outgoing buf with the data*/ 169 { 170 PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j; 171 PetscBT table_i; 172 173 for (i=0; i<imax; i++) { 174 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 175 n_i = n[i]; 176 table_i = table[i]; 177 idx_i = idx[i]; 178 data_i = data[i]; 179 isz_i = isz[i]; 180 for (j=0; j<n_i; j++) { /* parse the indices of each IS */ 181 row = idx_i[j]; 182 ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr); 183 if (proc != rank) { /* copy to the outgoing buffer */ 184 ctr[proc]++; 185 *ptr[proc] = row; 186 ptr[proc]++; 187 } else { /* Update the local table */ 188 if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row; 189 } 190 } 191 /* Update the headers for the current IS */ 192 for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */ 193 if ((ctr_j = ctr[j])) { 194 outdat_j = outdat[j]; 195 k = ++outdat_j[0]; 196 outdat_j[2*k] = ctr_j; 197 outdat_j[2*k-1] = i; 198 } 199 } 200 isz[i] = isz_i; 201 } 202 } 203 204 /* Now post the sends */ 205 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 206 for (i=0; i<nrqs; ++i) { 207 j = pa[i]; 208 ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr); 209 } 210 211 /* No longer need the original indices*/ 212 for (i=0; i<imax; ++i) { 213 ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr); 214 } 215 ierr = PetscFree2(idx,n);CHKERRQ(ierr); 216 217 for (i=0; i<imax; ++i) { 218 ierr = ISDestroy(&is[i]);CHKERRQ(ierr); 219 } 220 221 /* Do Local work*/ 222 ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr); 223 224 /* Receive messages*/ 225 ierr = PetscMalloc1(nrqr+1,&recv_status);CHKERRQ(ierr); 226 if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);} 227 228 ierr = PetscMalloc1(nrqs+1,&s_status);CHKERRQ(ierr); 229 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);} 230 231 /* Phase 1 sends are complete - deallocate buffers */ 232 ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr); 233 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 234 235 ierr = PetscMalloc1(nrqr+1,&xdata);CHKERRQ(ierr); 236 ierr = PetscMalloc1(nrqr+1,&isz1);CHKERRQ(ierr); 237 ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr); 238 ierr = PetscFree(rbuf[0]);CHKERRQ(ierr); 239 ierr = PetscFree(rbuf);CHKERRQ(ierr); 240 241 /* Send the data back*/ 242 /* Do a global reduction to know the buffer space req for incoming messages*/ 243 { 244 PetscMPIInt *rw1; 245 246 ierr = PetscCalloc1(size,&rw1);CHKERRQ(ierr); 247 248 for (i=0; i<nrqr; ++i) { 249 proc = recv_status[i].MPI_SOURCE; 250 if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch"); 251 rw1[proc] = isz1[i]; 252 } 253 254 ierr = PetscFree(onodes1);CHKERRQ(ierr); 255 ierr = PetscFree(olengths1);CHKERRQ(ierr); 256 257 /* Determine the number of messages to expect, their lengths, from from-ids */ 258 ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr); 259 ierr = PetscFree(rw1);CHKERRQ(ierr); 260 } 261 /* Now post the Irecvs corresponding to these messages */ 262 ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr); 263 264 /* Now post the sends */ 265 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 266 for (i=0; i<nrqr; ++i) { 267 j = recv_status[i].MPI_SOURCE; 268 ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr); 269 } 270 271 /* receive work done on other processors*/ 272 { 273 PetscMPIInt idex; 274 PetscInt is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax; 275 PetscBT table_i; 276 MPI_Status *status2; 277 278 ierr = PetscMalloc1(PetscMax(nrqr,nrqs)+1,&status2);CHKERRQ(ierr); 279 for (i=0; i<nrqs; ++i) { 280 ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr); 281 /* Process the message*/ 282 rbuf2_i = rbuf2[idex]; 283 ct1 = 2*rbuf2_i[0]+1; 284 jmax = rbuf2[idex][0]; 285 for (j=1; j<=jmax; j++) { 286 max = rbuf2_i[2*j]; 287 is_no = rbuf2_i[2*j-1]; 288 isz_i = isz[is_no]; 289 data_i = data[is_no]; 290 table_i = table[is_no]; 291 for (k=0; k<max; k++,ct1++) { 292 row = rbuf2_i[ct1]; 293 if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row; 294 } 295 isz[is_no] = isz_i; 296 } 297 } 298 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);} 299 ierr = PetscFree(status2);CHKERRQ(ierr); 300 } 301 302 for (i=0; i<imax; ++i) { 303 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr); 304 } 305 306 307 ierr = PetscFree(onodes2);CHKERRQ(ierr); 308 ierr = PetscFree(olengths2);CHKERRQ(ierr); 309 310 ierr = PetscFree(pa);CHKERRQ(ierr); 311 ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr); 312 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 313 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 314 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 315 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 316 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 317 ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr); 318 ierr = PetscFree(s_status);CHKERRQ(ierr); 319 ierr = PetscFree(recv_status);CHKERRQ(ierr); 320 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 321 ierr = PetscFree(xdata);CHKERRQ(ierr); 322 ierr = PetscFree(isz1);CHKERRQ(ierr); 323 PetscFunctionReturn(0); 324 } 325 326 /* 327 MatIncreaseOverlap_MPIBAIJ_Local - Called by MatincreaseOverlap, to do 328 the work on the local processor. 329 330 Inputs: 331 C - MAT_MPIBAIJ; 332 imax - total no of index sets processed at a time; 333 table - an array of char - size = Mbs bits. 334 335 Output: 336 isz - array containing the count of the solution elements corresponding 337 to each index set; 338 data - pointer to the solutions 339 */ 340 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data) 341 { 342 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 343 Mat A = c->A,B = c->B; 344 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data; 345 PetscInt start,end,val,max,rstart,cstart,*ai,*aj; 346 PetscInt *bi,*bj,*garray,i,j,k,row,*data_i,isz_i; 347 PetscBT table_i; 348 349 PetscFunctionBegin; 350 rstart = c->rstartbs; 351 cstart = c->cstartbs; 352 ai = a->i; 353 aj = a->j; 354 bi = b->i; 355 bj = b->j; 356 garray = c->garray; 357 358 359 for (i=0; i<imax; i++) { 360 data_i = data[i]; 361 table_i = table[i]; 362 isz_i = isz[i]; 363 for (j=0,max=isz[i]; j<max; j++) { 364 row = data_i[j] - rstart; 365 start = ai[row]; 366 end = ai[row+1]; 367 for (k=start; k<end; k++) { /* Amat */ 368 val = aj[k] + cstart; 369 if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val; 370 } 371 start = bi[row]; 372 end = bi[row+1]; 373 for (k=start; k<end; k++) { /* Bmat */ 374 val = garray[bj[k]]; 375 if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val; 376 } 377 } 378 isz[i] = isz_i; 379 } 380 PetscFunctionReturn(0); 381 } 382 /* 383 MatIncreaseOverlap_MPIBAIJ_Receive - Process the recieved messages, 384 and return the output 385 386 Input: 387 C - the matrix 388 nrqr - no of messages being processed. 389 rbuf - an array of pointers to the recieved requests 390 391 Output: 392 xdata - array of messages to be sent back 393 isz1 - size of each message 394 395 For better efficiency perhaps we should malloc separately each xdata[i], 396 then if a remalloc is required we need only copy the data for that one row 397 rather than all previous rows as it is now where a single large chunck of 398 memory is used. 399 400 */ 401 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1) 402 { 403 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 404 Mat A = c->A,B = c->B; 405 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data; 406 PetscErrorCode ierr; 407 PetscInt rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k; 408 PetscInt row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end; 409 PetscInt val,max1,max2,Mbs,no_malloc =0,*tmp,new_estimate,ctr; 410 PetscInt *rbuf_i,kmax,rbuf_0; 411 PetscBT xtable; 412 413 PetscFunctionBegin; 414 Mbs = c->Mbs; 415 rstart = c->rstartbs; 416 cstart = c->cstartbs; 417 ai = a->i; 418 aj = a->j; 419 bi = b->i; 420 bj = b->j; 421 garray = c->garray; 422 423 424 for (i=0,ct=0,total_sz=0; i<nrqr; ++i) { 425 rbuf_i = rbuf[i]; 426 rbuf_0 = rbuf_i[0]; 427 ct += rbuf_0; 428 for (j=1; j<=rbuf_0; j++) total_sz += rbuf_i[2*j]; 429 } 430 431 if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs; 432 else max1 = 1; 433 mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1); 434 ierr = PetscMalloc1(mem_estimate,&xdata[0]);CHKERRQ(ierr); 435 ++no_malloc; 436 ierr = PetscBTCreate(Mbs,&xtable);CHKERRQ(ierr); 437 ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr); 438 439 ct3 = 0; 440 for (i=0; i<nrqr; i++) { /* for easch mesg from proc i */ 441 rbuf_i = rbuf[i]; 442 rbuf_0 = rbuf_i[0]; 443 ct1 = 2*rbuf_0+1; 444 ct2 = ct1; 445 ct3 += ct1; 446 for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/ 447 ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr); 448 oct2 = ct2; 449 kmax = rbuf_i[2*j]; 450 for (k=0; k<kmax; k++,ct1++) { 451 row = rbuf_i[ct1]; 452 if (!PetscBTLookupSet(xtable,row)) { 453 if (!(ct3 < mem_estimate)) { 454 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 455 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 456 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 457 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 458 xdata[0] = tmp; 459 mem_estimate = new_estimate; ++no_malloc; 460 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 461 } 462 xdata[i][ct2++] = row; 463 ct3++; 464 } 465 } 466 for (k=oct2,max2=ct2; k<max2; k++) { 467 row = xdata[i][k] - rstart; 468 start = ai[row]; 469 end = ai[row+1]; 470 for (l=start; l<end; l++) { 471 val = aj[l] + cstart; 472 if (!PetscBTLookupSet(xtable,val)) { 473 if (!(ct3 < mem_estimate)) { 474 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 475 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 476 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 477 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 478 xdata[0] = tmp; 479 mem_estimate = new_estimate; ++no_malloc; 480 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 481 } 482 xdata[i][ct2++] = val; 483 ct3++; 484 } 485 } 486 start = bi[row]; 487 end = bi[row+1]; 488 for (l=start; l<end; l++) { 489 val = garray[bj[l]]; 490 if (!PetscBTLookupSet(xtable,val)) { 491 if (!(ct3 < mem_estimate)) { 492 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 493 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 494 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 495 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 496 xdata[0] = tmp; 497 mem_estimate = new_estimate; ++no_malloc; 498 for (ctr =1; ctr <=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 499 } 500 xdata[i][ct2++] = val; 501 ct3++; 502 } 503 } 504 } 505 /* Update the header*/ 506 xdata[i][2*j] = ct2 - oct2; /* Undo the vector isz1 and use only a var*/ 507 xdata[i][2*j-1] = rbuf_i[2*j-1]; 508 } 509 xdata[i][0] = rbuf_0; 510 xdata[i+1] = xdata[i] + ct2; 511 isz1[i] = ct2; /* size of each message */ 512 } 513 ierr = PetscBTDestroy(&xtable);CHKERRQ(ierr); 514 ierr = PetscInfo3(C,"Allocated %D bytes, required %D, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr); 515 PetscFunctionReturn(0); 516 } 517 518 PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[]) 519 { 520 IS *isrow_block,*iscol_block; 521 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 522 PetscErrorCode ierr; 523 PetscInt nmax,nstages_local,nstages,i,pos,max_no,N=C->cmap->N,bs=C->rmap->bs; 524 Mat_SeqBAIJ *subc; 525 Mat_SubMat *smat; 526 527 PetscFunctionBegin; 528 /* The compression and expansion should be avoided. Doesn't point 529 out errors, might change the indices, hence buggey */ 530 ierr = PetscMalloc2(ismax+1,&isrow_block,ismax+1,&iscol_block);CHKERRQ(ierr); 531 ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,ismax,isrow,isrow_block);CHKERRQ(ierr); 532 ierr = ISCompressIndicesGeneral(N,C->cmap->n,bs,ismax,iscol,iscol_block);CHKERRQ(ierr); 533 534 /* Determine the number of stages through which submatrices are done */ 535 nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt)); 536 if (!nmax) nmax = 1; 537 538 if (scall == MAT_INITIAL_MATRIX) { 539 nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0); 540 541 /* Make sure every processor loops through the nstages */ 542 ierr = MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr); 543 544 ierr = PetscMalloc1(ismax+1,submat);CHKERRQ(ierr); 545 } else { 546 subc = (Mat_SeqBAIJ*)((*submat)[0]->data); 547 smat = subc->submatis1; 548 if (!smat) { 549 SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"MatGetSubMatrices(...,MAT_REUSE_MATRIX,...) requires submat"); 550 } 551 nstages = smat->nstages; 552 } 553 554 for (i=0,pos=0; i<nstages; i++) { 555 if (pos+nmax <= ismax) max_no = nmax; 556 else if (pos == ismax) max_no = 0; 557 else max_no = ismax-pos; 558 559 ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_block+pos,iscol_block+pos,scall,*submat+pos);CHKERRQ(ierr); 560 pos += max_no; 561 } 562 563 if (scall == MAT_INITIAL_MATRIX && ismax) { 564 /* save nstages for reuse */ 565 subc = (Mat_SeqBAIJ*)((*submat)[0]->data); 566 smat = subc->submatis1; 567 if (!smat) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_NULL,"smat does not exit"); 568 smat->nstages = nstages; 569 } 570 571 for (i=0; i<ismax; i++) { 572 ierr = ISDestroy(&isrow_block[i]);CHKERRQ(ierr); 573 ierr = ISDestroy(&iscol_block[i]);CHKERRQ(ierr); 574 } 575 ierr = PetscFree2(isrow_block,iscol_block);CHKERRQ(ierr); 576 PetscFunctionReturn(0); 577 } 578 579 #if defined(PETSC_USE_CTABLE) 580 PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank) 581 { 582 PetscInt nGlobalNd = proc_gnode[size]; 583 PetscMPIInt fproc; 584 PetscErrorCode ierr; 585 586 PetscFunctionBegin; 587 ierr = PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)),&fproc);CHKERRQ(ierr); 588 if (fproc > size) fproc = size; 589 while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) { 590 if (row < proc_gnode[fproc]) fproc--; 591 else fproc++; 592 } 593 *rank = fproc; 594 PetscFunctionReturn(0); 595 } 596 #endif 597 598 /* -------------------------------------------------------------------------*/ 599 /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */ 600 601 PetscErrorCode MatDestroy_MPIBAIJ_MatGetSubmatrices(Mat C) 602 { 603 PetscErrorCode ierr; 604 Mat_SeqBAIJ *c = (Mat_SeqBAIJ*)C->data; 605 Mat_SubMat *submatj = c->submatis1; 606 PetscInt i; 607 608 PetscFunctionBegin; 609 if (!submatj->id) { /* delete data that are linked only to submats[id=0] */ 610 ierr = PetscFree4(submatj->sbuf1,submatj->ptr,submatj->tmp,submatj->ctr);CHKERRQ(ierr); 611 612 for (i=0; i<submatj->nrqr; ++i) { 613 ierr = PetscFree(submatj->sbuf2[i]);CHKERRQ(ierr); 614 } 615 ierr = PetscFree3(submatj->sbuf2,submatj->req_size,submatj->req_source1);CHKERRQ(ierr); 616 617 if (submatj->rbuf1) { 618 ierr = PetscFree(submatj->rbuf1[0]);CHKERRQ(ierr); 619 ierr = PetscFree(submatj->rbuf1);CHKERRQ(ierr); 620 } 621 622 for (i=0; i<submatj->nrqs; ++i) { 623 ierr = PetscFree(submatj->rbuf3[i]);CHKERRQ(ierr); 624 } 625 ierr = PetscFree3(submatj->req_source2,submatj->rbuf2,submatj->rbuf3);CHKERRQ(ierr); 626 ierr = PetscFree(submatj->pa);CHKERRQ(ierr); 627 } 628 629 #if defined(PETSC_USE_CTABLE) 630 ierr = PetscTableDestroy((PetscTable*)&submatj->rmap);CHKERRQ(ierr); 631 if (submatj->cmap_loc) {ierr = PetscFree(submatj->cmap_loc);CHKERRQ(ierr);} 632 ierr = PetscFree(submatj->rmap_loc);CHKERRQ(ierr); 633 #else 634 ierr = PetscFree(submatj->rmap);CHKERRQ(ierr); 635 #endif 636 637 if (!submatj->allcolumns) { 638 #if defined(PETSC_USE_CTABLE) 639 ierr = PetscTableDestroy((PetscTable*)&submatj->cmap);CHKERRQ(ierr); 640 #else 641 ierr = PetscFree(submatj->cmap);CHKERRQ(ierr); 642 #endif 643 } 644 ierr = submatj->destroy(C);CHKERRQ(ierr); 645 ierr = PetscFree(submatj->row2proc);CHKERRQ(ierr); 646 647 ierr = PetscFree(submatj);CHKERRQ(ierr); 648 PetscFunctionReturn(0); 649 } 650 651 PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats) 652 { 653 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 654 Mat A = c->A; 655 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*subc; 656 const PetscInt **icol,**irow; 657 PetscInt *nrow,*ncol,start; 658 PetscErrorCode ierr; 659 PetscMPIInt rank,size,tag0,tag2,tag3,tag4,*w1,*w2,*w3,*w4,nrqr; 660 PetscInt **sbuf1,**sbuf2,*sbuf2_i,i,j,k,l,ct1,ct2,**rbuf1,row,proc=-1; 661 PetscInt nrqs=0,msz,**ptr=NULL,*req_size=NULL,*ctr=NULL,*pa,*tmp=NULL,tcol; 662 PetscInt **rbuf3=NULL,*req_source1=NULL,*req_source2,**sbuf_aj,**rbuf2=NULL,max1,max2; 663 PetscInt **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax; 664 #if defined(PETSC_USE_CTABLE) 665 PetscTable *cmap,cmap_i=NULL,*rmap,rmap_i; 666 #else 667 PetscInt **cmap,*cmap_i=NULL,**rmap,*rmap_i; 668 #endif 669 const PetscInt *irow_i,*icol_i; 670 PetscInt ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i; 671 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3; 672 MPI_Request *r_waits4,*s_waits3,*s_waits4; 673 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3,*s_status2; 674 MPI_Status *r_status3,*r_status4,*s_status4; 675 MPI_Comm comm; 676 PetscScalar **rbuf4,*rbuf4_i=NULL,**sbuf_aa,*vals,*mat_a=NULL,*imat_a,*sbuf_aa_i; 677 PetscMPIInt *onodes1,*olengths1,end; 678 PetscInt **row2proc,*row2proc_i,*imat_ilen,*imat_j,*imat_i; 679 Mat_SubMat **smats,*smat_i; 680 PetscBool *issorted,colflag,iscsorted=PETSC_TRUE; 681 PetscInt *sbuf1_i,*rbuf2_i,*rbuf3_i,ilen; 682 PetscInt bs=C->rmap->bs,bs2=c->bs2,rstart = c->rstartbs; 683 PetscBool ijonly=c->ijonly; /* private flag indicates only matrix data structures are requested */ 684 PetscInt nzA,nzB,*a_i=a->i,*b_i=b->i,*a_j = a->j,*b_j = b->j,ctmp,imark,*cworkA,*cworkB; 685 PetscScalar *vworkA=NULL,*vworkB=NULL,*a_a = a->a,*b_a = b->a; 686 PetscInt cstart = c->cstartbs,*bmap = c->garray; 687 PetscBool *allrows,*allcolumns; 688 689 PetscFunctionBegin; 690 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 691 size = c->size; 692 rank = c->rank; 693 694 ierr = PetscMalloc6(ismax,&smats,ismax,&row2proc,ismax,&cmap,ismax,&rmap,ismax,&allcolumns,ismax,&allrows);CHKERRQ(ierr); 695 ierr = PetscMalloc5(ismax,&irow,ismax,&icol,ismax,&nrow,ismax,&ncol,ismax,&issorted);CHKERRQ(ierr); 696 697 for (i=0; i<ismax; i++) { 698 ierr = ISSorted(iscol[i],&issorted[i]);CHKERRQ(ierr); 699 if (!issorted[i]) iscsorted = issorted[i]; /* columns are not sorted! */ 700 ierr = ISSorted(isrow[i],&issorted[i]);CHKERRQ(ierr); 701 702 /* Check for special case: allcolumns */ 703 ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr); 704 ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr); 705 706 if (colflag && ncol[i] == c->Nbs) { 707 allcolumns[i] = PETSC_TRUE; 708 icol[i] = NULL; 709 } else { 710 allcolumns[i] = PETSC_FALSE; 711 ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr); 712 } 713 714 /* Check for special case: allrows */ 715 ierr = ISIdentity(isrow[i],&colflag);CHKERRQ(ierr); 716 ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr); 717 if (colflag && nrow[i] == c->Mbs) { 718 allrows[i] = PETSC_TRUE; 719 irow[i] = NULL; 720 } else { 721 allrows[i] = PETSC_FALSE; 722 ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr); 723 } 724 } 725 726 if (scall == MAT_REUSE_MATRIX) { 727 /* Assumes new rows are same length as the old rows */ 728 for (i=0; i<ismax; i++) { 729 subc = (Mat_SeqBAIJ*)(submats[i]->data); 730 if (subc->mbs != nrow[i] || subc->nbs != ncol[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size"); 731 732 /* Initial matrix as if empty */ 733 ierr = PetscMemzero(subc->ilen,subc->mbs*sizeof(PetscInt));CHKERRQ(ierr); 734 735 /* Initial matrix as if empty */ 736 submats[i]->factortype = C->factortype; 737 738 smat_i = subc->submatis1; 739 smats[i] = smat_i; 740 741 nrqs = smat_i->nrqs; 742 nrqr = smat_i->nrqr; 743 rbuf1 = smat_i->rbuf1; 744 rbuf2 = smat_i->rbuf2; 745 rbuf3 = smat_i->rbuf3; 746 req_source2 = smat_i->req_source2; 747 748 sbuf1 = smat_i->sbuf1; 749 sbuf2 = smat_i->sbuf2; 750 ptr = smat_i->ptr; 751 tmp = smat_i->tmp; 752 ctr = smat_i->ctr; 753 754 pa = smat_i->pa; 755 req_size = smat_i->req_size; 756 req_source1 = smat_i->req_source1; 757 758 allcolumns[i] = smat_i->allcolumns; 759 allrows[i] = smat_i->allrows; 760 row2proc[i] = smat_i->row2proc; 761 rmap[i] = smat_i->rmap; 762 cmap[i] = smat_i->cmap; 763 } 764 } else { /* scall == MAT_INITIAL_MATRIX */ 765 /* Get some new tags to keep the communication clean */ 766 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 767 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 768 769 /* evaluate communication - mesg to who, length of mesg, and buffer space 770 required. Based on this, buffers are allocated, and data copied into them*/ 771 ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); /* mesg size, initialize work vectors */ 772 773 for (i=0; i<ismax; i++) { 774 jmax = nrow[i]; 775 irow_i = irow[i]; 776 777 ierr = PetscMalloc1(jmax,&row2proc_i);CHKERRQ(ierr); 778 row2proc[i] = row2proc_i; 779 780 if (issorted[i]) proc = 0; 781 for (j=0; j<jmax; j++) { 782 if (!issorted[i]) proc = 0; 783 if (allrows[i]) row = j; 784 else row = irow_i[j]; 785 786 while (row >= c->rangebs[proc+1]) proc++; 787 w4[proc]++; 788 row2proc_i[j] = proc; /* map row index to proc */ 789 } 790 for (j=0; j<size; j++) { 791 if (w4[j]) { w1[j] += w4[j]; w3[j]++; w4[j] = 0;} 792 } 793 } 794 795 nrqs = 0; /* no of outgoing messages */ 796 msz = 0; /* total mesg length (for all procs) */ 797 w1[rank] = 0; /* no mesg sent to self */ 798 w3[rank] = 0; 799 for (i=0; i<size; i++) { 800 if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 801 } 802 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/ 803 for (i=0,j=0; i<size; i++) { 804 if (w1[i]) { pa[j] = i; j++; } 805 } 806 807 /* Each message would have a header = 1 + 2*(no of IS) + data */ 808 for (i=0; i<nrqs; i++) { 809 j = pa[i]; 810 w1[j] += w2[j] + 2* w3[j]; 811 msz += w1[j]; 812 } 813 ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr); 814 815 /* Determine the number of messages to expect, their lengths, from from-ids */ 816 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 817 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 818 819 /* Now post the Irecvs corresponding to these messages */ 820 tag0 = ((PetscObject)C)->tag; 821 ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr); 822 823 ierr = PetscFree(onodes1);CHKERRQ(ierr); 824 ierr = PetscFree(olengths1);CHKERRQ(ierr); 825 826 /* Allocate Memory for outgoing messages */ 827 ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr); 828 ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr); 829 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 830 831 { 832 PetscInt *iptr = tmp; 833 k = 0; 834 for (i=0; i<nrqs; i++) { 835 j = pa[i]; 836 iptr += k; 837 sbuf1[j] = iptr; 838 k = w1[j]; 839 } 840 } 841 842 /* Form the outgoing messages. Initialize the header space */ 843 for (i=0; i<nrqs; i++) { 844 j = pa[i]; 845 sbuf1[j][0] = 0; 846 ierr = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 847 ptr[j] = sbuf1[j] + 2*w3[j] + 1; 848 } 849 850 /* Parse the isrow and copy data into outbuf */ 851 for (i=0; i<ismax; i++) { 852 row2proc_i = row2proc[i]; 853 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 854 irow_i = irow[i]; 855 jmax = nrow[i]; 856 for (j=0; j<jmax; j++) { /* parse the indices of each IS */ 857 proc = row2proc_i[j]; 858 if (allrows[i]) row = j; 859 else row = irow_i[j]; 860 861 if (proc != rank) { /* copy to the outgoing buf*/ 862 ctr[proc]++; 863 *ptr[proc] = row; 864 ptr[proc]++; 865 } 866 } 867 /* Update the headers for the current IS */ 868 for (j=0; j<size; j++) { /* Can Optimise this loop too */ 869 if ((ctr_j = ctr[j])) { 870 sbuf1_j = sbuf1[j]; 871 k = ++sbuf1_j[0]; 872 sbuf1_j[2*k] = ctr_j; 873 sbuf1_j[2*k-1] = i; 874 } 875 } 876 } 877 878 /* Now post the sends */ 879 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 880 for (i=0; i<nrqs; ++i) { 881 j = pa[i]; 882 ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr); 883 } 884 885 /* Post Receives to capture the buffer size */ 886 ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr); 887 ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr); 888 rbuf2[0] = tmp + msz; 889 for (i=1; i<nrqs; ++i) { 890 rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]]; 891 } 892 for (i=0; i<nrqs; ++i) { 893 j = pa[i]; 894 ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag2,comm,r_waits2+i);CHKERRQ(ierr); 895 } 896 897 /* Send to other procs the buf size they should allocate */ 898 /* Receive messages*/ 899 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 900 ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr); 901 ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr); 902 903 ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr); 904 for (i=0; i<nrqr; ++i) { 905 req_size[i] = 0; 906 rbuf1_i = rbuf1[i]; 907 start = 2*rbuf1_i[0] + 1; 908 ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr); 909 ierr = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr); 910 sbuf2_i = sbuf2[i]; 911 for (j=start; j<end; j++) { 912 row = rbuf1_i[j] - rstart; 913 ncols = a_i[row+1] - a_i[row] + b_i[row+1] - b_i[row]; 914 sbuf2_i[j] = ncols; 915 req_size[i] += ncols; 916 } 917 req_source1[i] = r_status1[i].MPI_SOURCE; 918 /* form the header */ 919 sbuf2_i[0] = req_size[i]; 920 for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j]; 921 922 ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr); 923 } 924 925 ierr = PetscFree(r_status1);CHKERRQ(ierr); 926 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 927 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 928 929 /* Receive messages*/ 930 ierr = PetscMalloc1(nrqs+1,&r_waits3);CHKERRQ(ierr); 931 ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr); 932 933 ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr); 934 for (i=0; i<nrqs; ++i) { 935 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr); 936 req_source2[i] = r_status2[i].MPI_SOURCE; 937 ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr); 938 } 939 ierr = PetscFree(r_status2);CHKERRQ(ierr); 940 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 941 942 /* Wait on sends1 and sends2 */ 943 ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr); 944 ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr); 945 946 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);} 947 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);} 948 ierr = PetscFree(s_status1);CHKERRQ(ierr); 949 ierr = PetscFree(s_status2);CHKERRQ(ierr); 950 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 951 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 952 953 /* Now allocate sending buffers for a->j, and send them off */ 954 ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr); 955 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 956 ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr); 957 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 958 959 ierr = PetscMalloc1(nrqr+1,&s_waits3);CHKERRQ(ierr); 960 { 961 962 for (i=0; i<nrqr; i++) { 963 rbuf1_i = rbuf1[i]; 964 sbuf_aj_i = sbuf_aj[i]; 965 ct1 = 2*rbuf1_i[0] + 1; 966 ct2 = 0; 967 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 968 kmax = rbuf1[i][2*j]; 969 for (k=0; k<kmax; k++,ct1++) { 970 row = rbuf1_i[ct1] - rstart; 971 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 972 ncols = nzA + nzB; 973 cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row]; 974 975 /* load the column indices for this row into cols */ 976 cols = sbuf_aj_i + ct2; 977 for (l=0; l<nzB; l++) { 978 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp; 979 else break; 980 } 981 imark = l; 982 for (l=0; l<nzA; l++) {cols[imark+l] = cstart + cworkA[l];} 983 for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]]; 984 ct2 += ncols; 985 } 986 } 987 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr); 988 } 989 } 990 ierr = PetscMalloc2(nrqs+1,&r_status3,nrqr+1,&s_status3);CHKERRQ(ierr); 991 992 /* create col map: global col of C -> local col of submatrices */ 993 #if defined(PETSC_USE_CTABLE) 994 for (i=0; i<ismax; i++) { 995 if (!allcolumns[i]) { 996 ierr = PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);CHKERRQ(ierr); 997 998 jmax = ncol[i]; 999 icol_i = icol[i]; 1000 cmap_i = cmap[i]; 1001 for (j=0; j<jmax; j++) { 1002 ierr = PetscTableAdd(cmap[i],icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1003 } 1004 } else cmap[i] = NULL; 1005 } 1006 #else 1007 for (i=0; i<ismax; i++) { 1008 if (!allcolumns[i]) { 1009 ierr = PetscCalloc1(c->Nbs,&cmap[i]);CHKERRQ(ierr); 1010 jmax = ncol[i]; 1011 icol_i = icol[i]; 1012 cmap_i = cmap[i]; 1013 for (j=0; j<jmax; j++) cmap_i[icol_i[j]] = j+1; 1014 } else cmap[i] = NULL; 1015 } 1016 #endif 1017 1018 /* Create lens which is required for MatCreate... */ 1019 for (i=0,j=0; i<ismax; i++) j += nrow[i]; 1020 ierr = PetscMalloc1(ismax,&lens);CHKERRQ(ierr); 1021 1022 if (ismax) { 1023 ierr = PetscCalloc1(j,&lens[0]);CHKERRQ(ierr); 1024 } 1025 for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1]; 1026 1027 /* Update lens from local data */ 1028 for (i=0; i<ismax; i++) { 1029 row2proc_i = row2proc[i]; 1030 jmax = nrow[i]; 1031 if (!allcolumns[i]) cmap_i = cmap[i]; 1032 irow_i = irow[i]; 1033 lens_i = lens[i]; 1034 for (j=0; j<jmax; j++) { 1035 if (allrows[i]) row = j; 1036 else row = irow_i[j]; /* global blocked row of C */ 1037 1038 proc = row2proc_i[j]; 1039 if (proc == rank) { 1040 /* Get indices from matA and then from matB */ 1041 #if defined(PETSC_USE_CTABLE) 1042 PetscInt tt; 1043 #endif 1044 row = row - rstart; 1045 nzA = a_i[row+1] - a_i[row]; 1046 nzB = b_i[row+1] - b_i[row]; 1047 cworkA = a_j + a_i[row]; 1048 cworkB = b_j + b_i[row]; 1049 1050 if (!allcolumns[i]) { 1051 #if defined(PETSC_USE_CTABLE) 1052 for (k=0; k<nzA; k++) { 1053 ierr = PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr); 1054 if (tt) lens_i[j]++; 1055 } 1056 for (k=0; k<nzB; k++) { 1057 ierr = PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr); 1058 if (tt) lens_i[j]++; 1059 } 1060 1061 #else 1062 for (k=0; k<nzA; k++) { 1063 if (cmap_i[cstart + cworkA[k]]) lens_i[j]++; 1064 } 1065 for (k=0; k<nzB; k++) { 1066 if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++; 1067 } 1068 #endif 1069 } else { /* allcolumns */ 1070 lens_i[j] = nzA + nzB; 1071 } 1072 } 1073 } 1074 } 1075 1076 /* Create row map: global row of C -> local row of submatrices */ 1077 for (i=0; i<ismax; i++) { 1078 if (!allrows[i]) { 1079 #if defined(PETSC_USE_CTABLE) 1080 ierr = PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);CHKERRQ(ierr); 1081 irow_i = irow[i]; 1082 jmax = nrow[i]; 1083 for (j=0; j<jmax; j++) { 1084 if (allrows[i]) { 1085 ierr = PetscTableAdd(rmap[i],j+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1086 } else { 1087 ierr = PetscTableAdd(rmap[i],irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1088 } 1089 } 1090 #else 1091 ierr = PetscCalloc1(c->Mbs,&rmap[i]);CHKERRQ(ierr); 1092 rmap_i = rmap[i]; 1093 irow_i = irow[i]; 1094 jmax = nrow[i]; 1095 for (j=0; j<jmax; j++) { 1096 if (allrows[i]) rmap_i[j] = j; 1097 else rmap_i[irow_i[j]] = j; 1098 } 1099 #endif 1100 } else rmap[i] = NULL; 1101 } 1102 1103 /* Update lens from offproc data */ 1104 { 1105 PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i; 1106 1107 ierr = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr); 1108 for (tmp2=0; tmp2<nrqs; tmp2++) { 1109 sbuf1_i = sbuf1[pa[tmp2]]; 1110 jmax = sbuf1_i[0]; 1111 ct1 = 2*jmax+1; 1112 ct2 = 0; 1113 rbuf2_i = rbuf2[tmp2]; 1114 rbuf3_i = rbuf3[tmp2]; 1115 for (j=1; j<=jmax; j++) { 1116 is_no = sbuf1_i[2*j-1]; 1117 max1 = sbuf1_i[2*j]; 1118 lens_i = lens[is_no]; 1119 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 1120 rmap_i = rmap[is_no]; 1121 for (k=0; k<max1; k++,ct1++) { 1122 if (allrows[is_no]) { 1123 row = sbuf1_i[ct1]; 1124 } else { 1125 #if defined(PETSC_USE_CTABLE) 1126 ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 1127 row--; 1128 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1129 #else 1130 row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */ 1131 #endif 1132 } 1133 max2 = rbuf2_i[ct1]; 1134 for (l=0; l<max2; l++,ct2++) { 1135 if (!allcolumns[is_no]) { 1136 #if defined(PETSC_USE_CTABLE) 1137 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1138 #else 1139 tcol = cmap_i[rbuf3_i[ct2]]; 1140 #endif 1141 if (tcol) lens_i[row]++; 1142 } else { /* allcolumns */ 1143 lens_i[row]++; /* lens_i[row] += max2 ? */ 1144 } 1145 } 1146 } 1147 } 1148 } 1149 } 1150 ierr = PetscFree(r_waits3);CHKERRQ(ierr); 1151 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);} 1152 ierr = PetscFree2(r_status3,s_status3);CHKERRQ(ierr); 1153 ierr = PetscFree(s_waits3);CHKERRQ(ierr); 1154 1155 /* Create the submatrices */ 1156 for (i=0; i<ismax; i++) { 1157 PetscInt bs_tmp; 1158 if (ijonly) bs_tmp = 1; 1159 else bs_tmp = bs; 1160 1161 ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr); 1162 ierr = MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1163 1164 ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr); 1165 ierr = MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); 1166 ierr = MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); /* this subroutine is used by SBAIJ routines */ 1167 1168 /* create struct Mat_SubMat and attached it to submat */ 1169 ierr = PetscNew(&smat_i);CHKERRQ(ierr); 1170 subc = (Mat_SeqBAIJ*)submats[i]->data; 1171 subc->submatis1 = smat_i; 1172 smats[i] = smat_i; 1173 1174 smat_i->destroy = submats[i]->ops->destroy; 1175 submats[i]->ops->destroy = MatDestroy_MPIBAIJ_MatGetSubmatrices; 1176 submats[i]->factortype = C->factortype; 1177 1178 smat_i->id = i; 1179 smat_i->nrqs = nrqs; 1180 smat_i->nrqr = nrqr; 1181 smat_i->rbuf1 = rbuf1; 1182 smat_i->rbuf2 = rbuf2; 1183 smat_i->rbuf3 = rbuf3; 1184 smat_i->sbuf2 = sbuf2; 1185 smat_i->req_source2 = req_source2; 1186 1187 smat_i->sbuf1 = sbuf1; 1188 smat_i->ptr = ptr; 1189 smat_i->tmp = tmp; 1190 smat_i->ctr = ctr; 1191 1192 smat_i->pa = pa; 1193 smat_i->req_size = req_size; 1194 smat_i->req_source1 = req_source1; 1195 1196 smat_i->allcolumns = allcolumns[i]; 1197 smat_i->allrows = allrows[i]; 1198 smat_i->singleis = PETSC_FALSE; 1199 smat_i->row2proc = row2proc[i]; 1200 smat_i->rmap = rmap[i]; 1201 smat_i->cmap = cmap[i]; 1202 } 1203 1204 if (ismax) {ierr = PetscFree(lens[0]);CHKERRQ(ierr);} 1205 ierr = PetscFree(lens);CHKERRQ(ierr); 1206 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 1207 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 1208 1209 } /* endof scall == MAT_INITIAL_MATRIX */ 1210 1211 /* Post recv matrix values */ 1212 if (!ijonly) { 1213 ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr); 1214 ierr = PetscMalloc1(nrqs+1,&rbuf4);CHKERRQ(ierr); 1215 ierr = PetscMalloc1(nrqs+1,&r_waits4);CHKERRQ(ierr); 1216 ierr = PetscMalloc1(nrqs+1,&r_status4);CHKERRQ(ierr); 1217 ierr = PetscMalloc1(nrqr+1,&s_status4);CHKERRQ(ierr); 1218 for (i=0; i<nrqs; ++i) { 1219 ierr = PetscMalloc1(rbuf2[i][0]*bs2,&rbuf4[i]);CHKERRQ(ierr); 1220 ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0]*bs2,MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr); 1221 } 1222 1223 /* Allocate sending buffers for a->a, and send them off */ 1224 ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr); 1225 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1226 1227 ierr = PetscMalloc1((j+1)*bs2,&sbuf_aa[0]);CHKERRQ(ierr); 1228 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2; 1229 1230 ierr = PetscMalloc1(nrqr+1,&s_waits4);CHKERRQ(ierr); 1231 1232 for (i=0; i<nrqr; i++) { 1233 rbuf1_i = rbuf1[i]; 1234 sbuf_aa_i = sbuf_aa[i]; 1235 ct1 = 2*rbuf1_i[0]+1; 1236 ct2 = 0; 1237 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 1238 kmax = rbuf1_i[2*j]; 1239 for (k=0; k<kmax; k++,ct1++) { 1240 row = rbuf1_i[ct1] - rstart; 1241 nzA = a_i[row+1] - a_i[row]; 1242 nzB = b_i[row+1] - b_i[row]; 1243 ncols = nzA + nzB; 1244 cworkB = b_j + b_i[row]; 1245 vworkA = a_a + a_i[row]*bs2; 1246 vworkB = b_a + b_i[row]*bs2; 1247 1248 /* load the column values for this row into vals*/ 1249 vals = sbuf_aa_i+ct2*bs2; 1250 for (l=0; l<nzB; l++) { 1251 if ((bmap[cworkB[l]]) < cstart) { 1252 ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1253 } else break; 1254 } 1255 imark = l; 1256 for (l=0; l<nzA; l++) { 1257 ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1258 } 1259 for (l=imark; l<nzB; l++) { 1260 ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1261 } 1262 1263 ct2 += ncols; 1264 } 1265 } 1266 ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr); 1267 } 1268 } 1269 1270 if (!ismax) { 1271 ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr); 1272 ierr = PetscFree(rbuf1);CHKERRQ(ierr); 1273 } 1274 1275 /* Assemble the matrices */ 1276 /* First assemble the local rows */ 1277 for (i=0; i<ismax; i++) { 1278 row2proc_i = row2proc[i]; 1279 subc = (Mat_SeqBAIJ*)submats[i]->data; 1280 imat_ilen = subc->ilen; 1281 imat_j = subc->j; 1282 imat_i = subc->i; 1283 imat_a = subc->a; 1284 1285 if (!allcolumns[i]) cmap_i = cmap[i]; 1286 rmap_i = rmap[i]; 1287 irow_i = irow[i]; 1288 jmax = nrow[i]; 1289 for (j=0; j<jmax; j++) { 1290 if (allrows[i]) row = j; 1291 else row = irow_i[j]; 1292 proc = row2proc_i[j]; 1293 1294 if (proc == rank) { 1295 1296 row = row - rstart; 1297 nzA = a_i[row+1] - a_i[row]; 1298 nzB = b_i[row+1] - b_i[row]; 1299 cworkA = a_j + a_i[row]; 1300 cworkB = b_j + b_i[row]; 1301 if (!ijonly) { 1302 vworkA = a_a + a_i[row]*bs2; 1303 vworkB = b_a + b_i[row]*bs2; 1304 } 1305 1306 if (allrows[i]) { 1307 row = row+rstart; 1308 } else { 1309 #if defined(PETSC_USE_CTABLE) 1310 ierr = PetscTableFind(rmap_i,row+rstart+1,&row);CHKERRQ(ierr); 1311 row--; 1312 1313 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1314 #else 1315 row = rmap_i[row + rstart]; 1316 #endif 1317 } 1318 mat_i = imat_i[row]; 1319 if (!ijonly) mat_a = imat_a + mat_i*bs2; 1320 mat_j = imat_j + mat_i; 1321 ilen = imat_ilen[row]; 1322 1323 /* load the column indices for this row into cols*/ 1324 if (!allcolumns[i]) { 1325 for (l=0; l<nzB; l++) { 1326 if ((ctmp = bmap[cworkB[l]]) < cstart) { 1327 #if defined(PETSC_USE_CTABLE) 1328 ierr = PetscTableFind(cmap_i,ctmp+1,&tcol);CHKERRQ(ierr); 1329 if (tcol) { 1330 #else 1331 if ((tcol = cmap_i[ctmp])) { 1332 #endif 1333 *mat_j++ = tcol - 1; 1334 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1335 mat_a += bs2; 1336 ilen++; 1337 } 1338 } else break; 1339 } 1340 imark = l; 1341 for (l=0; l<nzA; l++) { 1342 #if defined(PETSC_USE_CTABLE) 1343 ierr = PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr); 1344 if (tcol) { 1345 #else 1346 if ((tcol = cmap_i[cstart + cworkA[l]])) { 1347 #endif 1348 *mat_j++ = tcol - 1; 1349 if (!ijonly) { 1350 ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1351 mat_a += bs2; 1352 } 1353 ilen++; 1354 } 1355 } 1356 for (l=imark; l<nzB; l++) { 1357 #if defined(PETSC_USE_CTABLE) 1358 ierr = PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr); 1359 if (tcol) { 1360 #else 1361 if ((tcol = cmap_i[bmap[cworkB[l]]])) { 1362 #endif 1363 *mat_j++ = tcol - 1; 1364 if (!ijonly) { 1365 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1366 mat_a += bs2; 1367 } 1368 ilen++; 1369 } 1370 } 1371 } else { /* allcolumns */ 1372 for (l=0; l<nzB; l++) { 1373 if ((ctmp = bmap[cworkB[l]]) < cstart) { 1374 *mat_j++ = ctmp; 1375 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1376 mat_a += bs2; 1377 ilen++; 1378 } else break; 1379 } 1380 imark = l; 1381 for (l=0; l<nzA; l++) { 1382 *mat_j++ = cstart+cworkA[l]; 1383 if (!ijonly) { 1384 ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1385 mat_a += bs2; 1386 } 1387 ilen++; 1388 } 1389 for (l=imark; l<nzB; l++) { 1390 *mat_j++ = bmap[cworkB[l]]; 1391 if (!ijonly) { 1392 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1393 mat_a += bs2; 1394 } 1395 ilen++; 1396 } 1397 } 1398 imat_ilen[row] = ilen; 1399 } 1400 } 1401 } 1402 1403 /* Now assemble the off proc rows */ 1404 if (!ijonly) { 1405 ierr = MPI_Waitall(nrqs,r_waits4,r_status4);CHKERRQ(ierr); 1406 } 1407 for (tmp2=0; tmp2<nrqs; tmp2++) { 1408 sbuf1_i = sbuf1[pa[tmp2]]; 1409 jmax = sbuf1_i[0]; 1410 ct1 = 2*jmax + 1; 1411 ct2 = 0; 1412 rbuf2_i = rbuf2[tmp2]; 1413 rbuf3_i = rbuf3[tmp2]; 1414 if (!ijonly) rbuf4_i = rbuf4[tmp2]; 1415 for (j=1; j<=jmax; j++) { 1416 is_no = sbuf1_i[2*j-1]; 1417 rmap_i = rmap[is_no]; 1418 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 1419 subc = (Mat_SeqBAIJ*)submats[is_no]->data; 1420 imat_ilen = subc->ilen; 1421 imat_j = subc->j; 1422 imat_i = subc->i; 1423 if (!ijonly) imat_a = subc->a; 1424 max1 = sbuf1_i[2*j]; 1425 for (k=0; k<max1; k++,ct1++) { /* for each recved block row */ 1426 row = sbuf1_i[ct1]; 1427 1428 if (allrows[is_no]) { 1429 row = sbuf1_i[ct1]; 1430 } else { 1431 #if defined(PETSC_USE_CTABLE) 1432 ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr); 1433 row--; 1434 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1435 #else 1436 row = rmap_i[row]; 1437 #endif 1438 } 1439 ilen = imat_ilen[row]; 1440 mat_i = imat_i[row]; 1441 if (!ijonly) mat_a = imat_a + mat_i*bs2; 1442 mat_j = imat_j + mat_i; 1443 max2 = rbuf2_i[ct1]; 1444 if (!allcolumns[is_no]) { 1445 for (l=0; l<max2; l++,ct2++) { 1446 #if defined(PETSC_USE_CTABLE) 1447 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1448 #else 1449 tcol = cmap_i[rbuf3_i[ct2]]; 1450 #endif 1451 if (tcol) { 1452 *mat_j++ = tcol - 1; 1453 if (!ijonly) { 1454 ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1455 mat_a += bs2; 1456 } 1457 ilen++; 1458 } 1459 } 1460 } else { /* allcolumns */ 1461 for (l=0; l<max2; l++,ct2++) { 1462 *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */ 1463 if (!ijonly) { 1464 ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1465 mat_a += bs2; 1466 } 1467 ilen++; 1468 } 1469 } 1470 imat_ilen[row] = ilen; 1471 } 1472 } 1473 } 1474 1475 if (!iscsorted) { /* sort column indices of the rows */ 1476 MatScalar *work; 1477 1478 ierr = PetscMalloc1(bs2,&work);CHKERRQ(ierr); 1479 for (i=0; i<ismax; i++) { 1480 subc = (Mat_SeqBAIJ*)submats[i]->data; 1481 imat_ilen = subc->ilen; 1482 imat_j = subc->j; 1483 imat_i = subc->i; 1484 if (!ijonly) imat_a = subc->a; 1485 if (allcolumns[i]) continue; 1486 1487 jmax = nrow[i]; 1488 for (j=0; j<jmax; j++) { 1489 mat_i = imat_i[j]; 1490 mat_j = imat_j + mat_i; 1491 ilen = imat_ilen[j]; 1492 if (ijonly) { 1493 ierr = PetscSortInt(ilen,mat_j);CHKERRQ(ierr); 1494 } else { 1495 mat_a = imat_a + mat_i*bs2; 1496 ierr = PetscSortIntWithDataArray(ilen,mat_j,mat_a,bs2*sizeof(MatScalar),work);CHKERRQ(ierr); 1497 } 1498 } 1499 } 1500 ierr = PetscFree(work);CHKERRQ(ierr); 1501 } 1502 1503 if (!ijonly) { 1504 ierr = PetscFree(r_status4);CHKERRQ(ierr); 1505 ierr = PetscFree(r_waits4);CHKERRQ(ierr); 1506 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);} 1507 ierr = PetscFree(s_waits4);CHKERRQ(ierr); 1508 ierr = PetscFree(s_status4);CHKERRQ(ierr); 1509 } 1510 1511 /* Restore the indices */ 1512 for (i=0; i<ismax; i++) { 1513 if (!allrows[i]) { 1514 ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr); 1515 } 1516 if (!allcolumns[i]) { 1517 ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr); 1518 } 1519 } 1520 1521 for (i=0; i<ismax; i++) { 1522 ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1523 ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1524 } 1525 1526 /* Destroy allocated memory */ 1527 if (!ismax) { 1528 ierr = PetscFree(pa);CHKERRQ(ierr); 1529 ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr); 1530 for (i=0; i<nrqr; ++i) { 1531 ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr); 1532 } 1533 for (i=0; i<nrqs; ++i) { 1534 ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr); 1535 } 1536 1537 ierr = PetscFree3(sbuf2,req_size,req_source1);CHKERRQ(ierr); 1538 ierr = PetscFree3(req_source2,rbuf2,rbuf3);CHKERRQ(ierr); 1539 } 1540 ierr = PetscFree5(irow,icol,nrow,ncol,issorted);CHKERRQ(ierr); 1541 ierr = PetscFree6(smats,row2proc,cmap,rmap,allcolumns,allrows);CHKERRQ(ierr); 1542 1543 if (!ijonly) { 1544 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 1545 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 1546 1547 for (i=0; i<nrqs; ++i) { 1548 ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr); 1549 } 1550 ierr = PetscFree(rbuf4);CHKERRQ(ierr); 1551 } 1552 c->ijonly = PETSC_FALSE; /* set back to the default */ 1553 PetscFunctionReturn(0); 1554 } 1555