1 2 /* 3 Routines to compute overlapping regions of a parallel MPI matrix 4 and to find submatrices that were shared across processors. 5 */ 6 #include <../src/mat/impls/baij/mpi/mpibaij.h> 7 #include <petscbt.h> 8 9 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**); 10 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*); 11 extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 12 extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**); 13 14 PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov) 15 { 16 PetscErrorCode ierr; 17 PetscInt i,N=C->cmap->N, bs=C->rmap->bs; 18 IS *is_new; 19 20 PetscFunctionBegin; 21 ierr = PetscMalloc1(imax,&is_new);CHKERRQ(ierr); 22 /* Convert the indices into block format */ 23 ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,imax,is,is_new);CHKERRQ(ierr); 24 if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n"); 25 for (i=0; i<ov; ++i) { 26 ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr); 27 } 28 for (i=0; i<imax; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);} 29 ierr = ISExpandIndicesGeneral(N,N,bs,imax,is_new,is);CHKERRQ(ierr); 30 for (i=0; i<imax; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);} 31 ierr = PetscFree(is_new);CHKERRQ(ierr); 32 PetscFunctionReturn(0); 33 } 34 35 /* 36 Sample message format: 37 If a processor A wants processor B to process some elements corresponding 38 to index sets is[1], is[5] 39 mesg [0] = 2 (no of index sets in the mesg) 40 ----------- 41 mesg [1] = 1 => is[1] 42 mesg [2] = sizeof(is[1]); 43 ----------- 44 mesg [5] = 5 => is[5] 45 mesg [6] = sizeof(is[5]); 46 ----------- 47 mesg [7] 48 mesg [n] data(is[1]) 49 ----------- 50 mesg[n+1] 51 mesg[m] data(is[5]) 52 ----------- 53 54 Notes: 55 nrqs - no of requests sent (or to be sent out) 56 nrqr - no of requests recieved (which have to be or which have been processed 57 */ 58 PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[]) 59 { 60 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 61 const PetscInt **idx,*idx_i; 62 PetscInt *n,*w3,*w4,**data,len; 63 PetscErrorCode ierr; 64 PetscMPIInt size,rank,tag1,tag2,*w2,*w1,nrqr; 65 PetscInt Mbs,i,j,k,**rbuf,row,proc=-1,nrqs,msz,**outdat,**ptr; 66 PetscInt *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p; 67 PetscMPIInt *onodes1,*olengths1,*onodes2,*olengths2; 68 PetscBT *table; 69 MPI_Comm comm; 70 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2; 71 MPI_Status *s_status,*recv_status; 72 char *t_p; 73 74 PetscFunctionBegin; 75 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 76 size = c->size; 77 rank = c->rank; 78 Mbs = c->Mbs; 79 80 ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr); 81 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 82 83 ierr = PetscMalloc2(imax+1,&idx,imax,&n);CHKERRQ(ierr); 84 85 for (i=0; i<imax; i++) { 86 ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr); 87 ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr); 88 } 89 90 /* evaluate communication - mesg to who,length of mesg, and buffer space 91 required. Based on this, buffers are allocated, and data copied into them*/ 92 ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); 93 for (i=0; i<imax; i++) { 94 ierr = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector*/ 95 idx_i = idx[i]; 96 len = n[i]; 97 for (j=0; j<len; j++) { 98 row = idx_i[j]; 99 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries"); 100 ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr); 101 w4[proc]++; 102 } 103 for (j=0; j<size; j++) { 104 if (w4[j]) { w1[j] += w4[j]; w3[j]++;} 105 } 106 } 107 108 nrqs = 0; /* no of outgoing messages */ 109 msz = 0; /* total mesg length (for all proc */ 110 w1[rank] = 0; /* no mesg sent to itself */ 111 w3[rank] = 0; 112 for (i=0; i<size; i++) { 113 if (w1[i]) {w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 114 } 115 /* pa - is list of processors to communicate with */ 116 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); 117 for (i=0,j=0; i<size; i++) { 118 if (w1[i]) {pa[j] = i; j++;} 119 } 120 121 /* Each message would have a header = 1 + 2*(no of IS) + data */ 122 for (i=0; i<nrqs; i++) { 123 j = pa[i]; 124 w1[j] += w2[j] + 2*w3[j]; 125 msz += w1[j]; 126 } 127 128 /* Determine the number of messages to expect, their lengths, from from-ids */ 129 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 130 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 131 132 /* Now post the Irecvs corresponding to these messages */ 133 ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr); 134 135 /* Allocate Memory for outgoing messages */ 136 ierr = PetscMalloc4(size,&outdat,size,&ptr,msz,&tmp,size,&ctr);CHKERRQ(ierr); 137 ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr); 138 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 139 { 140 PetscInt *iptr = tmp,ict = 0; 141 for (i=0; i<nrqs; i++) { 142 j = pa[i]; 143 iptr += ict; 144 outdat[j] = iptr; 145 ict = w1[j]; 146 } 147 } 148 149 /* Form the outgoing messages */ 150 /*plug in the headers*/ 151 for (i=0; i<nrqs; i++) { 152 j = pa[i]; 153 outdat[j][0] = 0; 154 ierr = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 155 ptr[j] = outdat[j] + 2*w3[j] + 1; 156 } 157 158 /* Memory for doing local proc's work*/ 159 { 160 ierr = PetscCalloc5(imax,&table, imax,&data, imax,&isz, Mbs*imax,&d_p, (Mbs/PETSC_BITS_PER_BYTE+1)*imax,&t_p);CHKERRQ(ierr); 161 162 for (i=0; i<imax; i++) { 163 table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i; 164 data[i] = d_p + (Mbs)*i; 165 } 166 } 167 168 /* Parse the IS and update local tables and the outgoing buf with the data*/ 169 { 170 PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j; 171 PetscBT table_i; 172 173 for (i=0; i<imax; i++) { 174 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 175 n_i = n[i]; 176 table_i = table[i]; 177 idx_i = idx[i]; 178 data_i = data[i]; 179 isz_i = isz[i]; 180 for (j=0; j<n_i; j++) { /* parse the indices of each IS */ 181 row = idx_i[j]; 182 ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr); 183 if (proc != rank) { /* copy to the outgoing buffer */ 184 ctr[proc]++; 185 *ptr[proc] = row; 186 ptr[proc]++; 187 } else { /* Update the local table */ 188 if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row; 189 } 190 } 191 /* Update the headers for the current IS */ 192 for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */ 193 if ((ctr_j = ctr[j])) { 194 outdat_j = outdat[j]; 195 k = ++outdat_j[0]; 196 outdat_j[2*k] = ctr_j; 197 outdat_j[2*k-1] = i; 198 } 199 } 200 isz[i] = isz_i; 201 } 202 } 203 204 /* Now post the sends */ 205 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 206 for (i=0; i<nrqs; ++i) { 207 j = pa[i]; 208 ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr); 209 } 210 211 /* No longer need the original indices*/ 212 for (i=0; i<imax; ++i) { 213 ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr); 214 } 215 ierr = PetscFree2(idx,n);CHKERRQ(ierr); 216 217 for (i=0; i<imax; ++i) { 218 ierr = ISDestroy(&is[i]);CHKERRQ(ierr); 219 } 220 221 /* Do Local work*/ 222 ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr); 223 224 /* Receive messages*/ 225 ierr = PetscMalloc1(nrqr+1,&recv_status);CHKERRQ(ierr); 226 if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);} 227 228 ierr = PetscMalloc1(nrqs+1,&s_status);CHKERRQ(ierr); 229 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);} 230 231 /* Phase 1 sends are complete - deallocate buffers */ 232 ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr); 233 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 234 235 ierr = PetscMalloc1(nrqr+1,&xdata);CHKERRQ(ierr); 236 ierr = PetscMalloc1(nrqr+1,&isz1);CHKERRQ(ierr); 237 ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr); 238 ierr = PetscFree(rbuf[0]);CHKERRQ(ierr); 239 ierr = PetscFree(rbuf);CHKERRQ(ierr); 240 241 /* Send the data back*/ 242 /* Do a global reduction to know the buffer space req for incoming messages*/ 243 { 244 PetscMPIInt *rw1; 245 246 ierr = PetscCalloc1(size,&rw1);CHKERRQ(ierr); 247 248 for (i=0; i<nrqr; ++i) { 249 proc = recv_status[i].MPI_SOURCE; 250 if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch"); 251 rw1[proc] = isz1[i]; 252 } 253 254 ierr = PetscFree(onodes1);CHKERRQ(ierr); 255 ierr = PetscFree(olengths1);CHKERRQ(ierr); 256 257 /* Determine the number of messages to expect, their lengths, from from-ids */ 258 ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr); 259 ierr = PetscFree(rw1);CHKERRQ(ierr); 260 } 261 /* Now post the Irecvs corresponding to these messages */ 262 ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr); 263 264 /* Now post the sends */ 265 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 266 for (i=0; i<nrqr; ++i) { 267 j = recv_status[i].MPI_SOURCE; 268 ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr); 269 } 270 271 /* receive work done on other processors*/ 272 { 273 PetscMPIInt idex; 274 PetscInt is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax; 275 PetscBT table_i; 276 MPI_Status *status2; 277 278 ierr = PetscMalloc1(PetscMax(nrqr,nrqs)+1,&status2);CHKERRQ(ierr); 279 for (i=0; i<nrqs; ++i) { 280 ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr); 281 /* Process the message*/ 282 rbuf2_i = rbuf2[idex]; 283 ct1 = 2*rbuf2_i[0]+1; 284 jmax = rbuf2[idex][0]; 285 for (j=1; j<=jmax; j++) { 286 max = rbuf2_i[2*j]; 287 is_no = rbuf2_i[2*j-1]; 288 isz_i = isz[is_no]; 289 data_i = data[is_no]; 290 table_i = table[is_no]; 291 for (k=0; k<max; k++,ct1++) { 292 row = rbuf2_i[ct1]; 293 if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row; 294 } 295 isz[is_no] = isz_i; 296 } 297 } 298 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);} 299 ierr = PetscFree(status2);CHKERRQ(ierr); 300 } 301 302 for (i=0; i<imax; ++i) { 303 ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr); 304 } 305 306 307 ierr = PetscFree(onodes2);CHKERRQ(ierr); 308 ierr = PetscFree(olengths2);CHKERRQ(ierr); 309 310 ierr = PetscFree(pa);CHKERRQ(ierr); 311 ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr); 312 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 313 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 314 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 315 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 316 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 317 ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr); 318 ierr = PetscFree(s_status);CHKERRQ(ierr); 319 ierr = PetscFree(recv_status);CHKERRQ(ierr); 320 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 321 ierr = PetscFree(xdata);CHKERRQ(ierr); 322 ierr = PetscFree(isz1);CHKERRQ(ierr); 323 PetscFunctionReturn(0); 324 } 325 326 /* 327 MatIncreaseOverlap_MPIBAIJ_Local - Called by MatincreaseOverlap, to do 328 the work on the local processor. 329 330 Inputs: 331 C - MAT_MPIBAIJ; 332 imax - total no of index sets processed at a time; 333 table - an array of char - size = Mbs bits. 334 335 Output: 336 isz - array containing the count of the solution elements corresponding 337 to each index set; 338 data - pointer to the solutions 339 */ 340 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data) 341 { 342 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 343 Mat A = c->A,B = c->B; 344 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data; 345 PetscInt start,end,val,max,rstart,cstart,*ai,*aj; 346 PetscInt *bi,*bj,*garray,i,j,k,row,*data_i,isz_i; 347 PetscBT table_i; 348 349 PetscFunctionBegin; 350 rstart = c->rstartbs; 351 cstart = c->cstartbs; 352 ai = a->i; 353 aj = a->j; 354 bi = b->i; 355 bj = b->j; 356 garray = c->garray; 357 358 359 for (i=0; i<imax; i++) { 360 data_i = data[i]; 361 table_i = table[i]; 362 isz_i = isz[i]; 363 for (j=0,max=isz[i]; j<max; j++) { 364 row = data_i[j] - rstart; 365 start = ai[row]; 366 end = ai[row+1]; 367 for (k=start; k<end; k++) { /* Amat */ 368 val = aj[k] + cstart; 369 if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val; 370 } 371 start = bi[row]; 372 end = bi[row+1]; 373 for (k=start; k<end; k++) { /* Bmat */ 374 val = garray[bj[k]]; 375 if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val; 376 } 377 } 378 isz[i] = isz_i; 379 } 380 PetscFunctionReturn(0); 381 } 382 /* 383 MatIncreaseOverlap_MPIBAIJ_Receive - Process the recieved messages, 384 and return the output 385 386 Input: 387 C - the matrix 388 nrqr - no of messages being processed. 389 rbuf - an array of pointers to the recieved requests 390 391 Output: 392 xdata - array of messages to be sent back 393 isz1 - size of each message 394 395 For better efficiency perhaps we should malloc separately each xdata[i], 396 then if a remalloc is required we need only copy the data for that one row 397 rather than all previous rows as it is now where a single large chunck of 398 memory is used. 399 400 */ 401 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1) 402 { 403 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 404 Mat A = c->A,B = c->B; 405 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data; 406 PetscErrorCode ierr; 407 PetscInt rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k; 408 PetscInt row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end; 409 PetscInt val,max1,max2,Mbs,no_malloc =0,*tmp,new_estimate,ctr; 410 PetscInt *rbuf_i,kmax,rbuf_0; 411 PetscBT xtable; 412 413 PetscFunctionBegin; 414 Mbs = c->Mbs; 415 rstart = c->rstartbs; 416 cstart = c->cstartbs; 417 ai = a->i; 418 aj = a->j; 419 bi = b->i; 420 bj = b->j; 421 garray = c->garray; 422 423 424 for (i=0,ct=0,total_sz=0; i<nrqr; ++i) { 425 rbuf_i = rbuf[i]; 426 rbuf_0 = rbuf_i[0]; 427 ct += rbuf_0; 428 for (j=1; j<=rbuf_0; j++) total_sz += rbuf_i[2*j]; 429 } 430 431 if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs; 432 else max1 = 1; 433 mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1); 434 ierr = PetscMalloc1(mem_estimate,&xdata[0]);CHKERRQ(ierr); 435 ++no_malloc; 436 ierr = PetscBTCreate(Mbs,&xtable);CHKERRQ(ierr); 437 ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr); 438 439 ct3 = 0; 440 for (i=0; i<nrqr; i++) { /* for easch mesg from proc i */ 441 rbuf_i = rbuf[i]; 442 rbuf_0 = rbuf_i[0]; 443 ct1 = 2*rbuf_0+1; 444 ct2 = ct1; 445 ct3 += ct1; 446 for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/ 447 ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr); 448 oct2 = ct2; 449 kmax = rbuf_i[2*j]; 450 for (k=0; k<kmax; k++,ct1++) { 451 row = rbuf_i[ct1]; 452 if (!PetscBTLookupSet(xtable,row)) { 453 if (!(ct3 < mem_estimate)) { 454 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 455 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 456 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 457 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 458 xdata[0] = tmp; 459 mem_estimate = new_estimate; ++no_malloc; 460 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 461 } 462 xdata[i][ct2++] = row; 463 ct3++; 464 } 465 } 466 for (k=oct2,max2=ct2; k<max2; k++) { 467 row = xdata[i][k] - rstart; 468 start = ai[row]; 469 end = ai[row+1]; 470 for (l=start; l<end; l++) { 471 val = aj[l] + cstart; 472 if (!PetscBTLookupSet(xtable,val)) { 473 if (!(ct3 < mem_estimate)) { 474 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 475 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 476 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 477 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 478 xdata[0] = tmp; 479 mem_estimate = new_estimate; ++no_malloc; 480 for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 481 } 482 xdata[i][ct2++] = val; 483 ct3++; 484 } 485 } 486 start = bi[row]; 487 end = bi[row+1]; 488 for (l=start; l<end; l++) { 489 val = garray[bj[l]]; 490 if (!PetscBTLookupSet(xtable,val)) { 491 if (!(ct3 < mem_estimate)) { 492 new_estimate = (PetscInt)(1.5*mem_estimate)+1; 493 ierr = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr); 494 ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr); 495 ierr = PetscFree(xdata[0]);CHKERRQ(ierr); 496 xdata[0] = tmp; 497 mem_estimate = new_estimate; ++no_malloc; 498 for (ctr =1; ctr <=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; 499 } 500 xdata[i][ct2++] = val; 501 ct3++; 502 } 503 } 504 } 505 /* Update the header*/ 506 xdata[i][2*j] = ct2 - oct2; /* Undo the vector isz1 and use only a var*/ 507 xdata[i][2*j-1] = rbuf_i[2*j-1]; 508 } 509 xdata[i][0] = rbuf_0; 510 xdata[i+1] = xdata[i] + ct2; 511 isz1[i] = ct2; /* size of each message */ 512 } 513 ierr = PetscBTDestroy(&xtable);CHKERRQ(ierr); 514 ierr = PetscInfo3(C,"Allocated %D bytes, required %D, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr); 515 PetscFunctionReturn(0); 516 } 517 518 PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[]) 519 { 520 IS *isrow_block,*iscol_block; 521 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 522 PetscErrorCode ierr; 523 PetscInt nmax,nstages_local,nstages,i,pos,max_no,ncol,nrow,N=C->cmap->N,bs=C->rmap->bs; 524 PetscBool colflag,*allcolumns,*allrows; 525 526 PetscFunctionBegin; 527 //printf("MatGetSubMatrices_MPIBAIJ scall %d, bs %d\n",scall,bs); 528 /* The compression and expansion should be avoided. Doesn't point 529 out errors, might change the indices, hence buggey */ 530 ierr = PetscMalloc2(ismax+1,&isrow_block,ismax+1,&iscol_block);CHKERRQ(ierr); 531 ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,ismax,isrow,isrow_block);CHKERRQ(ierr); 532 ierr = ISCompressIndicesGeneral(N,C->cmap->n,bs,ismax,iscol,iscol_block);CHKERRQ(ierr); 533 534 /* Check for special case: each processor gets entire matrix columns -- rm! */ 535 ierr = PetscMalloc2(ismax+1,&allcolumns,ismax+1,&allrows);CHKERRQ(ierr); 536 for (i=0; i<ismax; i++) { 537 ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr); 538 ierr = ISGetLocalSize(iscol[i],&ncol);CHKERRQ(ierr); 539 if (colflag && ncol == C->cmap->N) allcolumns[i] = PETSC_TRUE; 540 else allcolumns[i] = PETSC_FALSE; 541 542 ierr = ISIdentity(isrow[i],&colflag);CHKERRQ(ierr); 543 ierr = ISGetLocalSize(isrow[i],&nrow);CHKERRQ(ierr); 544 if (colflag && nrow == C->rmap->N) allrows[i] = PETSC_TRUE; 545 else allrows[i] = PETSC_FALSE; 546 } 547 548 /* Allocate memory to hold all the submatrices */ 549 if (scall == MAT_INITIAL_MATRIX) { 550 ierr = PetscMalloc1(ismax+1,submat);CHKERRQ(ierr); 551 } 552 /* Determine the number of stages through which submatrices are done */ 553 nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt)); 554 if (!nmax) nmax = 1; 555 nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0); 556 557 /* Make sure every processor loops through the nstages */ 558 ierr = MPIU_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr); 559 for (i=0,pos=0; i<nstages; i++) { 560 if (pos+nmax <= ismax) max_no = nmax; 561 else if (pos == ismax) max_no = 0; 562 else max_no = ismax-pos; 563 564 PetscBool isbaij; 565 ierr = PetscObjectTypeCompare((PetscObject)C,MATMPIBAIJ,&isbaij);CHKERRQ(ierr); 566 if (isbaij) { 567 ierr = MatGetSubMatrices_MPIBAIJ_local_new(C,max_no,isrow_block+pos,iscol_block+pos,scall,allrows+pos,allcolumns+pos,*submat+pos);CHKERRQ(ierr); 568 } else { 569 ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_block+pos,iscol_block+pos,scall,allrows+pos,allcolumns+pos,*submat+pos);CHKERRQ(ierr); 570 } 571 pos += max_no; 572 } 573 574 for (i=0; i<ismax; i++) { 575 ierr = ISDestroy(&isrow_block[i]);CHKERRQ(ierr); 576 ierr = ISDestroy(&iscol_block[i]);CHKERRQ(ierr); 577 } 578 ierr = PetscFree2(isrow_block,iscol_block);CHKERRQ(ierr); 579 ierr = PetscFree2(allcolumns,allrows);CHKERRQ(ierr); 580 PetscFunctionReturn(0); 581 } 582 583 #if defined(PETSC_USE_CTABLE) 584 PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank) 585 { 586 PetscInt nGlobalNd = proc_gnode[size]; 587 PetscMPIInt fproc; 588 PetscErrorCode ierr; 589 590 PetscFunctionBegin; 591 ierr = PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)),&fproc);CHKERRQ(ierr); 592 if (fproc > size) fproc = size; 593 while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) { 594 if (row < proc_gnode[fproc]) fproc--; 595 else fproc++; 596 } 597 *rank = fproc; 598 PetscFunctionReturn(0); 599 } 600 #endif 601 602 /* -------------------------------------------------------------------------*/ 603 /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */ 604 605 PetscErrorCode MatDestroy_MPIBAIJ_MatGetSubmatrices(Mat C) 606 { 607 PetscErrorCode ierr; 608 Mat_SeqBAIJ *c = (Mat_SeqBAIJ*)C->data; 609 Mat_SubMat *submatj = c->submatis1; 610 PetscInt i; 611 612 PetscFunctionBegin; 613 if (!submatj->id) { /* delete data that are linked only to submats[id=0] */ 614 ierr = PetscFree4(submatj->sbuf1,submatj->ptr,submatj->tmp,submatj->ctr);CHKERRQ(ierr); 615 616 for (i=0; i<submatj->nrqr; ++i) { 617 ierr = PetscFree(submatj->sbuf2[i]);CHKERRQ(ierr); 618 } 619 ierr = PetscFree3(submatj->sbuf2,submatj->req_size,submatj->req_source1);CHKERRQ(ierr); 620 621 if (submatj->rbuf1) { 622 ierr = PetscFree(submatj->rbuf1[0]);CHKERRQ(ierr); 623 ierr = PetscFree(submatj->rbuf1);CHKERRQ(ierr); 624 } 625 626 for (i=0; i<submatj->nrqs; ++i) { 627 ierr = PetscFree(submatj->rbuf3[i]);CHKERRQ(ierr); 628 } 629 ierr = PetscFree3(submatj->req_source2,submatj->rbuf2,submatj->rbuf3);CHKERRQ(ierr); 630 ierr = PetscFree(submatj->pa);CHKERRQ(ierr); 631 } 632 633 #if defined(PETSC_USE_CTABLE) 634 ierr = PetscTableDestroy((PetscTable*)&submatj->rmap);CHKERRQ(ierr); 635 if (submatj->cmap_loc) {ierr = PetscFree(submatj->cmap_loc);CHKERRQ(ierr);} 636 ierr = PetscFree(submatj->rmap_loc);CHKERRQ(ierr); 637 #else 638 ierr = PetscFree(submatj->rmap);CHKERRQ(ierr); 639 #endif 640 641 if (!submatj->allcolumns) { 642 #if defined(PETSC_USE_CTABLE) 643 ierr = PetscTableDestroy((PetscTable*)&submatj->cmap);CHKERRQ(ierr); 644 #else 645 ierr = PetscFree(submatj->cmap);CHKERRQ(ierr); 646 #endif 647 } 648 ierr = submatj->destroy(C);CHKERRQ(ierr); 649 ierr = PetscFree(submatj->row2proc);CHKERRQ(ierr); 650 651 ierr = PetscFree(submatj);CHKERRQ(ierr); 652 PetscFunctionReturn(0); 653 } 654 655 PetscErrorCode MatGetSubMatrices_MPIBAIJ_local_new(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allrows,PetscBool *allcolumns,Mat *submats) 656 { 657 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 658 Mat A = c->A; 659 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*subc; 660 const PetscInt **icol,**irow; 661 PetscInt *nrow,*ncol,start; 662 PetscErrorCode ierr; 663 PetscMPIInt rank,size,tag0,tag2,tag3,tag4,*w1,*w2,*w3,*w4,nrqr; 664 PetscInt **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,**rbuf1,row,proc=-1; 665 PetscInt nrqs=0,msz,**ptr=NULL,*req_size=NULL,*ctr=NULL,*pa,*tmp=NULL,tcol; 666 PetscInt **rbuf3=NULL,*req_source1=NULL,*req_source2,**sbuf_aj,**rbuf2=NULL,max1,max2; 667 PetscInt **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax; 668 #if defined(PETSC_USE_CTABLE) 669 PetscTable *cmap,cmap_i=NULL,*rmap,rmap_i; 670 #else 671 PetscInt **cmap,*cmap_i=NULL,**rmap,*rmap_i; 672 #endif 673 const PetscInt *irow_i; 674 PetscInt ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i; 675 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3; 676 MPI_Request *r_waits4,*s_waits3,*s_waits4; 677 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3,*s_status2; 678 MPI_Status *r_status3,*r_status4,*s_status4; 679 MPI_Comm comm; 680 PetscScalar **rbuf4,*rbuf4_i,**sbuf_aa,*vals,*mat_a,*imat_a,*sbuf_aa_i; 681 PetscMPIInt *onodes1,*olengths1,end; 682 PetscInt **row2proc,*row2proc_i,ilen_row,*imat_ilen,*imat_j,*imat_i; 683 Mat_SubMat **smats,*smat_i; 684 PetscBool *issorted,colflag,iscsorted=PETSC_TRUE; 685 PetscInt *sbuf1_i,*rbuf2_i,*rbuf3_i,ilen; 686 687 PetscInt bs=C->rmap->bs,bs2=c->bs2,rstart = c->rstartbs; 688 PetscBool ijonly=c->ijonly; /* private flag indicates only matrix data structures are requested */ 689 PetscInt nzA,nzB,*a_i=a->i,*b_i=b->i,*a_j = a->j,*b_j = b->j,ctmp,imark,*cworkA,*cworkB; 690 PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a; 691 PetscInt cstart = c->cstartbs,*bmap = c->garray; 692 693 PetscFunctionBegin; 694 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 695 size = c->size; 696 rank = c->rank; 697 698 ierr = PetscMalloc5(ismax,&smats,ismax,&row2proc,ismax,&cmap,ismax,&rmap,ismax,&allcolumns);CHKERRQ(ierr); 699 ierr = PetscMalloc5(ismax,&irow,ismax,&icol,ismax,&nrow,ismax,&ncol,ismax,&issorted);CHKERRQ(ierr); 700 701 for (i=0; i<ismax; i++) { 702 ierr = ISSorted(iscol[i],&issorted[i]);CHKERRQ(ierr); 703 if (!issorted[i]) iscsorted = issorted[i]; 704 705 ierr = ISSorted(isrow[i],&issorted[i]);CHKERRQ(ierr); 706 707 ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr); 708 ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr); 709 710 /* Check for special case: allcolumn */ 711 ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr); 712 ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr); 713 714 //if (colflag && ncol[i] == c->Nbs) { 715 // allcolumns[i] = PETSC_TRUE; 716 //icol[i] = NULL; 717 //} else { 718 allcolumns[i] = PETSC_FALSE; 719 ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr); 720 //} 721 } 722 //printf("[%d] nrow %d, ncol %d\n",rank,nrow[0],ncol[0]); 723 724 if (scall == MAT_REUSE_MATRIX) { 725 /* Assumes new rows are same length as the old rows */ 726 for (i=0; i<ismax; i++) { 727 subc = (Mat_SeqBAIJ*)(submats[i]->data); 728 if (subc->mbs != nrow[i] || subc->nbs != ncol[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size"); 729 730 /* Initial matrix as if empty */ 731 ierr = PetscMemzero(subc->ilen,subc->mbs*sizeof(PetscInt));CHKERRQ(ierr); 732 733 /* Initial matrix as if empty */ 734 submats[i]->factortype = C->factortype; 735 736 smat_i = subc->submatis1; 737 smats[i] = smat_i; 738 739 nrqs = smat_i->nrqs; 740 nrqr = smat_i->nrqr; 741 rbuf1 = smat_i->rbuf1; 742 rbuf2 = smat_i->rbuf2; 743 rbuf3 = smat_i->rbuf3; 744 req_source2 = smat_i->req_source2; 745 746 sbuf1 = smat_i->sbuf1; 747 sbuf2 = smat_i->sbuf2; 748 ptr = smat_i->ptr; 749 tmp = smat_i->tmp; 750 ctr = smat_i->ctr; 751 752 pa = smat_i->pa; 753 req_size = smat_i->req_size; 754 req_source1 = smat_i->req_source1; 755 756 allcolumns[i] = smat_i->allcolumns; 757 row2proc[i] = smat_i->row2proc; 758 rmap[i] = smat_i->rmap; 759 cmap[i] = smat_i->cmap; 760 } 761 } else { /* scall == MAT_INITIAL_MATRIX */ 762 /* Get some new tags to keep the communication clean */ 763 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 764 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 765 766 /* evaluate communication - mesg to who, length of mesg, and buffer space 767 required. Based on this, buffers are allocated, and data copied into them*/ 768 ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); /* mesg size, initialize work vectors */ 769 770 for (i=0; i<ismax; i++) { 771 jmax = nrow[i]; 772 irow_i = irow[i]; 773 774 ierr = PetscMalloc1(jmax,&row2proc_i);CHKERRQ(ierr); 775 row2proc[i] = row2proc_i; 776 777 if (issorted[i]) proc = 0; 778 for (j=0; j<jmax; j++) { 779 if (!issorted[i]) proc = 0; 780 row = irow_i[j]; 781 while (row >= c->rangebs[proc+1]) proc++; 782 w4[proc]++; 783 row2proc_i[j] = proc; /* map row index to proc */ 784 } 785 for (j=0; j<size; j++) { 786 if (w4[j]) { w1[j] += w4[j]; w3[j]++; w4[j] = 0;} 787 } 788 } 789 790 nrqs = 0; /* no of outgoing messages */ 791 msz = 0; /* total mesg length (for all procs) */ 792 w1[rank] = 0; /* no mesg sent to self */ 793 w3[rank] = 0; 794 for (i=0; i<size; i++) { 795 if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 796 } 797 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/ 798 for (i=0,j=0; i<size; i++) { 799 if (w1[i]) { pa[j] = i; j++; } 800 } 801 802 /* Each message would have a header = 1 + 2*(no of IS) + data */ 803 for (i=0; i<nrqs; i++) { 804 j = pa[i]; 805 w1[j] += w2[j] + 2* w3[j]; 806 msz += w1[j]; 807 } 808 ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr); 809 810 /* Determine the number of messages to expect, their lengths, from from-ids */ 811 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 812 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 813 814 /* Now post the Irecvs corresponding to these messages */ 815 tag0 = ((PetscObject)C)->tag; 816 ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr); 817 /* printf("[%d] nrqs %d, nrqr %d\n",rank,nrqs,nrqr); */ 818 819 ierr = PetscFree(onodes1);CHKERRQ(ierr); 820 ierr = PetscFree(olengths1);CHKERRQ(ierr); 821 822 /* Allocate Memory for outgoing messages */ 823 ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr); 824 ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr); 825 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 826 827 { 828 PetscInt *iptr = tmp; 829 k = 0; 830 for (i=0; i<nrqs; i++) { 831 j = pa[i]; 832 iptr += k; 833 sbuf1[j] = iptr; 834 k = w1[j]; 835 } 836 } 837 838 /* Form the outgoing messages. Initialize the header space */ 839 for (i=0; i<nrqs; i++) { 840 j = pa[i]; 841 sbuf1[j][0] = 0; 842 ierr = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 843 ptr[j] = sbuf1[j] + 2*w3[j] + 1; 844 } 845 846 /* Parse the isrow and copy data into outbuf */ 847 for (i=0; i<ismax; i++) { 848 row2proc_i = row2proc[i]; 849 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 850 irow_i = irow[i]; 851 jmax = nrow[i]; 852 for (j=0; j<jmax; j++) { /* parse the indices of each IS */ 853 proc = row2proc_i[j]; 854 if (proc != rank) { /* copy to the outgoing buf*/ 855 ctr[proc]++; 856 *ptr[proc] = irow_i[j]; 857 ptr[proc]++; 858 } 859 } 860 /* Update the headers for the current IS */ 861 for (j=0; j<size; j++) { /* Can Optimise this loop too */ 862 if ((ctr_j = ctr[j])) { 863 sbuf1_j = sbuf1[j]; 864 k = ++sbuf1_j[0]; 865 sbuf1_j[2*k] = ctr_j; 866 sbuf1_j[2*k-1] = i; 867 } 868 } 869 } 870 871 /* Now post the sends */ 872 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 873 for (i=0; i<nrqs; ++i) { 874 j = pa[i]; 875 ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr); 876 } 877 878 /* Post Receives to capture the buffer size */ 879 ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr); 880 ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr); 881 rbuf2[0] = tmp + msz; 882 for (i=1; i<nrqs; ++i) { 883 rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]]; 884 } 885 for (i=0; i<nrqs; ++i) { 886 j = pa[i]; 887 ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag2,comm,r_waits2+i);CHKERRQ(ierr); 888 } 889 890 /* Send to other procs the buf size they should allocate */ 891 /* Receive messages*/ 892 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 893 ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr); 894 ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr); 895 { 896 PetscInt *sAi = a->i,*sBi = b->i,id; 897 PetscInt *sbuf2_i; 898 899 ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr); 900 for (i=0; i<nrqr; ++i) { 901 req_size[i] = 0; 902 rbuf1_i = rbuf1[i]; 903 start = 2*rbuf1_i[0] + 1; 904 ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr); 905 ierr = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr); 906 sbuf2_i = sbuf2[i]; 907 for (j=start; j<end; j++) { 908 id = rbuf1_i[j] - rstart; 909 ncols = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id]; 910 sbuf2_i[j] = ncols; 911 req_size[i] += ncols; 912 } 913 req_source1[i] = r_status1[i].MPI_SOURCE; 914 /* form the header */ 915 sbuf2_i[0] = req_size[i]; 916 for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j]; 917 918 ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr); 919 } 920 } 921 ierr = PetscFree(r_status1);CHKERRQ(ierr); 922 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 923 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 924 925 /* Receive messages*/ 926 ierr = PetscMalloc1(nrqs+1,&r_waits3);CHKERRQ(ierr); 927 ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr); 928 929 ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr); 930 for (i=0; i<nrqs; ++i) { 931 ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr); 932 req_source2[i] = r_status2[i].MPI_SOURCE; 933 ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr); 934 } 935 ierr = PetscFree(r_status2);CHKERRQ(ierr); 936 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 937 938 /* Wait on sends1 and sends2 */ 939 ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr); 940 ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr); 941 942 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);} 943 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);} 944 ierr = PetscFree(s_status1);CHKERRQ(ierr); 945 ierr = PetscFree(s_status2);CHKERRQ(ierr); 946 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 947 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 948 949 /* Now allocate sending buffers for a->j, and send them off */ 950 ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr); 951 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 952 ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr); 953 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 954 955 ierr = PetscMalloc1(nrqr+1,&s_waits3);CHKERRQ(ierr); 956 { 957 958 for (i=0; i<nrqr; i++) { 959 rbuf1_i = rbuf1[i]; 960 sbuf_aj_i = sbuf_aj[i]; 961 ct1 = 2*rbuf1_i[0] + 1; 962 ct2 = 0; 963 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 964 kmax = rbuf1[i][2*j]; 965 for (k=0; k<kmax; k++,ct1++) { 966 row = rbuf1_i[ct1] - rstart; 967 nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row]; 968 ncols = nzA + nzB; 969 cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row]; 970 971 /* load the column indices for this row into cols */ 972 cols = sbuf_aj_i + ct2; 973 for (l=0; l<nzB; l++) { 974 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp; 975 else break; 976 } 977 imark = l; 978 for (l=0; l<nzA; l++) {cols[imark+l] = cstart + cworkA[l];} 979 for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]]; 980 ct2 += ncols; 981 } 982 } 983 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr); 984 } 985 } 986 ierr = PetscMalloc2(nrqs+1,&r_status3,nrqr+1,&s_status3);CHKERRQ(ierr); 987 988 /* create col map: global col of C -> local col of submatrices */ 989 const PetscInt *icol_i; 990 #if defined(PETSC_USE_CTABLE) 991 for (i=0; i<ismax; i++) { 992 if (!allcolumns[i]) { 993 ierr = PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);CHKERRQ(ierr); 994 995 jmax = ncol[i]; 996 icol_i = icol[i]; 997 cmap_i = cmap[i]; 998 for (j=0; j<jmax; j++) { 999 ierr = PetscTableAdd(cmap[i],icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1000 } 1001 } else cmap[i] = NULL; 1002 } 1003 #else 1004 for (i=0; i<ismax; i++) { 1005 if (!allcolumns[i]) { 1006 ierr = PetscCalloc1(c->Nbs,&cmap[i]);CHKERRQ(ierr); 1007 jmax = ncol[i]; 1008 icol_i = icol[i]; 1009 cmap_i = cmap[i]; 1010 for (j=0; j<jmax; j++) cmap_i[icol_i[j]] = j+1; 1011 } else cmap[i] = NULL; 1012 } 1013 #endif 1014 1015 /* Create lens which is required for MatCreate... */ 1016 for (i=0,j=0; i<ismax; i++) j += nrow[i]; 1017 ierr = PetscMalloc1(ismax,&lens);CHKERRQ(ierr); 1018 1019 if (ismax) { 1020 ierr = PetscCalloc1(j,&lens[0]);CHKERRQ(ierr); 1021 } 1022 for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1]; 1023 1024 /* Update lens from local data */ 1025 for (i=0; i<ismax; i++) { 1026 row2proc_i = row2proc[i]; 1027 jmax = nrow[i]; 1028 if (!allcolumns[i]) cmap_i = cmap[i]; 1029 irow_i = irow[i]; 1030 lens_i = lens[i]; 1031 for (j=0; j<jmax; j++) { 1032 row = irow_i[j]; /* global blocked row of C */ 1033 proc = row2proc_i[j]; 1034 if (proc == rank) { 1035 /* Get indices from matA and then from matB */ 1036 #if defined(PETSC_USE_CTABLE) 1037 PetscInt tt; 1038 #endif 1039 row = row - rstart; 1040 nzA = a_i[row+1] - a_i[row]; 1041 nzB = b_i[row+1] - b_i[row]; 1042 cworkA = a_j + a_i[row]; 1043 cworkB = b_j + b_i[row]; 1044 1045 if (!allcolumns[i]) { 1046 #if defined(PETSC_USE_CTABLE) 1047 for (k=0; k<nzA; k++) { 1048 ierr = PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr); 1049 if (tt) lens_i[j]++; 1050 } 1051 for (k=0; k<nzB; k++) { 1052 ierr = PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr); 1053 if (tt) lens_i[j]++; 1054 } 1055 1056 #else 1057 for (k=0; k<nzA; k++) { 1058 if (cmap_i[cstart + cworkA[k]]) lens_i[j]++; 1059 } 1060 for (k=0; k<nzB; k++) { 1061 if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++; 1062 } 1063 #endif 1064 } else { /* allcolumns */ 1065 lens_i[j] = nzA + nzB; 1066 } 1067 } 1068 } 1069 } 1070 1071 /* Create row map: global row of C -> local row of submatrices */ 1072 #if defined(PETSC_USE_CTABLE) 1073 for (i=0; i<ismax; i++) { 1074 ierr = PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);CHKERRQ(ierr); 1075 irow_i = irow[i]; 1076 jmax = nrow[i]; 1077 for (j=0; j<jmax; j++) { 1078 ierr = PetscTableAdd(rmap[i],irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1079 } 1080 } 1081 #else 1082 for (i=0; i<ismax; i++) { 1083 ierr = PetscCalloc1(c->Mbs,&rmap[i]);CHKERRQ(ierr); 1084 rmap_i = rmap[i]; 1085 irow_i = irow[i]; 1086 jmax = nrow[i]; 1087 for (j=0; j<jmax; j++) { 1088 rmap_i[irow_i[j]] = j; 1089 } 1090 } 1091 #endif 1092 1093 /* Update lens from offproc data */ 1094 { 1095 PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i; 1096 1097 ierr = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr); 1098 for (tmp2=0; tmp2<nrqs; tmp2++) { 1099 sbuf1_i = sbuf1[pa[tmp2]]; 1100 jmax = sbuf1_i[0]; 1101 ct1 = 2*jmax+1; 1102 ct2 = 0; 1103 rbuf2_i = rbuf2[tmp2]; 1104 rbuf3_i = rbuf3[tmp2]; 1105 for (j=1; j<=jmax; j++) { 1106 is_no = sbuf1_i[2*j-1]; 1107 max1 = sbuf1_i[2*j]; 1108 lens_i = lens[is_no]; 1109 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 1110 rmap_i = rmap[is_no]; 1111 for (k=0; k<max1; k++,ct1++) { 1112 #if defined(PETSC_USE_CTABLE) 1113 ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 1114 row--; 1115 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1116 #else 1117 row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */ 1118 #endif 1119 max2 = rbuf2_i[ct1]; 1120 for (l=0; l<max2; l++,ct2++) { 1121 if (!allcolumns[is_no]) { 1122 #if defined(PETSC_USE_CTABLE) 1123 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1124 #else 1125 tcol = cmap_i[rbuf3_i[ct2]]; 1126 #endif 1127 if (tcol) lens_i[row]++; 1128 } else { /* allcolumns */ 1129 lens_i[row]++; /* lens_i[row] += max2 ? */ 1130 } 1131 } 1132 } 1133 } 1134 } 1135 } 1136 ierr = PetscFree(r_waits3);CHKERRQ(ierr); 1137 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);} 1138 ierr = PetscFree2(r_status3,s_status3);CHKERRQ(ierr); 1139 ierr = PetscFree(s_waits3);CHKERRQ(ierr); 1140 1141 /* Create the submatrices */ 1142 for (i=0; i<ismax; i++) { 1143 PetscInt bs_tmp; 1144 if (ijonly) bs_tmp = 1; 1145 else bs_tmp = bs; 1146 1147 ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr); 1148 ierr = MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr); 1149 1150 ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr); 1151 ierr = MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); 1152 ierr = MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); /* this subroutine is used by SBAIJ routines */ 1153 1154 /* create struct Mat_SubMat and attached it to submat */ 1155 ierr = PetscNew(&smat_i);CHKERRQ(ierr); 1156 subc = (Mat_SeqBAIJ*)submats[i]->data; 1157 subc->submatis1 = smat_i; 1158 smats[i] = smat_i; 1159 1160 smat_i->destroy = submats[i]->ops->destroy; 1161 submats[i]->ops->destroy = MatDestroy_MPIBAIJ_MatGetSubmatrices; 1162 submats[i]->factortype = C->factortype; 1163 1164 smat_i->id = i; 1165 smat_i->nrqs = nrqs; 1166 smat_i->nrqr = nrqr; 1167 smat_i->rbuf1 = rbuf1; 1168 smat_i->rbuf2 = rbuf2; 1169 smat_i->rbuf3 = rbuf3; 1170 smat_i->sbuf2 = sbuf2; 1171 smat_i->req_source2 = req_source2; 1172 1173 smat_i->sbuf1 = sbuf1; 1174 smat_i->ptr = ptr; 1175 smat_i->tmp = tmp; 1176 smat_i->ctr = ctr; 1177 1178 smat_i->pa = pa; 1179 smat_i->req_size = req_size; 1180 smat_i->req_source1 = req_source1; 1181 1182 smat_i->allcolumns = allcolumns[i]; 1183 smat_i->singleis = PETSC_FALSE; 1184 smat_i->row2proc = row2proc[i]; 1185 smat_i->rmap = rmap[i]; 1186 smat_i->cmap = cmap[i]; 1187 } 1188 1189 if (ismax) {ierr = PetscFree(lens[0]);CHKERRQ(ierr);} 1190 ierr = PetscFree(lens);CHKERRQ(ierr); 1191 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 1192 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 1193 1194 } /* endof scall == MAT_INITIAL_MATRIX */ 1195 1196 /* Post recv matrix values */ 1197 ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr); 1198 ierr = PetscMalloc1(nrqs+1,&rbuf4);CHKERRQ(ierr); 1199 ierr = PetscMalloc1(nrqs+1,&r_waits4);CHKERRQ(ierr); 1200 ierr = PetscMalloc1(nrqs+1,&r_status4);CHKERRQ(ierr); 1201 ierr = PetscMalloc1(nrqr+1,&s_status4);CHKERRQ(ierr); 1202 for (i=0; i<nrqs; ++i) { 1203 ierr = PetscMalloc1(rbuf2[i][0]*bs2,&rbuf4[i]);CHKERRQ(ierr); 1204 ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0]*bs2,MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr); 1205 } 1206 1207 /* Allocate sending buffers for a->a, and send them off */ 1208 ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr); 1209 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1210 1211 ierr = PetscMalloc1((j+1)*bs2,&sbuf_aa[0]);CHKERRQ(ierr); 1212 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2; 1213 1214 ierr = PetscMalloc1(nrqr+1,&s_waits4);CHKERRQ(ierr); 1215 1216 for (i=0; i<nrqr; i++) { 1217 rbuf1_i = rbuf1[i]; 1218 sbuf_aa_i = sbuf_aa[i]; 1219 ct1 = 2*rbuf1_i[0]+1; 1220 ct2 = 0; 1221 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 1222 kmax = rbuf1_i[2*j]; 1223 for (k=0; k<kmax; k++,ct1++) { 1224 row = rbuf1_i[ct1] - rstart; 1225 nzA = a_i[row+1] - a_i[row]; 1226 nzB = b_i[row+1] - b_i[row]; 1227 ncols = nzA + nzB; 1228 cworkB = b_j + b_i[row]; 1229 vworkA = a_a + a_i[row]*bs2; 1230 vworkB = b_a + b_i[row]*bs2; 1231 1232 /* load the column values for this row into vals*/ 1233 vals = sbuf_aa_i+ct2*bs2; 1234 for (l=0; l<nzB; l++) { 1235 if ((bmap[cworkB[l]]) < cstart) { 1236 ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1237 } else break; 1238 } 1239 imark = l; 1240 for (l=0; l<nzA; l++) { 1241 ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);//error in proc[1]??? 1242 } 1243 for (l=imark; l<nzB; l++) { 1244 ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1245 } 1246 1247 ct2 += ncols; 1248 } 1249 } 1250 ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr); 1251 } 1252 1253 if (!ismax) { 1254 ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr); 1255 ierr = PetscFree(rbuf1);CHKERRQ(ierr); 1256 } 1257 1258 /* Assemble the matrices */ 1259 /* First assemble the local rows */ 1260 for (i=0; i<ismax; i++) { 1261 row2proc_i = row2proc[i]; 1262 subc = (Mat_SeqBAIJ*)submats[i]->data; 1263 imat_ilen = subc->ilen; 1264 imat_j = subc->j; 1265 imat_i = subc->i; 1266 imat_a = subc->a; 1267 1268 if (!allcolumns[i]) cmap_i = cmap[i]; 1269 rmap_i = rmap[i]; 1270 irow_i = irow[i]; 1271 jmax = nrow[i]; 1272 for (j=0; j<jmax; j++) { 1273 row = irow_i[j]; 1274 proc = row2proc_i[j]; 1275 1276 if (proc == rank) { 1277 1278 row = row - rstart; 1279 nzA = a_i[row+1] - a_i[row]; 1280 nzB = b_i[row+1] - b_i[row]; 1281 cworkA = a_j + a_i[row]; 1282 cworkB = b_j + b_i[row]; 1283 if (!ijonly) { 1284 vworkA = a_a + a_i[row]*bs2; 1285 vworkB = b_a + b_i[row]*bs2; 1286 } 1287 #if defined(PETSC_USE_CTABLE) 1288 ierr = PetscTableFind(rmap_i,row+rstart+1,&row);CHKERRQ(ierr); 1289 row--; 1290 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1291 #else 1292 row = rmap_i[row + rstart]; 1293 #endif 1294 mat_i = imat_i[row]; 1295 if (!ijonly) mat_a = imat_a + mat_i*bs2; 1296 mat_j = imat_j + mat_i; 1297 ilen_row = imat_ilen[row]; 1298 1299 /* load the column indices for this row into cols*/ 1300 if (!allcolumns[i]) { 1301 for (l=0; l<nzB; l++) { 1302 if ((ctmp = bmap[cworkB[l]]) < cstart) { 1303 #if defined(PETSC_USE_CTABLE) 1304 ierr = PetscTableFind(cmap_i,ctmp+1,&tcol);CHKERRQ(ierr); 1305 if (tcol) { 1306 #else 1307 if ((tcol = cmap_i[ctmp])) { 1308 #endif 1309 *mat_j++ = tcol - 1; 1310 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1311 mat_a += bs2; 1312 ilen_row++; 1313 } 1314 } else break; 1315 } 1316 imark = l; 1317 for (l=0; l<nzA; l++) { 1318 #if defined(PETSC_USE_CTABLE) 1319 ierr = PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr); 1320 if (tcol) { 1321 #else 1322 if ((tcol = cmap_i[cstart + cworkA[l]])) { 1323 #endif 1324 *mat_j++ = tcol - 1; 1325 if (!ijonly) { 1326 ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1327 mat_a += bs2; 1328 } 1329 ilen_row++; 1330 } 1331 } 1332 for (l=imark; l<nzB; l++) { 1333 #if defined(PETSC_USE_CTABLE) 1334 ierr = PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr); 1335 if (tcol) { 1336 #else 1337 if ((tcol = cmap_i[bmap[cworkB[l]]])) { 1338 #endif 1339 *mat_j++ = tcol - 1; 1340 if (!ijonly) { 1341 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1342 mat_a += bs2; 1343 } 1344 ilen_row++; 1345 } 1346 } 1347 } else { /* allcolumns */ 1348 for (l=0; l<nzB; l++) { 1349 if ((ctmp = bmap[cworkB[l]]) < cstart) { 1350 *mat_j++ = ctmp; 1351 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1352 mat_a += bs2; 1353 ilen_row++; 1354 } else break; 1355 } 1356 imark = l; 1357 for (l=0; l<nzA; l++) { 1358 *mat_j++ = cstart+cworkA[l]; 1359 if (!ijonly) { 1360 ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1361 mat_a += bs2; 1362 } 1363 ilen_row++; 1364 } 1365 for (l=imark; l<nzB; l++) { 1366 *mat_j++ = bmap[cworkB[l]]; 1367 if (!ijonly) { 1368 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1369 mat_a += bs2; 1370 } 1371 ilen_row++; 1372 } 1373 } 1374 imat_ilen[row] = ilen_row; 1375 } 1376 } 1377 } 1378 1379 /* Now assemble the off proc rows */ 1380 if (!ijonly) { 1381 ierr = MPI_Waitall(nrqs,r_waits4,r_status4);CHKERRQ(ierr); 1382 } 1383 for (tmp2=0; tmp2<nrqs; tmp2++) { 1384 sbuf1_i = sbuf1[pa[tmp2]]; 1385 jmax = sbuf1_i[0]; 1386 ct1 = 2*jmax + 1; 1387 ct2 = 0; 1388 rbuf2_i = rbuf2[tmp2]; 1389 rbuf3_i = rbuf3[tmp2]; 1390 if (!ijonly) rbuf4_i = rbuf4[tmp2]; 1391 for (j=1; j<=jmax; j++) { 1392 is_no = sbuf1_i[2*j-1]; 1393 rmap_i = rmap[is_no]; 1394 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 1395 subc = (Mat_SeqBAIJ*)submats[is_no]->data; 1396 imat_ilen = subc->ilen; 1397 imat_j = subc->j; 1398 imat_i = subc->i; 1399 if (!ijonly) imat_a = subc->a; 1400 max1 = sbuf1_i[2*j]; 1401 for (k=0; k<max1; k++,ct1++) { /* for each recved block row */ 1402 row = sbuf1_i[ct1]; 1403 #if defined(PETSC_USE_CTABLE) 1404 ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr); 1405 row--; 1406 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 1407 #else 1408 row = rmap_i[row]; 1409 #endif 1410 ilen = imat_ilen[row]; 1411 mat_i = imat_i[row]; 1412 if (!ijonly) mat_a = imat_a + mat_i*bs2; 1413 mat_j = imat_j + mat_i; 1414 max2 = rbuf2_i[ct1]; 1415 if (!allcolumns[is_no]) { 1416 for (l=0; l<max2; l++,ct2++) { 1417 #if defined(PETSC_USE_CTABLE) 1418 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 1419 #else 1420 tcol = cmap_i[rbuf3_i[ct2]]; 1421 #endif 1422 if (tcol) { 1423 *mat_j++ = tcol - 1; 1424 if (!ijonly) { 1425 ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1426 mat_a += bs2; 1427 } 1428 //*mat_a++ = rbuf4_i[ct2]; 1429 ilen++; 1430 } 1431 } 1432 } else { /* allcolumns */ 1433 for (l=0; l<max2; l++,ct2++) { 1434 *mat_j++ = rbuf3_i[ct2]; /* same global column index of C */ 1435 //*mat_a++ = rbuf4_i[ct2]; 1436 if (!ijonly) { 1437 ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1438 mat_a += bs2; 1439 } 1440 ilen++; 1441 } 1442 } 1443 imat_ilen[row] = ilen; 1444 } 1445 } 1446 } 1447 1448 if (!iscsorted) { /* sort column indices of the rows -- not done yet!!! */ 1449 for (i=0; i<ismax; i++) { 1450 subc = (Mat_SeqBAIJ*)submats[i]->data; 1451 imat_j = subc->j; 1452 imat_i = subc->i; 1453 imat_a = subc->a; 1454 imat_ilen = subc->ilen; 1455 1456 if (allcolumns[i]) continue; 1457 jmax = nrow[i]; 1458 for (j=0; j<jmax; j++) { 1459 PetscInt ilen; 1460 1461 mat_i = imat_i[j]; 1462 if (!ijonly) mat_a = imat_a + mat_i; 1463 mat_j = imat_j + mat_i; 1464 ilen = imat_ilen[j]; 1465 ierr = PetscSortIntWithScalarArray(ilen,mat_j,mat_a);CHKERRQ(ierr); 1466 } 1467 } 1468 } 1469 1470 ierr = PetscFree(r_status4);CHKERRQ(ierr); 1471 ierr = PetscFree(r_waits4);CHKERRQ(ierr); 1472 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);} 1473 ierr = PetscFree(s_waits4);CHKERRQ(ierr); 1474 ierr = PetscFree(s_status4);CHKERRQ(ierr); 1475 1476 /* Restore the indices */ 1477 for (i=0; i<ismax; i++) { 1478 ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr); 1479 if (!allcolumns[i]) { 1480 ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr); 1481 } 1482 } 1483 1484 for (i=0; i<ismax; i++) { 1485 ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1486 ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 1487 } 1488 1489 /* Destroy allocated memory */ 1490 if (!ismax) { 1491 ierr = PetscFree(pa);CHKERRQ(ierr); 1492 1493 ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr); 1494 for (i=0; i<nrqr; ++i) { 1495 ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr); 1496 } 1497 for (i=0; i<nrqs; ++i) { 1498 ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr); 1499 } 1500 1501 ierr = PetscFree3(sbuf2,req_size,req_source1);CHKERRQ(ierr); 1502 ierr = PetscFree3(req_source2,rbuf2,rbuf3);CHKERRQ(ierr); 1503 } 1504 1505 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 1506 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 1507 ierr = PetscFree5(irow,icol,nrow,ncol,issorted);CHKERRQ(ierr); 1508 1509 for (i=0; i<nrqs; ++i) { 1510 ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr); 1511 } 1512 ierr = PetscFree(rbuf4);CHKERRQ(ierr); 1513 1514 ierr = PetscFree5(smats,row2proc,cmap,rmap,allcolumns);CHKERRQ(ierr); 1515 PetscFunctionReturn(0); 1516 } 1517 1518 //------------ endof new ------------- 1519 1520 PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allrows,PetscBool *allcolumns,Mat *submats) 1521 { 1522 Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data; 1523 Mat A = c->A; 1524 Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat; 1525 const PetscInt **irow,**icol,*irow_i; 1526 PetscInt *nrow,*ncol,*w3,*w4,start; 1527 PetscErrorCode ierr; 1528 PetscMPIInt size,tag0,tag1,tag2,tag3,*w1,*w2,nrqr,idex,end,proc; 1529 PetscInt **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,**rbuf1,row; 1530 PetscInt nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol; 1531 PetscInt **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2; 1532 PetscInt **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax; 1533 PetscInt ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i; 1534 PetscInt bs =C->rmap->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB; 1535 PetscInt cstart = c->cstartbs,nzA,nzB,*a_i=a->i,*b_i=b->i,imark; 1536 PetscInt *bmap = c->garray,ctmp,rstart=c->rstartbs; 1537 MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3,*s_waits3; 1538 MPI_Status *r_status1,*r_status2,*s_status1,*s_status3,*s_status2,*r_status3; 1539 MPI_Comm comm; 1540 PetscBool flag; 1541 PetscMPIInt *onodes1,*olengths1; 1542 PetscBool ijonly=c->ijonly; /* private flag indicates only matrix data structures are requested */ 1543 1544 /* variables below are used for the matrix numerical values - case of !ijonly */ 1545 MPI_Request *r_waits4,*s_waits4; 1546 MPI_Status *r_status4,*s_status4; 1547 MatScalar **rbuf4,**sbuf_aa,*vals,*mat_a = NULL,*sbuf_aa_i,*vworkA = NULL,*vworkB = NULL; 1548 MatScalar *a_a=a->a,*b_a=b->a; 1549 1550 #if defined(PETSC_USE_CTABLE) 1551 PetscInt tt; 1552 PetscTable *rmap,*cmap,rmap_i,cmap_i=NULL; 1553 #else 1554 PetscInt **cmap,*cmap_i=NULL,*rtable,*rmap_i,**rmap, Mbs = c->Mbs; 1555 #endif 1556 1557 PetscFunctionBegin; 1558 ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr); 1559 tag0 = ((PetscObject)C)->tag; 1560 size = c->size; 1561 rank = c->rank; 1562 //if (!rank) printf("MatGetSubMatrices_MPIBAIJ scall %d, bs %d\n",scall,bs); 1563 1564 /* Get some new tags to keep the communication clean */ 1565 ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr); 1566 ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr); 1567 ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr); 1568 1569 #if defined(PETSC_USE_CTABLE) 1570 ierr = PetscMalloc4(ismax,&irow,ismax,&icol,ismax,&nrow,ismax,&ncol);CHKERRQ(ierr); 1571 #else 1572 ierr = PetscMalloc5(ismax,&irow,ismax,&icol,ismax,&nrow,ismax,&ncol,Mbs+1,&rtable);CHKERRQ(ierr); 1573 /* Create hash table for the mapping :row -> proc*/ 1574 for (i=0,j=0; i<size; i++) { 1575 jmax = C->rmap->range[i+1]/bs; 1576 for (; j<jmax; j++) rtable[j] = i; 1577 } 1578 #endif 1579 1580 for (i=0; i<ismax; i++) { 1581 if (allrows[i]) { 1582 irow[i] = NULL; 1583 nrow[i] = C->rmap->N/bs; 1584 } else { 1585 ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr); 1586 ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr); 1587 } 1588 1589 if (allcolumns[i]) { 1590 icol[i] = NULL; 1591 ncol[i] = C->cmap->N/bs; 1592 } else { 1593 ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr); 1594 ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr); 1595 } 1596 } 1597 1598 /* evaluate communication - mesg to who,length of mesg,and buffer space 1599 required. Based on this, buffers are allocated, and data copied into them*/ 1600 ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr); 1601 for (i=0; i<ismax; i++) { 1602 ierr = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector*/ 1603 jmax = nrow[i]; 1604 irow_i = irow[i]; 1605 for (j=0; j<jmax; j++) { 1606 if (allrows[i]) row = j; 1607 else row = irow_i[j]; 1608 1609 #if defined(PETSC_USE_CTABLE) 1610 ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr); 1611 #else 1612 proc = rtable[row]; 1613 #endif 1614 w4[proc]++; 1615 } 1616 for (j=0; j<size; j++) { 1617 if (w4[j]) { w1[j] += w4[j]; w3[j]++;} 1618 } 1619 } 1620 1621 nrqs = 0; /* no of outgoing messages */ 1622 msz = 0; /* total mesg length for all proc */ 1623 w1[rank] = 0; /* no mesg sent to intself */ 1624 w3[rank] = 0; 1625 for (i=0; i<size; i++) { 1626 if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */ 1627 } 1628 ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/ 1629 for (i=0,j=0; i<size; i++) { 1630 if (w1[i]) { pa[j] = i; j++; } 1631 } 1632 1633 /* Each message would have a header = 1 + 2*(no of IS) + data */ 1634 for (i=0; i<nrqs; i++) { 1635 j = pa[i]; 1636 w1[j] += w2[j] + 2* w3[j]; 1637 msz += w1[j]; 1638 } 1639 1640 /* Determine the number of messages to expect, their lengths, from from-ids */ 1641 ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr); 1642 ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr); 1643 1644 /* Now post the Irecvs corresponding to these messages */ 1645 ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr); 1646 1647 ierr = PetscFree(onodes1);CHKERRQ(ierr); 1648 ierr = PetscFree(olengths1);CHKERRQ(ierr); 1649 1650 /* Allocate Memory for outgoing messages */ 1651 ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr); 1652 ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr); 1653 ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr); 1654 { 1655 PetscInt *iptr = tmp,ict = 0; 1656 for (i=0; i<nrqs; i++) { 1657 j = pa[i]; 1658 iptr += ict; 1659 sbuf1[j] = iptr; 1660 ict = w1[j]; 1661 } 1662 } 1663 1664 /* Form the outgoing messages */ 1665 /* Initialise the header space */ 1666 for (i=0; i<nrqs; i++) { 1667 j = pa[i]; 1668 sbuf1[j][0] = 0; 1669 ierr = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr); 1670 ptr[j] = sbuf1[j] + 2*w3[j] + 1; 1671 } 1672 1673 /* Parse the isrow and copy data into outbuf */ 1674 for (i=0; i<ismax; i++) { 1675 ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr); 1676 irow_i = irow[i]; 1677 jmax = nrow[i]; 1678 for (j=0; j<jmax; j++) { /* parse the indices of each IS */ 1679 if (allrows[i]) row = j; 1680 else row = irow_i[j]; 1681 1682 #if defined(PETSC_USE_CTABLE) 1683 ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr); 1684 #else 1685 proc = rtable[row]; 1686 #endif 1687 if (proc != rank) { /* copy to the outgoing buf*/ 1688 ctr[proc]++; 1689 *ptr[proc] = row; 1690 ptr[proc]++; 1691 } 1692 } 1693 /* Update the headers for the current IS */ 1694 for (j=0; j<size; j++) { /* Can Optimise this loop too */ 1695 if ((ctr_j = ctr[j])) { 1696 sbuf1_j = sbuf1[j]; 1697 k = ++sbuf1_j[0]; 1698 sbuf1_j[2*k] = ctr_j; 1699 sbuf1_j[2*k-1] = i; 1700 } 1701 } 1702 } 1703 1704 /* Now post the sends */ 1705 ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr); 1706 for (i=0; i<nrqs; ++i) { 1707 j = pa[i]; 1708 ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr); 1709 } 1710 1711 /* Post Recieves to capture the buffer size */ 1712 ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr); 1713 ierr = PetscMalloc1(nrqs+1,&rbuf2);CHKERRQ(ierr); 1714 rbuf2[0] = tmp + msz; 1715 for (i=1; i<nrqs; ++i) { 1716 rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]]; 1717 } 1718 for (i=0; i<nrqs; ++i) { 1719 j = pa[i]; 1720 ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr); 1721 } 1722 1723 /* Send to other procs the buf size they should allocate */ 1724 1725 /* Receive messages*/ 1726 ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr); 1727 ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr); 1728 ierr = PetscMalloc3(nrqr+1,&sbuf2,nrqr,&req_size,nrqr,&req_source);CHKERRQ(ierr); 1729 { 1730 Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data; 1731 PetscInt *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i; 1732 1733 for (i=0; i<nrqr; ++i) { 1734 ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr); 1735 1736 req_size[idex] = 0; 1737 rbuf1_i = rbuf1[idex]; 1738 start = 2*rbuf1_i[0] + 1; 1739 ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr); 1740 ierr = PetscMalloc1(end,&sbuf2[idex]);CHKERRQ(ierr); 1741 sbuf2_i = sbuf2[idex]; 1742 for (j=start; j<end; j++) { 1743 id = rbuf1_i[j] - rstart; 1744 ncols = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id]; 1745 sbuf2_i[j] = ncols; 1746 req_size[idex] += ncols; 1747 } 1748 req_source[idex] = r_status1[i].MPI_SOURCE; 1749 /* form the header */ 1750 sbuf2_i[0] = req_size[idex]; 1751 for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j]; 1752 ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr); 1753 } 1754 } 1755 ierr = PetscFree(r_status1);CHKERRQ(ierr); 1756 ierr = PetscFree(r_waits1);CHKERRQ(ierr); 1757 1758 /* recv buffer sizes */ 1759 /* Receive messages*/ 1760 ierr = PetscMalloc1(nrqs+1,&rbuf3);CHKERRQ(ierr); 1761 ierr = PetscMalloc1(nrqs+1,&r_waits3);CHKERRQ(ierr); 1762 ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr); 1763 if (!ijonly) { 1764 ierr = PetscMalloc1(nrqs+1,&rbuf4);CHKERRQ(ierr); 1765 ierr = PetscMalloc1(nrqs+1,&r_waits4);CHKERRQ(ierr); 1766 } 1767 1768 for (i=0; i<nrqs; ++i) { 1769 ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr); 1770 ierr = PetscMalloc1(rbuf2[idex][0],&rbuf3[idex]);CHKERRQ(ierr); 1771 ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPIU_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr); 1772 if (!ijonly) { 1773 ierr = PetscMalloc1(rbuf2[idex][0]*bs2,&rbuf4[idex]);CHKERRQ(ierr); 1774 ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr); 1775 } 1776 } 1777 ierr = PetscFree(r_status2);CHKERRQ(ierr); 1778 ierr = PetscFree(r_waits2);CHKERRQ(ierr); 1779 1780 /* Wait on sends1 and sends2 */ 1781 ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr); 1782 ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr); 1783 1784 if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);} 1785 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);} 1786 ierr = PetscFree(s_status1);CHKERRQ(ierr); 1787 ierr = PetscFree(s_status2);CHKERRQ(ierr); 1788 ierr = PetscFree(s_waits1);CHKERRQ(ierr); 1789 ierr = PetscFree(s_waits2);CHKERRQ(ierr); 1790 1791 /* Now allocate buffers for a->j, and send them off */ 1792 ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr); 1793 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1794 ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr); 1795 for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1]; 1796 1797 ierr = PetscMalloc1(nrqr+1,&s_waits3);CHKERRQ(ierr); 1798 { 1799 for (i=0; i<nrqr; i++) { 1800 rbuf1_i = rbuf1[i]; 1801 sbuf_aj_i = sbuf_aj[i]; 1802 ct1 = 2*rbuf1_i[0] + 1; 1803 ct2 = 0; 1804 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 1805 kmax = rbuf1[i][2*j]; 1806 for (k=0; k<kmax; k++,ct1++) { 1807 row = rbuf1_i[ct1] - rstart; 1808 nzA = a_i[row+1] - a_i[row]; 1809 nzB = b_i[row+1] - b_i[row]; 1810 ncols = nzA + nzB; 1811 cworkA = a_j + a_i[row]; 1812 cworkB = b_j + b_i[row]; 1813 1814 /* load the column indices for this row into cols*/ 1815 cols = sbuf_aj_i + ct2; 1816 for (l=0; l<nzB; l++) { 1817 if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp; 1818 else break; 1819 } 1820 imark = l; 1821 for (l=0; l<nzA; l++) cols[imark+l] = cstart + cworkA[l]; 1822 for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]]; 1823 ct2 += ncols; 1824 } 1825 } 1826 ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr); 1827 } 1828 } 1829 ierr = PetscMalloc1(nrqs+1,&r_status3);CHKERRQ(ierr); 1830 ierr = PetscMalloc1(nrqr+1,&s_status3);CHKERRQ(ierr); 1831 1832 /* Allocate buffers for a->a, and send them off */ 1833 if (!ijonly) { 1834 ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr); 1835 for (i=0,j=0; i<nrqr; i++) j += req_size[i]; 1836 ierr = PetscMalloc1((j+1)*bs2,&sbuf_aa[0]);CHKERRQ(ierr); 1837 for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2; 1838 1839 ierr = PetscMalloc1(nrqr+1,&s_waits4);CHKERRQ(ierr); 1840 { 1841 for (i=0; i<nrqr; i++) { 1842 rbuf1_i = rbuf1[i]; 1843 sbuf_aa_i = sbuf_aa[i]; 1844 ct1 = 2*rbuf1_i[0]+1; 1845 ct2 = 0; 1846 for (j=1,max1=rbuf1_i[0]; j<=max1; j++) { 1847 kmax = rbuf1_i[2*j]; 1848 for (k=0; k<kmax; k++,ct1++) { 1849 row = rbuf1_i[ct1] - rstart; 1850 nzA = a_i[row+1] - a_i[row]; 1851 nzB = b_i[row+1] - b_i[row]; 1852 ncols = nzA + nzB; 1853 cworkB = b_j + b_i[row]; 1854 vworkA = a_a + a_i[row]*bs2; 1855 vworkB = b_a + b_i[row]*bs2; 1856 1857 /* load the column values for this row into vals*/ 1858 vals = sbuf_aa_i+ct2*bs2; 1859 for (l=0; l<nzB; l++) { 1860 if ((bmap[cworkB[l]]) < cstart) { 1861 ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1862 } else break; 1863 } 1864 imark = l; 1865 for (l=0; l<nzA; l++) { 1866 ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1867 } 1868 for (l=imark; l<nzB; l++) { 1869 ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 1870 } 1871 ct2 += ncols; 1872 } 1873 } 1874 ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr); 1875 } 1876 } 1877 ierr = PetscMalloc1(nrqs+1,&r_status4);CHKERRQ(ierr); 1878 ierr = PetscMalloc1(nrqr+1,&s_status4);CHKERRQ(ierr); 1879 } 1880 ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr); 1881 ierr = PetscFree(rbuf1);CHKERRQ(ierr); 1882 1883 /* Form the matrix */ 1884 /* create col map: global col of C -> local col of submatrices */ 1885 { 1886 const PetscInt *icol_i; 1887 #if defined(PETSC_USE_CTABLE) 1888 ierr = PetscMalloc1(1+ismax,&cmap);CHKERRQ(ierr); 1889 for (i=0; i<ismax; i++) { 1890 if (!allcolumns[i]) { 1891 ierr = PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);CHKERRQ(ierr); 1892 jmax = ncol[i]; 1893 icol_i = icol[i]; 1894 cmap_i = cmap[i]; 1895 for (j=0; j<jmax; j++) { 1896 ierr = PetscTableAdd(cmap_i,icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1897 } 1898 } else { 1899 cmap[i] = NULL; 1900 } 1901 } 1902 #else 1903 ierr = PetscMalloc1(ismax,&cmap);CHKERRQ(ierr); 1904 for (i=0; i<ismax; i++) { 1905 if (!allcolumns[i]) { 1906 ierr = PetscCalloc1(c->Nbs,&cmap[i]);CHKERRQ(ierr); 1907 jmax = ncol[i]; 1908 icol_i = icol[i]; 1909 cmap_i = cmap[i]; 1910 for (j=0; j<jmax; j++) { 1911 cmap_i[icol_i[j]] = j+1; 1912 } 1913 } else { /* allcolumns[i] */ 1914 cmap[i] = NULL; 1915 } 1916 } 1917 #endif 1918 } 1919 1920 /* Create lens which is required for MatCreate... */ 1921 for (i=0,j=0; i<ismax; i++) j += nrow[i]; 1922 ierr = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ j*sizeof(PetscInt),&lens);CHKERRQ(ierr); 1923 lens[0] = (PetscInt*)(lens + ismax); 1924 ierr = PetscMemzero(lens[0],j*sizeof(PetscInt));CHKERRQ(ierr); 1925 for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1]; 1926 1927 /* Update lens from local data */ 1928 for (i=0; i<ismax; i++) { 1929 jmax = nrow[i]; 1930 if (!allcolumns[i]) cmap_i = cmap[i]; 1931 irow_i = irow[i]; 1932 lens_i = lens[i]; 1933 for (j=0; j<jmax; j++) { 1934 if (allrows[i]) row = j; 1935 else row = irow_i[j]; 1936 1937 #if defined(PETSC_USE_CTABLE) 1938 ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr); 1939 #else 1940 proc = rtable[row]; 1941 #endif 1942 if (proc == rank) { 1943 /* Get indices from matA and then from matB */ 1944 row = row - rstart; 1945 nzA = a_i[row+1] - a_i[row]; 1946 nzB = b_i[row+1] - b_i[row]; 1947 cworkA = a_j + a_i[row]; 1948 cworkB = b_j + b_i[row]; 1949 if (!allcolumns[i]) { 1950 #if defined(PETSC_USE_CTABLE) 1951 for (k=0; k<nzA; k++) { 1952 ierr = PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr); 1953 if (tt) lens_i[j]++; 1954 } 1955 for (k=0; k<nzB; k++) { 1956 ierr = PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr); 1957 if (tt) lens_i[j]++; 1958 } 1959 1960 #else 1961 for (k=0; k<nzA; k++) { 1962 if (cmap_i[cstart + cworkA[k]]) lens_i[j]++; 1963 } 1964 for (k=0; k<nzB; k++) { 1965 if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++; 1966 } 1967 #endif 1968 } else { /* allcolumns */ 1969 lens_i[j] = nzA + nzB; 1970 } 1971 } 1972 } 1973 } 1974 #if defined(PETSC_USE_CTABLE) 1975 /* Create row map*/ 1976 ierr = PetscMalloc1(1+ismax,&rmap);CHKERRQ(ierr); 1977 for (i=0; i<ismax; i++) { 1978 ierr = PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);CHKERRQ(ierr); 1979 } 1980 #else 1981 /* Create row map*/ 1982 ierr = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ ismax*Mbs*sizeof(PetscInt),&rmap);CHKERRQ(ierr); 1983 rmap[0] = (PetscInt*)(rmap + ismax); 1984 ierr = PetscMemzero(rmap[0],ismax*Mbs*sizeof(PetscInt));CHKERRQ(ierr); 1985 for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + Mbs; 1986 #endif 1987 for (i=0; i<ismax; i++) { 1988 irow_i = irow[i]; 1989 jmax = nrow[i]; 1990 #if defined(PETSC_USE_CTABLE) 1991 rmap_i = rmap[i]; 1992 for (j=0; j<jmax; j++) { 1993 if (allrows[i]) { 1994 ierr = PetscTableAdd(rmap_i,j+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1995 } else { 1996 ierr = PetscTableAdd(rmap_i,irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr); 1997 } 1998 } 1999 #else 2000 rmap_i = rmap[i]; 2001 for (j=0; j<jmax; j++) { 2002 if (allrows[i]) rmap_i[j] = j; 2003 else rmap_i[irow_i[j]] = j; 2004 } 2005 #endif 2006 } 2007 2008 /* Update lens from offproc data */ 2009 { 2010 PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i; 2011 PetscMPIInt ii; 2012 2013 for (tmp2=0; tmp2<nrqs; tmp2++) { 2014 ierr = MPI_Waitany(nrqs,r_waits3,&ii,r_status3+tmp2);CHKERRQ(ierr); 2015 idex = pa[ii]; 2016 sbuf1_i = sbuf1[idex]; 2017 jmax = sbuf1_i[0]; 2018 ct1 = 2*jmax+1; 2019 ct2 = 0; 2020 rbuf2_i = rbuf2[ii]; 2021 rbuf3_i = rbuf3[ii]; 2022 for (j=1; j<=jmax; j++) { 2023 is_no = sbuf1_i[2*j-1]; 2024 max1 = sbuf1_i[2*j]; 2025 lens_i = lens[is_no]; 2026 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 2027 rmap_i = rmap[is_no]; 2028 for (k=0; k<max1; k++,ct1++) { 2029 #if defined(PETSC_USE_CTABLE) 2030 ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr); 2031 row--; 2032 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 2033 #else 2034 row = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */ 2035 #endif 2036 max2 = rbuf2_i[ct1]; 2037 for (l=0; l<max2; l++,ct2++) { 2038 if (!allcolumns[is_no]) { 2039 #if defined(PETSC_USE_CTABLE) 2040 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr); 2041 if (tt) lens_i[row]++; 2042 #else 2043 if (cmap_i[rbuf3_i[ct2]]) lens_i[row]++; 2044 #endif 2045 } else { /* allcolumns */ 2046 lens_i[row]++; 2047 } 2048 } 2049 } 2050 } 2051 } 2052 } 2053 ierr = PetscFree(r_status3);CHKERRQ(ierr); 2054 ierr = PetscFree(r_waits3);CHKERRQ(ierr); 2055 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);} 2056 ierr = PetscFree(s_status3);CHKERRQ(ierr); 2057 ierr = PetscFree(s_waits3);CHKERRQ(ierr); 2058 2059 /* Create the submatrices */ 2060 if (scall == MAT_REUSE_MATRIX) { 2061 if (ijonly) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP," MAT_REUSE_MATRIX and ijonly is not supported yet"); 2062 /* 2063 Assumes new rows are same length as the old rows, hence bug! 2064 */ 2065 for (i=0; i<ismax; i++) { 2066 mat = (Mat_SeqBAIJ*)(submats[i]->data); 2067 if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || C->rmap->bs != bs)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size"); 2068 ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs *sizeof(PetscInt),&flag);CHKERRQ(ierr); 2069 if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. wrong no of nonzeros"); 2070 /* Initial matrix as if empty */ 2071 ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(PetscInt));CHKERRQ(ierr); 2072 2073 submats[i]->factortype = C->factortype; 2074 } 2075 } else { 2076 PetscInt bs_tmp; 2077 if (ijonly) bs_tmp = 1; 2078 else bs_tmp = bs; 2079 for (i=0; i<ismax; i++) { 2080 ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr); 2081 ierr = MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,nrow[i]*bs_tmp,ncol[i]*bs_tmp);CHKERRQ(ierr); 2082 ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr); 2083 ierr = MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); 2084 ierr = MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); /* this subroutine is used by SBAIJ routines */ 2085 } 2086 } 2087 2088 /* Assemble the matrices */ 2089 /* First assemble the local rows */ 2090 { 2091 PetscInt ilen_row,*imat_ilen,*imat_j,*imat_i; 2092 MatScalar *imat_a = NULL; 2093 2094 for (i=0; i<ismax; i++) { 2095 mat = (Mat_SeqBAIJ*)submats[i]->data; 2096 imat_ilen = mat->ilen; 2097 imat_j = mat->j; 2098 imat_i = mat->i; 2099 if (!ijonly) imat_a = mat->a; 2100 if (!allcolumns[i]) cmap_i = cmap[i]; 2101 rmap_i = rmap[i]; 2102 irow_i = irow[i]; 2103 jmax = nrow[i]; 2104 for (j=0; j<jmax; j++) { 2105 if (allrows[i]) row = j; 2106 else row = irow_i[j]; 2107 2108 #if defined(PETSC_USE_CTABLE) 2109 ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr); 2110 #else 2111 proc = rtable[row]; 2112 #endif 2113 if (proc == rank) { 2114 row = row - rstart; 2115 nzA = a_i[row+1] - a_i[row]; 2116 nzB = b_i[row+1] - b_i[row]; 2117 cworkA = a_j + a_i[row]; 2118 cworkB = b_j + b_i[row]; 2119 if (!ijonly) { 2120 vworkA = a_a + a_i[row]*bs2; 2121 vworkB = b_a + b_i[row]*bs2; 2122 } 2123 #if defined(PETSC_USE_CTABLE) 2124 ierr = PetscTableFind(rmap_i,row+rstart+1,&row);CHKERRQ(ierr); 2125 row--; 2126 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 2127 #else 2128 row = rmap_i[row + rstart]; 2129 #endif 2130 mat_i = imat_i[row]; 2131 if (!ijonly) mat_a = imat_a + mat_i*bs2; 2132 mat_j = imat_j + mat_i; 2133 ilen_row = imat_ilen[row]; 2134 2135 /* load the column indices for this row into cols*/ 2136 if (!allcolumns[i]) { 2137 for (l=0; l<nzB; l++) { 2138 if ((ctmp = bmap[cworkB[l]]) < cstart) { 2139 #if defined(PETSC_USE_CTABLE) 2140 ierr = PetscTableFind(cmap_i,ctmp+1,&tcol);CHKERRQ(ierr); 2141 if (tcol) { 2142 #else 2143 if ((tcol = cmap_i[ctmp])) { 2144 #endif 2145 *mat_j++ = tcol - 1; 2146 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2147 mat_a += bs2; 2148 ilen_row++; 2149 } 2150 } else break; 2151 } 2152 imark = l; 2153 for (l=0; l<nzA; l++) { 2154 #if defined(PETSC_USE_CTABLE) 2155 ierr = PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr); 2156 if (tcol) { 2157 #else 2158 if ((tcol = cmap_i[cstart + cworkA[l]])) { 2159 #endif 2160 *mat_j++ = tcol - 1; 2161 if (!ijonly) { 2162 ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2163 mat_a += bs2; 2164 } 2165 ilen_row++; 2166 } 2167 } 2168 for (l=imark; l<nzB; l++) { 2169 #if defined(PETSC_USE_CTABLE) 2170 ierr = PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr); 2171 if (tcol) { 2172 #else 2173 if ((tcol = cmap_i[bmap[cworkB[l]]])) { 2174 #endif 2175 *mat_j++ = tcol - 1; 2176 if (!ijonly) { 2177 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2178 mat_a += bs2; 2179 } 2180 ilen_row++; 2181 } 2182 } 2183 } else { /* allcolumns */ 2184 for (l=0; l<nzB; l++) { 2185 if ((ctmp = bmap[cworkB[l]]) < cstart) { 2186 *mat_j++ = ctmp; 2187 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2188 mat_a += bs2; 2189 ilen_row++; 2190 } else break; 2191 } 2192 imark = l; 2193 for (l=0; l<nzA; l++) { 2194 *mat_j++ = cstart+cworkA[l]; 2195 if (!ijonly) { 2196 ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2197 mat_a += bs2; 2198 } 2199 ilen_row++; 2200 } 2201 for (l=imark; l<nzB; l++) { 2202 *mat_j++ = bmap[cworkB[l]]; 2203 if (!ijonly) { 2204 ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2205 mat_a += bs2; 2206 } 2207 ilen_row++; 2208 } 2209 } 2210 imat_ilen[row] = ilen_row; 2211 } 2212 } 2213 } 2214 } 2215 2216 /* Now assemble the off proc rows*/ 2217 { 2218 PetscInt *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen; 2219 PetscInt *imat_j,*imat_i; 2220 MatScalar *imat_a = NULL,*rbuf4_i = NULL; 2221 PetscMPIInt ii; 2222 2223 for (tmp2=0; tmp2<nrqs; tmp2++) { 2224 if (ijonly) ii = tmp2; 2225 else { 2226 ierr = MPI_Waitany(nrqs,r_waits4,&ii,r_status4+tmp2);CHKERRQ(ierr); 2227 } 2228 idex = pa[ii]; 2229 sbuf1_i = sbuf1[idex]; 2230 jmax = sbuf1_i[0]; 2231 ct1 = 2*jmax + 1; 2232 ct2 = 0; 2233 rbuf2_i = rbuf2[ii]; 2234 rbuf3_i = rbuf3[ii]; 2235 if (!ijonly) rbuf4_i = rbuf4[ii]; 2236 for (j=1; j<=jmax; j++) { 2237 is_no = sbuf1_i[2*j-1]; 2238 if (!allcolumns[is_no]) cmap_i = cmap[is_no]; 2239 rmap_i = rmap[is_no]; 2240 mat = (Mat_SeqBAIJ*)submats[is_no]->data; 2241 imat_ilen = mat->ilen; 2242 imat_j = mat->j; 2243 imat_i = mat->i; 2244 if (!ijonly) imat_a = mat->a; 2245 max1 = sbuf1_i[2*j]; 2246 for (k=0; k<max1; k++,ct1++) { 2247 row = sbuf1_i[ct1]; 2248 #if defined(PETSC_USE_CTABLE) 2249 ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr); 2250 row--; 2251 if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); 2252 #else 2253 row = rmap_i[row]; 2254 #endif 2255 ilen = imat_ilen[row]; 2256 mat_i = imat_i[row]; 2257 if (!ijonly) mat_a = imat_a + mat_i*bs2; 2258 mat_j = imat_j + mat_i; 2259 max2 = rbuf2_i[ct1]; 2260 2261 if (!allcolumns[is_no]) { 2262 for (l=0; l<max2; l++,ct2++) { 2263 #if defined(PETSC_USE_CTABLE) 2264 ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr); 2265 if (tcol) { 2266 #else 2267 if ((tcol = cmap_i[rbuf3_i[ct2]])) { 2268 #endif 2269 *mat_j++ = tcol - 1; 2270 if (!ijonly) { 2271 ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2272 mat_a += bs2; 2273 } 2274 ilen++; 2275 } 2276 } 2277 } else { /* allcolumns */ 2278 for (l=0; l<max2; l++,ct2++) { 2279 *mat_j++ = rbuf3_i[ct2]; 2280 if (!ijonly) { 2281 ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr); 2282 mat_a += bs2; 2283 } 2284 ilen++; 2285 } 2286 } 2287 imat_ilen[row] = ilen; 2288 } 2289 } 2290 } 2291 } 2292 /* sort the rows */ 2293 { 2294 PetscInt ilen_row,*imat_ilen,*imat_j,*imat_i; 2295 MatScalar *imat_a = NULL; 2296 MatScalar *work; 2297 2298 ierr = PetscMalloc1(bs2,&work);CHKERRQ(ierr); 2299 for (i=0; i<ismax; i++) { 2300 mat = (Mat_SeqBAIJ*)submats[i]->data; 2301 imat_ilen = mat->ilen; 2302 imat_j = mat->j; 2303 imat_i = mat->i; 2304 if (!ijonly) imat_a = mat->a; 2305 if (allcolumns[i]) continue; 2306 jmax = nrow[i]; 2307 for (j=0; j<jmax; j++) { 2308 mat_i = imat_i[j]; 2309 if (!ijonly) mat_a = imat_a + mat_i*bs2; 2310 mat_j = imat_j + mat_i; 2311 ilen_row = imat_ilen[j]; 2312 if (!ijonly) {ierr = PetscSortIntWithDataArray(ilen_row,mat_j,mat_a,bs2*sizeof(MatScalar),work);CHKERRQ(ierr);} 2313 else {ierr = PetscSortInt(ilen_row,mat_j);CHKERRQ(ierr);} 2314 } 2315 } 2316 ierr = PetscFree(work);CHKERRQ(ierr); 2317 } 2318 if (!ijonly) { 2319 ierr = PetscFree(r_status4);CHKERRQ(ierr); 2320 ierr = PetscFree(r_waits4);CHKERRQ(ierr); 2321 if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);} 2322 ierr = PetscFree(s_waits4);CHKERRQ(ierr); 2323 ierr = PetscFree(s_status4);CHKERRQ(ierr); 2324 } 2325 2326 /* Restore the indices */ 2327 for (i=0; i<ismax; i++) { 2328 if (!allrows[i]) { 2329 ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr); 2330 } 2331 if (!allcolumns[i]) { 2332 ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr); 2333 } 2334 } 2335 2336 /* Destroy allocated memory */ 2337 #if defined(PETSC_USE_CTABLE) 2338 ierr = PetscFree4(irow,icol,nrow,ncol);CHKERRQ(ierr); 2339 #else 2340 ierr = PetscFree5(irow,icol,nrow,ncol,rtable);CHKERRQ(ierr); 2341 #endif 2342 ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr); 2343 ierr = PetscFree(pa);CHKERRQ(ierr); 2344 2345 ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr); 2346 ierr = PetscFree(sbuf1);CHKERRQ(ierr); 2347 ierr = PetscFree(rbuf2);CHKERRQ(ierr); 2348 for (i=0; i<nrqr; ++i) { 2349 ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr); 2350 } 2351 for (i=0; i<nrqs; ++i) { 2352 ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr); 2353 } 2354 ierr = PetscFree3(sbuf2,req_size,req_source);CHKERRQ(ierr); 2355 ierr = PetscFree(rbuf3);CHKERRQ(ierr); 2356 ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr); 2357 ierr = PetscFree(sbuf_aj);CHKERRQ(ierr); 2358 if (!ijonly) { 2359 for (i=0; i<nrqs; ++i) {ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);} 2360 ierr = PetscFree(rbuf4);CHKERRQ(ierr); 2361 ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr); 2362 ierr = PetscFree(sbuf_aa);CHKERRQ(ierr); 2363 } 2364 2365 #if defined(PETSC_USE_CTABLE) 2366 for (i=0; i<ismax; i++) { 2367 ierr = PetscTableDestroy((PetscTable*)&rmap[i]);CHKERRQ(ierr); 2368 } 2369 #endif 2370 ierr = PetscFree(rmap);CHKERRQ(ierr); 2371 2372 for (i=0; i<ismax; i++) { 2373 if (!allcolumns[i]) { 2374 #if defined(PETSC_USE_CTABLE) 2375 ierr = PetscTableDestroy((PetscTable*)&cmap[i]);CHKERRQ(ierr); 2376 #else 2377 ierr = PetscFree(cmap[i]);CHKERRQ(ierr); 2378 #endif 2379 } 2380 } 2381 ierr = PetscFree(cmap);CHKERRQ(ierr); 2382 ierr = PetscFree(lens);CHKERRQ(ierr); 2383 2384 for (i=0; i<ismax; i++) { 2385 ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2386 ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr); 2387 } 2388 2389 c->ijonly = PETSC_FALSE; /* set back to the default */ 2390 PetscFunctionReturn(0); 2391 } 2392 2393