/*
   Routines to compute overlapping regions of a parallel MPI matrix
   and to find submatrices that are shared across processors.
*/
#include <../src/mat/impls/baij/mpi/mpibaij.h>
#include <petscbt.h>

extern PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat,PetscInt,IS[]);
extern PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**);
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*);
extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ"
PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->cmap->N,bs=C->rmap->bs;
  IS             *is_new;

  PetscFunctionBegin;
  ierr = PetscMalloc(imax*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,bs,imax,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<imax; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,bs,imax,is_new,is);CHKERRQ(ierr);
  for (i=0; i<imax; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  Sample message format:
  If a processor A wants processor B to process some elements corresponding
  to index sets is[1] and is[5]:

  mesg [0] = 2    (no of index sets in the mesg)
  -----------
  mesg [1] = 1  => is[1]
  mesg [2] = sizeof(is[1]);
  -----------
  mesg [3] = 5  => is[5]
  mesg [4] = sizeof(is[5]);
  -----------
  mesg [5]
   ...
  mesg [n]  data(is[1])
  -----------
  mesg[n+1]
   ...
  mesg[m]   data(is[5])
  -----------

  Notes:
  nrqs - no of requests sent (or to be sent out)
  nrqr - no of requests received (which have to be or which have been processed)
*/
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Once"
PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[])
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  const PetscInt **idx,*idx_i;
  PetscInt       *n,*w3,*w4,**data,len;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*w2,*w1,nrqr;
  PetscInt       Mbs,i,j,k,**rbuf,row,proc=-1,nrqs,msz,**outdat,**ptr;
  PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p;
  PetscMPIInt    *onodes1,*olengths1,*onodes2,*olengths2;
  PetscBT        *table;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *s_status,*recv_status;
  char           *t_p;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  ierr = PetscMalloc2(imax+1,const PetscInt*,&idx,imax,PetscInt,&n);CHKERRQ(ierr);

  for (i=0; i<imax; i++) {
    ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
  }

  /* Evaluate communication - mesg to whom, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them. */
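  /* Worked example of the accounting below (hypothetical sizes): with imax = 2,
     if is[0] contains 3 block rows owned by proc p and is[1] contains 2, the
     inner loop counts w4 = 3 and then w4 = 2 for p, giving w1[p] = 5 (total
     data length), w3[p] = 2 (no of ISs with data for p) and w2[p] = 1 (a
     message exists).  The final message length to p is then
     w1[p] + w2[p] + 2*w3[p] = 5 + 1 + 4 = 10, matching the header format
     described above. */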
  ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
  ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<imax; i++) {
    ierr  = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector */
    idx_i = idx[i];
    len   = n[i];
    for (j=0; j<len; j++) {
      row = idx_i[j];
      if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
      ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr);
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0;  /* no of outgoing messages */
  msz      = 0;  /* total mesg length (for all procs) */
  w1[rank] = 0;  /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; }  /* there exists a message to proc i */
  }
  /* pa - is the list of processors to communicate with */
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr);
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,PetscInt*,&outdat,size,PetscInt*,&ptr,msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
  ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr);
  ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      outdat[j] = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Plug in the headers */
  for (i=0; i<nrqs; i++) {
    j            = pa[i];
    outdat[j][0] = 0;
    ierr         = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]       = outdat[j] + 2*w3[j] + 1;
  }

  /* Memory for doing local proc's work */
  {
    ierr = PetscMalloc5(imax,PetscBT,&table,imax,PetscInt*,&data,imax,PetscInt,&isz,
                        Mbs*imax,PetscInt,&d_p,(Mbs/PETSC_BITS_PER_BYTE+1)*imax,char,&t_p);CHKERRQ(ierr);
    ierr = PetscMemzero(table,imax*sizeof(PetscBT));CHKERRQ(ierr);
    ierr = PetscMemzero(data,imax*sizeof(PetscInt*));CHKERRQ(ierr);
    ierr = PetscMemzero(isz,imax*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemzero(d_p,Mbs*imax*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemzero(t_p,(Mbs/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char));CHKERRQ(ierr);

    for (i=0; i<imax; i++) {
      table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
      data[i]  = d_p + Mbs*i;
    }
  }

  /* Parse the IS and update local tables and the outgoing buf with the data */
  {
    PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j;
    PetscBT  table_i;
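    /* A PetscBT is a bit table; PetscBTLookupSet(bt,k) returns the previous
       state of bit k and then sets it, so each block row is appended to
       data[i] at most once.  A minimal sketch of the idiom used below
       (hypothetical index 5):

         PetscBT bt;
         ierr = PetscBTCreate(Mbs,bt);CHKERRQ(ierr);
         if (!PetscBTLookupSet(bt,5)) { ...first encounter of block row 5... }
         ierr = PetscBTDestroy(bt);CHKERRQ(ierr);
    */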
    for (i=0; i<imax; i++) {
      ierr    = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
      n_i     = n[i];
      table_i = table[i];
      idx_i   = idx[i];
      data_i  = data[i];
      isz_i   = isz[i];
      for (j=0; j<n_i; j++) {  /* parse the indices of each IS */
        row  = idx_i[j];
        ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr);
        if (proc != rank) { /* copy to the outgoing buffer */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        } else { /* update the local table */
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
      }
      /* Update the headers for the current IS */
      for (j=0; j<size; j++) { /* Can optimise this loop by using pa[] */
        if ((ctr_j = ctr[j])) {
          outdat_j        = outdat[j];
          k               = ++outdat_j[0];
          outdat_j[2*k]   = ctr_j;
          outdat_j[2*k-1] = i;
        }
      }
      isz[i] = isz_i;
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* No longer need the original indices */
  for (i=0; i<imax; ++i) {
    ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
  }
  ierr = PetscFree2(idx,n);CHKERRQ(ierr);

  for (i=0; i<imax; ++i) {
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }

  /* Do local work */
  ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);}

  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}

  /* Phase 1 sends are complete - deallocate buffers */
  ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr);
  ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);

  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&xdata);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt),&isz1);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf);CHKERRQ(ierr);

  /* Send the data back */
  /* Do a global reduction to know the buffer space required for incoming messages */
  {
    PetscMPIInt *rw1;

    ierr = PetscMalloc(size*sizeof(PetscMPIInt),&rw1);CHKERRQ(ierr);
    ierr = PetscMemzero(rw1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

    for (i=0; i<nrqr; ++i) {
      proc = recv_status[i].MPI_SOURCE;
      if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
      rw1[proc] = isz1[i];
    }

    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree(olengths1);CHKERRQ(ierr);

    /* Determine the number of messages to expect, their lengths, and their from-ids */
    ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
    ierr = PetscFree(rw1);CHKERRQ(ierr);
  }
  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    j    = recv_status[i].MPI_SOURCE;
    ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
  }
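  /* Each reply in xdata[i] mirrors the request header layout: xdata[i][0] is
     the no of index sets, followed by (is#,size) pairs, followed by the
     augmented row lists (the requested rows plus their neighbours) computed by
     MatIncreaseOverlap_MPIBAIJ_Receive(). */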
  /* Receive work done on other processors */
  {
    PetscMPIInt idex;
    PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
    PetscBT     table_i;
    MPI_Status  *status2;

    ierr = PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr);
    for (i=0; i<nrqs; ++i) {
      ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
      /* Process the message */
      rbuf2_i = rbuf2[idex];
      ct1     = 2*rbuf2_i[0] + 1;
      jmax    = rbuf2[idex][0];
      for (j=1; j<=jmax; j++) {
        max     = rbuf2_i[2*j];
        is_no   = rbuf2_i[2*j-1];
        isz_i   = isz[is_no];
        data_i  = data[is_no];
        table_i = table[is_no];
        for (k=0; k<max; k++,ct1++) {
          row = rbuf2_i[ct1];
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row; }
        }
        isz[is_no] = isz_i;
      }
    }
    if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);}
    ierr = PetscFree(status2);CHKERRQ(ierr);
  }

  for (i=0; i<imax; ++i) {
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
  }

  ierr = PetscFree(onodes2);CHKERRQ(ierr);
  ierr = PetscFree(olengths2);CHKERRQ(ierr);

  ierr = PetscFree(pa);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);
  ierr = PetscFree(recv_status);CHKERRQ(ierr);
  ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
  ierr = PetscFree(xdata);CHKERRQ(ierr);
  ierr = PetscFree(isz1);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
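/* A minimal usage sketch (not part of this file): a user grows n index sets by
   two levels of overlap with

     ierr = MatIncreaseOverlap(A,n,is,2);CHKERRQ(ierr);

   which, for a MATMPIBAIJ matrix A, dispatches to MatIncreaseOverlap_MPIBAIJ()
   above and thus calls MatIncreaseOverlap_MPIBAIJ_Once() twice. */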
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Local"
/*
  MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap_MPIBAIJ_Once,
  to do the work on the local processor.

  Inputs:
   C     - MAT_MPIBAIJ;
   imax  - total no of index sets processed at a time;
   table - an array of bit tables (PetscBT), each Mbs bits in size.

  Output:
   isz  - array containing the count of the solution elements corresponding
          to each index set;
   data - pointer to the solutions
*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
  Mat         A = c->A,B = c->B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
  PetscInt    start,end,val,max,rstart,cstart,*ai,*aj;
  PetscInt    *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
  PetscBT     table_i;

  PetscFunctionBegin;
  rstart = c->rstartbs;
  cstart = c->cstartbs;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0; i<imax; i++) {
    data_i  = data[i];
    table_i = table[i];
    isz_i   = isz[i];
    for (j=0,max=isz[i]; j<max; j++) {
      row   = data_i[j] - rstart;
      start = ai[row];
      end   = ai[row+1];
      for (k=start; k<end; k++) { /* Amat */
        val = aj[k] + cstart;
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
      start = bi[row];
      end   = bi[row+1];
      for (k=start; k<end; k++) { /* Bmat */
        val = garray[bj[k]];
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
      }
    }
    isz[i] = isz_i;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Receive"
/*
  MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
  and return the output

  Input:
   C    - the matrix
   nrqr - no of messages being processed.
   rbuf - an array of pointers to the received requests

  Output:
   xdata - array of messages to be sent back
   isz1  - size of each message

  For better efficiency perhaps we should malloc separately each xdata[i],
  then if a remalloc is required we need only copy the data for that one row
  rather than for all previous rows, as it is now where a single large chunk of
  memory is used.
*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt *isz1)
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  Mat            A = c->A,B = c->B;
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
  PetscErrorCode ierr;
  PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
  PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
  PetscInt       val,max1,max2,Mbs,no_malloc = 0,*tmp,new_estimate,ctr;
  PetscInt       *rbuf_i,kmax,rbuf_0;
  PetscBT        xtable;

  PetscFunctionBegin;
  Mbs    = c->Mbs;
  rstart = c->rstartbs;
  cstart = c->cstartbs;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
    rbuf_i  = rbuf[i];
    rbuf_0  = rbuf_i[0];
    ct     += rbuf_0;
    for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
  }

  if (c->Mbs) max1 = ct*(a->nz + b->nz)/c->Mbs;
  else        max1 = 1;
  mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
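  /* The estimate assumes each requested block row brings in roughly the average
     number of nonzero blocks per block row, (a->nz + b->nz)/Mbs; ct is the total
     number of requested rows and total_sz the total request size.  The factor 3
     (plus the 1.5x growth on reallocation below) hedges against rows that are
     denser than average. */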
  ierr = PetscMalloc(mem_estimate*sizeof(PetscInt),&xdata[0]);CHKERRQ(ierr);
  ++no_malloc;
  ierr = PetscBTCreate(Mbs,xtable);CHKERRQ(ierr);
  ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr);

  ct3 = 0;
  for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct1    = 2*rbuf_0 + 1;
    ct2    = ct1;
    ct3   += ct1;
    for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i */
      ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr);
      oct2 = ct2;
      kmax = rbuf_i[2*j];
      for (k=0; k<kmax; k++,ct1++) {
        row = rbuf_i[ct1];
        if (!PetscBTLookupSet(xtable,row)) {
          if (!(ct3 < mem_estimate)) {
            new_estimate = (PetscInt)(1.5*mem_estimate)+1;
            ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
            ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
            ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
            xdata[0]     = tmp;
            mem_estimate = new_estimate; ++no_malloc;
            for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
          }
          xdata[i][ct2++] = row;
          ct3++;
        }
      }
      for (k=oct2,max2=ct2; k<max2; k++) {
        row   = xdata[i][k] - rstart;
        start = ai[row];
        end   = ai[row+1];
        for (l=start; l<end; l++) {
          val = aj[l] + cstart;
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
        start = bi[row];
        end   = bi[row+1];
        for (l=start; l<end; l++) {
          val = garray[bj[l]];
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1]; }
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header */
      xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var */
      xdata[i][2*j-1] = rbuf_i[2*j-1];
    }
    xdata[i][0] = rbuf_0;
    xdata[i+1]  = xdata[i] + ct2;
    isz1[i]     = ct2; /* size of each message */
  }
  ierr = PetscBTDestroy(xtable);CHKERRQ(ierr);
  ierr = PetscInfo3(C,"Allocated %D bytes, required %D, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
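/* A minimal usage sketch (not part of this file): extracting n sequential
   submatrices, one per pair (isrow[i],iscol[i]), from a parallel matrix C:

     Mat *submat;
     ierr = MatGetSubMatrices(C,n,isrow,iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);

   For a MATMPIBAIJ matrix this dispatches to MatGetSubMatrices_MPIBAIJ() below. */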
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIBAIJ"
PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  IS             *isrow_new,*iscol_new;
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no,N=C->cmap->N,bs=C->rmap->bs;

  PetscFunctionBegin;
  /* The compression and expansion should be avoided: it does not flag errors
     and it may change the indices, hence it is potentially buggy. */

  ierr = PetscMalloc2(ismax+1,IS,&isrow_new,ismax+1,IS,&iscol_new);CHKERRQ(ierr);
  ierr = ISCompressIndicesGeneral(N,bs,ismax,isrow,isrow_new);CHKERRQ(ierr);
  ierr = ISCompressIndicesGeneral(N,bs,ismax,iscol,iscol_new);CHKERRQ(ierr);

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);
  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }

  for (i=0; i<ismax; i++) {
    ierr = ISDestroy(&isrow_new[i]);CHKERRQ(ierr);
    ierr = ISDestroy(&iscol_new[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree2(isrow_new,iscol_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#if defined(PETSC_USE_CTABLE)
#undef __FUNCT__
#define __FUNCT__ "PetscGetProc"
PetscErrorCode PetscGetProc(const PetscInt row,const PetscMPIInt size,const PetscInt proc_gnode[],PetscMPIInt *rank)
{
  PetscInt    nGlobalNd = proc_gnode[size];
  PetscMPIInt fproc = PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)));

  PetscFunctionBegin;
  if (fproc > size) fproc = size;
  while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) {
    if (row < proc_gnode[fproc]) fproc--;
    else                         fproc++;
  }
  *rank = fproc;
  PetscFunctionReturn(0);
}
#endif
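/* PetscGetProc() guesses the owning rank by linear interpolation and then walks
   to the correct range.  E.g. (hypothetical layout) with size = 4 and
   proc_gnode = {0,10,20,30,40}, row 27 gives fproc = (int)(27*4/40.0 + 0.5) = 3;
   since 27 < proc_gnode[3] = 30 the loop steps down once to rank 2, whose range
   [20,30) contains the row. */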
/* -------------------------------------------------------------------------*/
/* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIBAIJ_local"
PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submats)
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  Mat            A = c->A;
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w3,*w4,start;
  PetscErrorCode ierr;
  PetscMPIInt    size,tag0,tag1,tag2,tag3,*w1,*w2,nrqr,idex,end,proc;
  PetscInt       **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,**rbuf1,row;
  PetscInt       nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol;
  PetscInt       **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
  PetscInt       **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax;
  PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
  PetscInt       bs=C->rmap->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
  PetscInt       cstart = c->cstartbs,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
  PetscInt       *bmap = c->garray,ctmp,rstart=c->rstartbs;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3,*s_waits3;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2,*r_status3;
  MPI_Comm       comm;
  PetscBool      flag;
  PetscMPIInt    *onodes1,*olengths1;
  PetscBool      ijonly=c->ijonly; /* private flag: only the matrix index (ij) structure is requested */
  /* variables below are used for the matrix numerical values - case of !ijonly */
  MPI_Request    *r_waits4,*s_waits4;
  MPI_Status     *r_status4,*s_status4;
  MatScalar      **rbuf4,**sbuf_aa,*vals,*mat_a = PETSC_NULL,*sbuf_aa_i,*vworkA = PETSC_NULL,*vworkB = PETSC_NULL;
  MatScalar      *a_a=a->a,*b_a=b->a;

#if defined(PETSC_USE_CTABLE)
  PetscInt   tt;
  PetscTable *rowmaps,*colmaps,lrow1_grow1,lcol1_gcol1;
#else
  PetscInt   **cmap,*cmap_i,*rtable,*rmap_i,**rmap,Mbs = c->Mbs;
#endif

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  tag0 = ((PetscObject)C)->tag;
  size = c->size;
  rank = c->rank;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);

#if defined(PETSC_USE_CTABLE)
  ierr = PetscMalloc4(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol);CHKERRQ(ierr);
#else
  ierr = PetscMalloc5(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol,Mbs+1,PetscInt,&rtable);CHKERRQ(ierr);
  /* Create a lookup table for the mapping row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = c->rowners[i+1];
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }
#endif

  for (i=0; i<ismax; i++) {
    ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
    ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
  }

  /* Evaluate communication - mesg to whom, length of mesg, and buffer space
     required. Based on this, buffers are allocated, and data copied into them. */
  ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
  ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      row = irow_i[j];
#if defined(PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++; }
    }
  }

  nrqs     = 0;  /* no of outgoing messages */
  msz      = 0;  /* total mesg length for all procs */
  w1[rank] = 0;  /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++; }  /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr);  /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);

  ierr = PetscFree(onodes1);CHKERRQ(ierr);
  ierr = PetscFree(olengths1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,PetscInt*,&sbuf1,size,PetscInt*,&ptr,2*msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
  ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr);
  ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialise the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      row = irow_i[j];
#if defined(PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }
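  /* Note: the request headers in sbuf1 are kept until the end of this routine;
     they are reused below to decode the replies (which IS, and how many rows,
     each run of received column data belongs to). */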
  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post receives to capture the buffer size */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf2);CHKERRQ(ierr);
  rbuf2[0] = tmp + msz;
  for (i=1; i<nrqs; ++i) {
    rbuf2[i] = rbuf2[i-1] + w1[pa[i-1]];
  }
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Send to other procs the buf size they should allocate */

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc3(nrqr+1,PetscInt*,&sbuf2,nrqr,PetscInt,&req_size,nrqr,PetscInt,&req_source);CHKERRQ(ierr);
  {
    Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
    PetscInt    *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;

    for (i=0; i<nrqr; ++i) {
      ierr           = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      req_size[idex] = 0;
      rbuf1_i        = rbuf1[idex];
      start          = 2*rbuf1_i[0] + 1;
      ierr           = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
      ierr           = PetscMalloc(end*sizeof(PetscInt),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i        = sbuf2[idex];
      for (j=start; j<end; j++) {
        id              = rbuf1_i[j] - rstart;
        ncols           = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
        sbuf2_i[j]      = ncols;
        req_size[idex] += ncols;
      }
      req_source[idex] = r_status1[i].MPI_SOURCE;
      /* form the header */
      sbuf2_i[0] = req_size[idex];
      for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
      ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);

  /* Receive the buffer sizes */
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);
  if (!ijonly) {
    ierr = PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);CHKERRQ(ierr);
    ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr);
  }

  for (i=0; i<nrqs; ++i) {
    ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr);
    ierr = PetscMalloc(rbuf2[idex][0]*sizeof(PetscInt),&rbuf3[idex]);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPIU_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr);
    if (!ijonly) {
      ierr = PetscMalloc(rbuf2[idex][0]*bs2*sizeof(MatScalar),&rbuf4[idex]);CHKERRQ(ierr);
      ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);

  /* Wait on sends1 and sends2 */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);

  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
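  /* Communication summary so far: tag0 carried the row requests, tag1 carries
     back the per-row nonzero counts (so requesters can size their receive
     buffers), tag2 will carry the column indices and tag3 the numerical values
     (the latter skipped when ijonly is set). */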
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Now allocate buffers for a->j, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&sbuf_aj);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*sizeof(PetscInt),&sbuf_aj[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr);
  {
    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aj_i = sbuf_aj[i];
      ct1       = 2*rbuf1_i[0] + 1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1[i][2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];

          /* load the column indices for this row into cols */
          cols = sbuf_aj_i + ct2;
          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
            else break;
          }
          imark = l;
          for (l=0; l<nzA; l++)     cols[imark+l] = cstart + cworkA[l];
          for (l=imark; l<nzB; l++) cols[nzA+l]   = bmap[cworkB[l]];
          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr);

  /* Allocate buffers for a->a, and send them off */
  if (!ijonly) {
    ierr = PetscMalloc((nrqr+1)*sizeof(MatScalar*),&sbuf_aa);CHKERRQ(ierr);
    for (i=0,j=0; i<nrqr; i++) j += req_size[i];
    ierr = PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);CHKERRQ(ierr);
    for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;

    ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr);
    {
      for (i=0; i<nrqr; i++) {
        rbuf1_i   = rbuf1[i];
        sbuf_aa_i = sbuf_aa[i];
        ct1       = 2*rbuf1_i[0] + 1;
        ct2       = 0;
        for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
          kmax = rbuf1_i[2*j];
          for (k=0; k<kmax; k++,ct1++) {
            row    = rbuf1_i[ct1] - rstart;
            nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
            ncols  = nzA + nzB;
            cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
            vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;

            /* load the column values for this row into vals */
            vals = sbuf_aa_i + ct2*bs2;
            for (l=0; l<nzB; l++) {
              if ((bmap[cworkB[l]]) < cstart) {
                ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
              } else break;
            }
            imark = l;
            for (l=0; l<nzA; l++) {
              ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
            }
            for (l=imark; l<nzB; l++) {
              ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
            }
            ct2 += ncols;
          }
        }
        ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
      }
    }
    ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
    ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);
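  /* cmap[i] (or the PetscTable colmaps[i]) maps a global block column to its
     1-based position in iscol[i]; a lookup result of 0 below means "column not
     in the submatrix", and tcol - 1 recovers the 0-based local column index. */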
  /* Form the matrix */
  /* Create col map */
  {
    const PetscInt *icol_i;
#if defined(PETSC_USE_CTABLE)
    ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&colmaps);CHKERRQ(ierr);
    for (i=0; i<ismax; i++) {
      ierr = PetscTableCreate(ncol[i]+1,&colmaps[i]);CHKERRQ(ierr);
    }
#else
    ierr = PetscMalloc(ismax*sizeof(PetscInt*),&cmap);CHKERRQ(ierr);
    ierr = PetscMalloc(ismax*c->Nbs*sizeof(PetscInt),&cmap[0]);CHKERRQ(ierr);
    ierr = PetscMemzero(cmap[0],ismax*c->Nbs*sizeof(PetscInt));CHKERRQ(ierr);
    for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + c->Nbs; }
#endif
    for (i=0; i<ismax; i++) {
      jmax   = ncol[i];
      icol_i = icol[i];
#if defined(PETSC_USE_CTABLE)
      lcol1_gcol1 = colmaps[i];
      for (j=0; j<jmax; j++) {
        ierr = PetscTableAdd(lcol1_gcol1,icol_i[j]+1,j+1);CHKERRQ(ierr);
      }
#else
      cmap_i = cmap[i];
      for (j=0; j<jmax; j++) {
        cmap_i[icol_i[j]] = j+1;
      }
#endif
    }
  }

  /* Create lens which is required for MatCreate... */
  for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
  ierr = PetscMalloc((1+ismax)*sizeof(PetscInt*) + j*sizeof(PetscInt),&lens);CHKERRQ(ierr);
  lens[0] = (PetscInt*)(lens + ismax);
  ierr = PetscMemzero(lens[0],j*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }

  /* Update lens from local data */
  for (i=0; i<ismax; i++) {
    jmax = nrow[i];
#if defined(PETSC_USE_CTABLE)
    lcol1_gcol1 = colmaps[i];
#else
    cmap_i = cmap[i];
#endif
    irow_i = irow[i];
    lens_i = lens[i];
    for (j=0; j<jmax; j++) {
      row = irow_i[j];
#if defined(PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      if (proc == rank) {
        /* Get indices from matA and then from matB */
        row = row - rstart;
        nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
        cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
#if defined(PETSC_USE_CTABLE)
        for (k=0; k<nzA; k++) {
          ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr);
          if (tt) { lens_i[j]++; }
        }
        for (k=0; k<nzB; k++) {
          ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr);
          if (tt) { lens_i[j]++; }
        }
#else
        for (k=0; k<nzA; k++) {
          if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; }
        }
        for (k=0; k<nzB; k++) {
          if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; }
        }
#endif
      }
    }
  }
#if defined(PETSC_USE_CTABLE)
  /* Create row map */
  ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&rowmaps);CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr = PetscTableCreate(nrow[i]+1,&rowmaps[i]);CHKERRQ(ierr);
  }
#else
  /* Create row map */
  ierr = PetscMalloc((1+ismax)*sizeof(PetscInt*) + ismax*Mbs*sizeof(PetscInt),&rmap);CHKERRQ(ierr);
  rmap[0] = (PetscInt*)(rmap + ismax);
  ierr = PetscMemzero(rmap[0],ismax*Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs; }
#endif
  for (i=0; i<ismax; i++) {
    irow_i = irow[i];
    jmax   = nrow[i];
#if defined(PETSC_USE_CTABLE)
    lrow1_grow1 = rowmaps[i];
    for (j=0; j<jmax; j++) {
      ierr = PetscTableAdd(lrow1_grow1,irow_i[j]+1,j+1);CHKERRQ(ierr);
    }
#else
    rmap_i = rmap[i];
    for (j=0; j<jmax; j++) {
      rmap_i[irow_i[j]] = j;
    }
#endif
  }
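  /* The CTABLE variants store key+1 and value+1 since a PetscTable key must be
     positive and a lookup result of 0 means "not found"; lookups therefore
     subtract 1 again (see the row-- after each PetscTableFind() below). */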
  /* Update lens from offproc data */
  {
    PetscInt    *rbuf2_i,*rbuf3_i,*sbuf1_i;
    PetscMPIInt ii;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      ierr    = MPI_Waitany(nrqs,r_waits3,&ii,r_status3+tmp2);CHKERRQ(ierr);
      idex    = pa[ii];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax + 1;
      ct2     = 0;
      rbuf2_i = rbuf2[ii];
      rbuf3_i = rbuf3[ii];
      for (j=1; j<=jmax; j++) {
        is_no  = sbuf1_i[2*j-1];
        max1   = sbuf1_i[2*j];
        lens_i = lens[is_no];
#if defined(PETSC_USE_CTABLE)
        lcol1_gcol1 = colmaps[is_no];
        lrow1_grow1 = rowmaps[is_no];
#else
        cmap_i = cmap[is_no];
        rmap_i = rmap[is_no];
#endif
        for (k=0; k<max1; k++,ct1++) {
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableFind(lrow1_grow1,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
#else
          row = rmap_i[sbuf1_i[ct1]]; /* the row in the new matrix to be updated */
#endif
          max2 = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr);
            if (tt) {
              lens_i[row]++;
            }
#else
            if (cmap_i[rbuf3_i[ct2]]) {
              lens_i[row]++;
            }
#endif
          }
        }
      }
    }
  }
  ierr = PetscFree(r_status3);CHKERRQ(ierr);
  ierr = PetscFree(r_waits3);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);}
  ierr = PetscFree(s_status3);CHKERRQ(ierr);
  ierr = PetscFree(s_waits3);CHKERRQ(ierr);
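  /* At this point lens[i][row] holds the exact number of block columns of each
     selected row that survive the column selection, so the preallocation below
     is exact. */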
  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    if (ijonly) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_REUSE_MATRIX and ijonly is not supported yet");
    /*
      Assumes new rows are same length as the old rows, hence bug!
    */
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqBAIJ*)(submats[i]->data);
      if (mat->mbs != nrow[i] || mat->nbs != ncol[i] || C->rmap->bs != bs) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong size");
      ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs*sizeof(PetscInt),&flag);CHKERRQ(ierr);
      if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. Wrong no of nonzeros");
      /* Initialise the matrix as if empty */
      ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(PetscInt));CHKERRQ(ierr);
      submats[i]->factortype = C->factortype;
    }
  } else {
    PetscInt bs_tmp;
    if (ijonly) bs_tmp = 1;
    else        bs_tmp = bs;
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,nrow[i]*bs_tmp,ncol[i]*bs_tmp);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr);
      ierr = MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); /* this subroutine is used by SBAIJ routines */
    }
  }
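  /* When ijonly is set only the block structure is requested: the matrices are
     created with block size 1 and the value buffers/messages are skipped.  Both
     the BAIJ and the SBAIJ preallocation routines are called above because this
     routine is shared with the MPISBAIJ code; the call that does not match the
     matrix type is a no-op. */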
  /* Assemble the matrices */
  /* First assemble the local rows */
  {
    PetscInt  ilen_row,*imat_ilen,*imat_j,*imat_i;
    MatScalar *imat_a = PETSC_NULL;

    for (i=0; i<ismax; i++) {
      mat       = (Mat_SeqBAIJ*)submats[i]->data;
      imat_ilen = mat->ilen;
      imat_j    = mat->j;
      imat_i    = mat->i;
      if (!ijonly) imat_a = mat->a;

#if defined(PETSC_USE_CTABLE)
      lcol1_gcol1 = colmaps[i];
      lrow1_grow1 = rowmaps[i];
#else
      cmap_i = cmap[i];
      rmap_i = rmap[i];
#endif
      irow_i = irow[i];
      jmax   = nrow[i];
      for (j=0; j<jmax; j++) {
        row = irow_i[j];
#if defined(PETSC_USE_CTABLE)
        ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
        proc = rtable[row];
#endif
        if (proc == rank) {
          row = row - rstart;
          nzA = a_i[row+1] - a_i[row];
          nzB = b_i[row+1] - b_i[row];
          cworkA = a_j + a_i[row];
          cworkB = b_j + b_i[row];
          if (!ijonly) {
            vworkA = a_a + a_i[row]*bs2;
            vworkB = b_a + b_i[row]*bs2;
          }
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableFind(lrow1_grow1,row+rstart+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
#else
          row = rmap_i[row + rstart];
#endif
          mat_i = imat_i[row];
          if (!ijonly) mat_a = imat_a + mat_i*bs2;
          mat_j    = imat_j + mat_i;
          ilen_row = imat_ilen[row];

          /* load the column indices for this row into cols */
          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) {
#if defined(PETSC_USE_CTABLE)
              ierr = PetscTableFind(lcol1_gcol1,ctmp+1,&tcol);CHKERRQ(ierr);
              if (tcol) {
#else
              if ((tcol = cmap_i[ctmp])) {
#endif
                *mat_j++ = tcol - 1;
                if (!ijonly) {
                  ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                  mat_a += bs2;
                }
                ilen_row++;
              }
            } else break;
          }
          imark = l;
          for (l=0; l<nzA; l++) {
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr);
            if (tcol) {
#else
            if ((tcol = cmap_i[cstart + cworkA[l]])) {
#endif
              *mat_j++ = tcol - 1;
              if (!ijonly) {
                ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                mat_a += bs2;
              }
              ilen_row++;
            }
          }
          for (l=imark; l<nzB; l++) {
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr);
            if (tcol) {
#else
            if ((tcol = cmap_i[bmap[cworkB[l]]])) {
#endif
              *mat_j++ = tcol - 1;
              if (!ijonly) {
                ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                mat_a += bs2;
              }
              ilen_row++;
            }
          }
          imat_ilen[row] = ilen_row;
        }
      }
    }
  }

  /* Now assemble the off-proc rows */
  {
    PetscInt    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
    PetscInt    *imat_j,*imat_i;
    MatScalar   *imat_a = PETSC_NULL,*rbuf4_i = PETSC_NULL;
    PetscMPIInt ii;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      if (ijonly) {
        ii = tmp2;
      } else {
        ierr = MPI_Waitany(nrqs,r_waits4,&ii,r_status4+tmp2);CHKERRQ(ierr);
      }
      idex    = pa[ii];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax + 1;
      ct2     = 0;
      rbuf2_i = rbuf2[ii];
      rbuf3_i = rbuf3[ii];
      if (!ijonly) rbuf4_i = rbuf4[ii];
      for (j=1; j<=jmax; j++) {
        is_no = sbuf1_i[2*j-1];
#if defined(PETSC_USE_CTABLE)
        lrow1_grow1 = rowmaps[is_no];
        lcol1_gcol1 = colmaps[is_no];
#else
        rmap_i = rmap[is_no];
        cmap_i = cmap[is_no];
#endif
        mat       = (Mat_SeqBAIJ*)submats[is_no]->data;
        imat_ilen = mat->ilen;
        imat_j    = mat->j;
        imat_i    = mat->i;
        if (!ijonly) imat_a = mat->a;
        max1 = sbuf1_i[2*j];
        for (k=0; k<max1; k++,ct1++) {
          row = sbuf1_i[ct1];
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableFind(lrow1_grow1,row+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
#else
          row = rmap_i[row];
#endif
          ilen  = imat_ilen[row];
          mat_i = imat_i[row];
          if (!ijonly) mat_a = imat_a + mat_i*bs2;
          mat_j = imat_j + mat_i;
          max2  = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
            if (tcol) {
#else
            if ((tcol = cmap_i[rbuf3_i[ct2]])) {
#endif
              *mat_j++ = tcol - 1;
              if (!ijonly) {
                ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                mat_a += bs2;
              }
              ilen++;
            }
          }
          imat_ilen[row] = ilen;
        }
      }
    }
  }
  if (!ijonly) {
    ierr = PetscFree(r_status4);CHKERRQ(ierr);
    ierr = PetscFree(r_waits4);CHKERRQ(ierr);
    if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);}
    ierr = PetscFree(s_waits4);CHKERRQ(ierr);
    ierr = PetscFree(s_status4);CHKERRQ(ierr);
  }

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
  }

  /* Destroy allocated memory */
#if defined(PETSC_USE_CTABLE)
  ierr = PetscFree4(irow,icol,nrow,ncol);CHKERRQ(ierr);
#else
  ierr = PetscFree5(irow,icol,nrow,ncol,rtable);CHKERRQ(ierr);
#endif
  ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }
  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree3(sbuf2,req_size,req_source);CHKERRQ(ierr);
  ierr = PetscFree(rbuf3);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
  if (!ijonly) {
    for (i=0; i<nrqs; ++i) {ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);}
    ierr = PetscFree(rbuf4);CHKERRQ(ierr);
    ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
    ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);
  }

#if defined(PETSC_USE_CTABLE)
  for (i=0; i<ismax; i++) {
    ierr = PetscTableDestroy(&rowmaps[i]);CHKERRQ(ierr);
    ierr = PetscTableDestroy(&colmaps[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(colmaps);CHKERRQ(ierr);
  ierr = PetscFree(rowmaps);CHKERRQ(ierr);
#else
  ierr = PetscFree(rmap);CHKERRQ(ierr);
  ierr = PetscFree(cmap[0]);CHKERRQ(ierr);
  ierr = PetscFree(cmap);CHKERRQ(ierr);
#endif
  ierr = PetscFree(lens);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  c->ijonly = PETSC_FALSE; /* set back to the default */
  PetscFunctionReturn(0);
}