/*
   Routines to compute overlapping regions of a parallel MPI matrix
   and to find submatrices that were shared across processors.
*/
#include <../src/mat/impls/baij/mpi/mpibaij.h>
#include <petscbt.h>

static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**);
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*);
extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ"
PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i,N=C->cmap->N,bs=C->rmap->bs;
  IS             *is_new;

  PetscFunctionBegin;
  ierr = PetscMalloc(imax*sizeof(IS),&is_new);CHKERRQ(ierr);
  /* Convert the indices into block format */
  ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,imax,is,is_new);CHKERRQ(ierr);
  if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr);
  }
  for (i=0; i<imax; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
  ierr = ISExpandIndicesGeneral(N,N,bs,imax,is_new,is);CHKERRQ(ierr);
  for (i=0; i<imax; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
  ierr = PetscFree(is_new);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  Sample message format:
  If a processor A wants processor B to process some elements corresponding
  to index sets is[1], is[5]
  mesg [0] = 2    (number of index sets in the mesg)
  -----------
  mesg [1] = 1 => is[1]
  mesg [2] = sizeof(is[1]);
  -----------
  mesg [3] = 5 => is[5]
  mesg [4] = sizeof(is[5]);
  -----------
  mesg [5]
  mesg [n]   data(is[1])
  -----------
  mesg[n+1]
  mesg[m]    data(is[5])
  -----------

  Notes:
  nrqs - number of requests sent (or to be sent out)
  nrqr - number of requests received (which have to be or which have been processed)
*/
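/*
  A concrete instance of the format above (row numbers chosen only for
  illustration): if this process needs three block rows (say 4,7,9) of is[1]
  and two block rows (say 11,12) of is[5] from processor B, the outgoing
  buffer is packed as

     mesg = [ 2,  1,3,  5,2,  4,7,9, 11,12 ]

  i.e. a header of 1 + 2*(number of index sets) entries followed by the row
  indices of each index set in turn.
*/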
#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Once"
PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[])
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  const PetscInt **idx,*idx_i;
  PetscInt       *n,*w3,*w4,**data,len;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2,*w2,*w1,nrqr;
  PetscInt       Mbs,i,j,k,**rbuf,row,proc=-1,nrqs,msz,**outdat,**ptr;
  PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p;
  PetscMPIInt    *onodes1,*olengths1,*onodes2,*olengths2;
  PetscBT        *table;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *s_status,*recv_status;
  char           *t_p;

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  size = c->size;
  rank = c->rank;
  Mbs  = c->Mbs;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  ierr = PetscMalloc2(imax+1,const PetscInt*,&idx,imax,PetscInt,&n);CHKERRQ(ierr);

  for (i=0; i<imax; i++) {
    ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
  }

  /* evaluate communication - mesg to who, length of mesg, and buffer space required.
     Based on this, buffers are allocated, and data copied into them */
  ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
  ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<imax; i++) {
    ierr  = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector */
    idx_i = idx[i];
    len   = n[i];
    for (j=0; j<len; j++) {
      row = idx_i[j];
      if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
      ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr);
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
    }
  }

  nrqs     = 0; /* number of outgoing messages */
  msz      = 0; /* total mesg length (for all procs) */
  w1[rank] = 0; /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
  }
  /* pa - is the list of processors to communicate with */
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr);
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) {pa[j] = i; j++;}
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,PetscInt*,&outdat,size,PetscInt*,&ptr,msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
  ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr);
  ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     += ict;
      outdat[j] = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* plug in the headers */
  for (i=0; i<nrqs; i++) {
    j            = pa[i];
    outdat[j][0] = 0;
    ierr         = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]       = outdat[j] + 2*w3[j] + 1;
  }

  /* Memory for doing local proc's work */
  {
    ierr = PetscMalloc5(imax,PetscBT,&table, imax,PetscInt*,&data, imax,PetscInt,&isz,
                        Mbs*imax,PetscInt,&d_p, (Mbs/PETSC_BITS_PER_BYTE+1)*imax,char,&t_p);CHKERRQ(ierr);
    ierr = PetscMemzero(table,imax*sizeof(PetscBT));CHKERRQ(ierr);
    ierr = PetscMemzero(data,imax*sizeof(PetscInt*));CHKERRQ(ierr);
    ierr = PetscMemzero(isz,imax*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemzero(d_p,Mbs*imax*sizeof(PetscInt));CHKERRQ(ierr);
    ierr = PetscMemzero(t_p,(Mbs/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char));CHKERRQ(ierr);

    for (i=0; i<imax; i++) {
      table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
      data[i]  = d_p + Mbs*i;
    }
  }

  /* Parse the IS and update local tables and the outgoing buf with the data */
  {
    PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j;
    PetscBT  table_i;

    for (i=0; i<imax; i++) {
      ierr    = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
      n_i     = n[i];
      table_i = table[i];
      idx_i   = idx[i];
      data_i  = data[i];
      isz_i   = isz[i];
      for (j=0; j<n_i; j++) { /* parse the indices of each IS */
        row = idx_i[j];
        ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr);
        if (proc != rank) { /* copy to the outgoing buffer */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        } else { /* Update the local table */
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
        }
      }
      /* Update the headers for the current IS */
      for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
        if ((ctr_j = ctr[j])) {
          outdat_j        = outdat[j];
          k               = ++outdat_j[0];
          outdat_j[2*k]   = ctr_j;
          outdat_j[2*k-1] = i;
        }
      }
      isz[i] = isz_i;
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j = pa[i];
    ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* No longer need the original indices */
  for (i=0; i<imax; ++i) {
    ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
  }
  ierr = PetscFree2(idx,n);CHKERRQ(ierr);

  for (i=0; i<imax; ++i) {
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }

  /* Do local work */
  ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);}

  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}

  /* Phase 1 sends are complete - deallocate buffers */
  ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr);
  ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);

  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&xdata);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt),&isz1);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf);CHKERRQ(ierr);

  /* Send the data back */
  /* Do a global reduction to know the buffer space required for incoming messages */
  {
    PetscMPIInt *rw1;

    ierr = PetscMalloc(size*sizeof(PetscInt),&rw1);CHKERRQ(ierr);
    ierr = PetscMemzero(rw1,size*sizeof(PetscInt));CHKERRQ(ierr);

    for (i=0; i<nrqr; ++i) {
      proc = recv_status[i].MPI_SOURCE;
      if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
      rw1[proc] = isz1[i];
    }

    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree(olengths1);CHKERRQ(ierr);

    /* Determine the number of messages to expect, their lengths, and their from-ids */
    ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
    ierr = PetscFree(rw1);CHKERRQ(ierr);
  }
  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    j = recv_status[i].MPI_SOURCE;
    ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
  }

  /* receive work done on other processors */
  {
    PetscMPIInt idex;
    PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
    PetscBT     table_i;
    MPI_Status  *status2;

    ierr = PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr);
    for (i=0; i<nrqs; ++i) {
      ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
      /* Process the message */
      rbuf2_i = rbuf2[idex];
      ct1     = 2*rbuf2_i[0]+1;
      jmax    = rbuf2[idex][0];
      for (j=1; j<=jmax; j++) {
        max     = rbuf2_i[2*j];
        is_no   = rbuf2_i[2*j-1];
        isz_i   = isz[is_no];
        data_i  = data[is_no];
        table_i = table[is_no];
        for (k=0; k<max; k++,ct1++) {
          row = rbuf2_i[ct1];
          if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
        }
        isz[is_no] = isz_i;
      }
    }
    if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);}
    ierr = PetscFree(status2);CHKERRQ(ierr);
  }

  for (i=0; i<imax; ++i) {
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
  }

  ierr = PetscFree(onodes2);CHKERRQ(ierr);
  ierr = PetscFree(olengths2);CHKERRQ(ierr);

  ierr = PetscFree(pa);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);
  ierr = PetscFree(recv_status);CHKERRQ(ierr);
  ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
  ierr = PetscFree(xdata);CHKERRQ(ierr);
  ierr = PetscFree(isz1);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Local"
/*
   MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
   the work on the local processor.

     Inputs:
      C      - MAT_MPIBAIJ;
      imax   - total number of index sets processed at a time;
      table  - an array of char - size = Mbs bits.
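               Each table[i] is used as a bit map over the Mbs block rows:
               a set bit means that block row already belongs to the i-th
               index set, so it is not added to data[i] a second time.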

     Output:
      isz    - array containing the count of the solution elements corresponding
               to each index set;
      data   - pointer to the solutions
*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data)
{
  Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
  Mat         A  = c->A,B = c->B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
  PetscInt    start,end,val,max,rstart,cstart,*ai,*aj;
  PetscInt    *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
  PetscBT     table_i;

  PetscFunctionBegin;
  rstart = c->rstartbs;
  cstart = c->cstartbs;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0; i<imax; i++) {
    data_i  = data[i];
    table_i = table[i];
    isz_i   = isz[i];
    for (j=0,max=isz[i]; j<max; j++) {
      row   = data_i[j] - rstart;
      start = ai[row];
      end   = ai[row+1];
      for (k=start; k<end; k++) { /* Amat */
        val = aj[k] + cstart;
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
      }
      start = bi[row];
      end   = bi[row+1];
      for (k=start; k<end; k++) { /* Bmat */
        val = garray[bj[k]];
        if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
      }
    }
    isz[i] = isz_i;
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Receive"
/*
   MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
   and return the output

     Input:
      C    - the matrix
      nrqr - number of messages being processed.
      rbuf - an array of pointers to the received requests

     Output:
      xdata - array of messages to be sent back
      isz1  - size of each message

   For better efficiency perhaps we should malloc each xdata[i] separately;
   then if a re-malloc is required we would only need to copy the data for that
   one row rather than all previous rows, as happens now where a single large
   chunk of memory is used.

*/
static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt *isz1)
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  Mat            A  = c->A,B = c->B;
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
  PetscErrorCode ierr;
  PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
  PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
  PetscInt       val,max1,max2,Mbs,no_malloc=0,*tmp,new_estimate,ctr;
  PetscInt       *rbuf_i,kmax,rbuf_0;
  PetscBT        xtable;

  PetscFunctionBegin;
  Mbs    = c->Mbs;
  rstart = c->rstartbs;
  cstart = c->cstartbs;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
    rbuf_i  = rbuf[i];
    rbuf_0  = rbuf_i[0];
    ct     += rbuf_0;
    for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
  }

  if (c->Mbs) max1 = ct*(a->nz + b->nz)/c->Mbs;
  else max1 = 1;
  mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
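  /* mem_estimate is only a heuristic: ct*(a->nz + b->nz)/Mbs approximates the number
     of nonzero block columns the ct requested rows will contribute, and the factor 3
     adds slack. If the estimate is exceeded, the loops below grow xdata[0] by a
     factor of 1.5, re-link the xdata[i] pointers, and count the resize in no_malloc. */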
  ierr = PetscMalloc(mem_estimate*sizeof(PetscInt),&xdata[0]);CHKERRQ(ierr);
  ++no_malloc;
  ierr = PetscBTCreate(Mbs,&xtable);CHKERRQ(ierr);
  ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr);

  ct3 = 0;
  for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
    rbuf_i = rbuf[i];
    rbuf_0 = rbuf_i[0];
    ct1    = 2*rbuf_0+1;
    ct2    = ct1;
    ct3   += ct1;
    for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i */
      ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr);
      oct2 = ct2;
      kmax = rbuf_i[2*j];
      for (k=0; k<kmax; k++,ct1++) {
        row = rbuf_i[ct1];
        if (!PetscBTLookupSet(xtable,row)) {
          if (!(ct3 < mem_estimate)) {
            new_estimate = (PetscInt)(1.5*mem_estimate)+1;
            ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
            ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
            ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
            xdata[0]     = tmp;
            mem_estimate = new_estimate; ++no_malloc;
            for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
          }
          xdata[i][ct2++] = row;
          ct3++;
        }
      }
      for (k=oct2,max2=ct2; k<max2; k++) {
        row   = xdata[i][k] - rstart;
        start = ai[row];
        end   = ai[row+1];
        for (l=start; l<end; l++) { /* Amat */
          val = aj[l] + cstart;
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
        start = bi[row];
        end   = bi[row+1];
        for (l=start; l<end; l++) { /* Bmat */
          val = garray[bj[l]];
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr = PetscMalloc(new_estimate*sizeof(PetscInt),&tmp);CHKERRQ(ierr);
              ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header */
      xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var */
      xdata[i][2*j-1] = rbuf_i[2*j-1];
    }
    xdata[i][0] = rbuf_0;
    xdata[i+1]  = xdata[i] + ct2;
    isz1[i]     = ct2; /* size of each message */
  }
  ierr = PetscBTDestroy(&xtable);CHKERRQ(ierr);
  ierr = PetscInfo3(C,"Allocated %D bytes, required %D, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIBAIJ"
PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
{
  IS             *isrow_new,*iscol_new;
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  PetscErrorCode ierr;
  PetscInt       nmax,nstages_local,nstages,i,pos,max_no,ncol,nrow,N=C->cmap->N,bs=C->rmap->bs;
  PetscBool      colflag,*allcolumns,*allrows;

  PetscFunctionBegin;
  /* Currently, unsorted column indices will result in inverted column indices in the resulting submatrices. */
  for (i = 0; i < ismax; ++i) {
    PetscBool sorted;
    ierr = ISSorted(iscol[i], &sorted);CHKERRQ(ierr);
    if (!sorted) SETERRQ1(((PetscObject)iscol[i])->comm, PETSC_ERR_SUP, "Column index set %D not sorted", i);
  }
  /* Ideally this compression and expansion should be avoided: it does not
     flag errors and it may change the indices, hence it is error-prone */
  ierr = PetscMalloc2(ismax+1,IS,&isrow_new,ismax+1,IS,&iscol_new);CHKERRQ(ierr);
  ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,ismax,isrow,isrow_new);CHKERRQ(ierr);
  ierr = ISCompressIndicesGeneral(N,C->cmap->n,bs,ismax,iscol,iscol_new);CHKERRQ(ierr);

  /* Check for special case: each processor gets entire matrix columns */
  ierr = PetscMalloc2(ismax+1,PetscBool,&allcolumns,ismax+1,PetscBool,&allrows);CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr);
    ierr = ISGetLocalSize(iscol[i],&ncol);CHKERRQ(ierr);
    if (colflag && ncol == C->cmap->N) {
      allcolumns[i] = PETSC_TRUE;
    } else {
      allcolumns[i] = PETSC_FALSE;
    }

    ierr = ISIdentity(isrow[i],&colflag);CHKERRQ(ierr);
    ierr = ISGetLocalSize(isrow[i],&nrow);CHKERRQ(ierr);
    if (colflag && nrow == C->rmap->N) {
      allrows[i] = PETSC_TRUE;
    } else {
      allrows[i] = PETSC_FALSE;
    }
  }

  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) {
    ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
  }
  /* Determine the number of stages through which submatrices are done */
  nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);
  for (i=0,pos=0; i<nstages; i++) {
    if (pos+nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else                   max_no = ismax-pos;
    ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,allrows+pos,allcolumns+pos,*submat+pos);CHKERRQ(ierr);
    pos += max_no;
  }

  for (i=0; i<ismax; i++) {
    ierr = ISDestroy(&isrow_new[i]);CHKERRQ(ierr);
    ierr = ISDestroy(&iscol_new[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree2(isrow_new,iscol_new);CHKERRQ(ierr);
  ierr = PetscFree2(allcolumns,allrows);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#if defined (PETSC_USE_CTABLE)
#undef __FUNCT__
#define __FUNCT__ "PetscGetProc"
PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank)
{
  PetscInt    nGlobalNd = proc_gnode[size];
  PetscMPIInt fproc     = PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)));

  PetscFunctionBegin;
  if (fproc > size) fproc = size;
  while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) {
    if (row < proc_gnode[fproc]) fproc--;
    else fproc++;
  }
  *rank = fproc;
  PetscFunctionReturn(0);
}
#endif

/* -------------------------------------------------------------------------*/
/* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
#undef __FUNCT__
#define __FUNCT__ "MatGetSubMatrices_MPIBAIJ_local"
PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allrows,PetscBool *allcolumns,Mat *submats)
{
  Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
  Mat            A = c->A;
  Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
  const PetscInt **irow,**icol,*irow_i;
  PetscInt       *nrow,*ncol,*w3,*w4,start;
  PetscErrorCode ierr;
  PetscMPIInt    size,tag0,tag1,tag2,tag3,*w1,*w2,nrqr,idex,end,proc;
  PetscInt       **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,**rbuf1,row;
  PetscInt       nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol;
  PetscInt       **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
  PetscInt       **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax;
  PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
  PetscInt       bs=C->rmap->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
  PetscInt       cstart = c->cstartbs,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
  PetscInt       *bmap = c->garray,ctmp,rstart=c->rstartbs;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3,*s_waits3;
  MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2,*r_status3;
  MPI_Comm       comm;
  PetscBool      flag;
  PetscMPIInt    *onodes1,*olengths1;
  PetscBool      ijonly=c->ijonly; /* private flag indicating that only the matrix data structures are requested */
  /* variables below are used for the matrix numerical values - case of !ijonly */
  MPI_Request    *r_waits4,*s_waits4;
  MPI_Status     *r_status4,*s_status4;
  MatScalar      **rbuf4,**sbuf_aa,*vals,*mat_a = PETSC_NULL,*sbuf_aa_i,*vworkA = PETSC_NULL,*vworkB = PETSC_NULL;
  MatScalar      *a_a=a->a,*b_a=b->a;

#if defined (PETSC_USE_CTABLE)
  PetscInt   tt;
  PetscTable *rmap,*cmap,rmap_i,cmap_i=PETSC_NULL;
#else
  PetscInt   **cmap,*cmap_i=PETSC_NULL,*rtable,*rmap_i,**rmap,Mbs = c->Mbs;
#endif

  PetscFunctionBegin;
  comm = ((PetscObject)C)->comm;
  tag0 = ((PetscObject)C)->tag;
  size = c->size;
  rank = c->rank;

  /* Get some new tags to keep the communication clean */
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);

#if defined(PETSC_USE_CTABLE)
  ierr = PetscMalloc4(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol);CHKERRQ(ierr);
#else
  ierr = PetscMalloc5(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol,Mbs+1,PetscInt,&rtable);CHKERRQ(ierr);
  /* Create hash table for the mapping: row -> proc */
  for (i=0,j=0; i<size; i++) {
    jmax = C->rmap->range[i+1]/bs;
    for (; j<jmax; j++) {
      rtable[j] = i;
    }
  }
#endif

  for (i=0; i<ismax; i++) {
    if (allrows[i]) {
      irow[i] = PETSC_NULL;
      nrow[i] = C->rmap->N/bs;
    } else {
      ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
      ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
    }

    if (allcolumns[i]) {
      icol[i] = PETSC_NULL;
      ncol[i] = C->cmap->N/bs;
    } else {
      ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
      ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
    }
  }

  /* evaluate communication - mesg to who, length of mesg, and buffer space required.
     Based on this, buffers are allocated, and data copied into them */
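  /* Work arrays, indexed by processor:
       w1[p] - total number of block rows to be requested from p (the header length is added below),
       w2[p] - 1 if a message goes to p, 0 otherwise,
       w3[p] - number of index sets that need data from p,
       w4    - per-index-set scratch counter, zeroed for every IS */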
  ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
  ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j=0; j<jmax; j++) {
      if (allrows[i]) {
        row = j;
      } else {
        row = irow_i[j];
      }
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
    }
  }

  nrqs     = 0; /* number of outgoing messages */
  msz      = 0; /* total mesg length for all procs */
  w1[rank] = 0; /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i]) { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr); /* (proc-array) */
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) { pa[j] = i; j++; }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);

  ierr = PetscFree(onodes1);CHKERRQ(ierr);
  ierr = PetscFree(olengths1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,PetscInt*,&sbuf1,size,PetscInt*,&ptr,2*msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
  ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr);
  ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
  {
    PetscInt *iptr = tmp,ict = 0;
    for (i=0; i<nrqs; i++) {
      j        = pa[i];
      iptr    += ict;
      sbuf1[j] = iptr;
      ict      = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* Initialise the header space */
  for (i=0; i<nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i=0; i<ismax; i++) {
    ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j=0; j<jmax; j++) { /* parse the indices of each IS */
      if (allrows[i]) {
        row = j;
      } else {
        row = irow_i[j];
      }
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      if (proc != rank) { /* copy to the outgoing buf */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j=0; j<size; j++) { /* Can Optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j        = sbuf1[j];
        k              = ++sbuf1_j[0];
        sbuf1_j[2*k]   = ctr_j;
        sbuf1_j[2*k-1] = i;
      }
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j = pa[i];
    ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* Post Receives to capture the buffer size */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf2);CHKERRQ(ierr);
  rbuf2[0] = tmp + msz;
  for (i=1; i<nrqs; ++i) {
    j        = pa[i];
    rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
  }
  for (i=0; i<nrqs; ++i) {
    j = pa[i];
    ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
  }

  /* Send to other procs the buf size they should allocate */

  /* Receive messages */
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
  ierr = PetscMalloc3(nrqr+1,PetscInt*,&sbuf2,nrqr,PetscInt,&req_size,nrqr,PetscInt,&req_source);CHKERRQ(ierr);
  {
    Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
    PetscInt    *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;

    for (i=0; i<nrqr; ++i) {
      ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
      req_size[idex] = 0;
      rbuf1_i        = rbuf1[idex];
      start          = 2*rbuf1_i[0] + 1;
      ierr = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
      ierr = PetscMalloc(end*sizeof(PetscInt),&sbuf2[idex]);CHKERRQ(ierr);
      sbuf2_i = sbuf2[idex];
      for (j=start; j<end; j++) {
        id              = rbuf1_i[j] - rstart;
        ncols           = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
        sbuf2_i[j]      = ncols;
        req_size[idex] += ncols;
      }
      req_source[idex] = r_status1[i].MPI_SOURCE;
      /* form the header */
      sbuf2_i[0] = req_size[idex];
      for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
      ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(r_status1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);

  /* recv buffer sizes */
  /* Receive messages */
  ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);
  if (!ijonly) {
    ierr = PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);CHKERRQ(ierr);
    ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr);
  }

  for (i=0; i<nrqs; ++i) {
    ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr);
    ierr = PetscMalloc(rbuf2[idex][0]*sizeof(PetscInt),&rbuf3[idex]);CHKERRQ(ierr);
    ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPIU_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr);
    if (!ijonly) {
      ierr = PetscMalloc(rbuf2[idex][0]*bs2*sizeof(MatScalar),&rbuf4[idex]);CHKERRQ(ierr);
      ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(r_status2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);

  /* Wait on sends1 and sends2 */
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);

  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
  ierr = PetscFree(s_status1);CHKERRQ(ierr);
  ierr = PetscFree(s_status2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);

  /* Now allocate buffers for a->j, and send them off */
  ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&sbuf_aj);CHKERRQ(ierr);
  for (i=0,j=0; i<nrqr; i++) j += req_size[i];
  ierr = PetscMalloc((j+1)*sizeof(PetscInt),&sbuf_aj[0]);CHKERRQ(ierr);
  for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];

  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr);
  {
    for (i=0; i<nrqr; i++) {
      rbuf1_i   = rbuf1[i];
      sbuf_aj_i = sbuf_aj[i];
      ct1       = 2*rbuf1_i[0] + 1;
      ct2       = 0;
      for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
        kmax = rbuf1[i][2*j];
        for (k=0; k<kmax; k++,ct1++) {
          row    = rbuf1_i[ct1] - rstart;
          nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
          ncols  = nzA + nzB;
          cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];

          /* load the column indices for this row into cols */
          cols = sbuf_aj_i + ct2;
          for (l=0; l<nzB; l++) {
            if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
            else break;
          }
          imark = l;
          for (l=0; l<nzA; l++)     cols[imark+l] = cstart + cworkA[l];
          for (l=imark; l<nzB; l++) cols[nzA+l]   = bmap[cworkB[l]];
          ct2 += ncols;
        }
      }
      ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
    }
  }
  ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr);
  ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr);

  /* Allocate buffers for a->a, and send them off */
  if (!ijonly) {
    ierr = PetscMalloc((nrqr+1)*sizeof(MatScalar*),&sbuf_aa);CHKERRQ(ierr);
    for (i=0,j=0; i<nrqr; i++) j += req_size[i];
    ierr = PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);CHKERRQ(ierr);
    for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;

    ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr);
    {
      for (i=0; i<nrqr; i++) {
        rbuf1_i   = rbuf1[i];
        sbuf_aa_i = sbuf_aa[i];
        ct1       = 2*rbuf1_i[0]+1;
        ct2       = 0;
        for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
          kmax = rbuf1_i[2*j];
          for (k=0; k<kmax; k++,ct1++) {
            row    = rbuf1_i[ct1] - rstart;
            nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
            ncols  = nzA + nzB;
            cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
            vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;

            /* load the column values for this row into vals */
            vals = sbuf_aa_i + ct2*bs2;
            for (l=0; l<nzB; l++) {
              if ((bmap[cworkB[l]]) < cstart) {
                ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
              } else break;
            }
            imark = l;
            for (l=0; l<nzA; l++) {
              ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
            }
            for (l=imark; l<nzB; l++) {
              ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
            }
            ct2 += ncols;
          }
        }
        ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
      }
    }
    ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
    ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
  }
  ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf1);CHKERRQ(ierr);

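  /* All index (and, when !ijonly, value) requests are now in flight or satisfied.
     The remaining work is mostly local: build the global->local column maps (cmap)
     and row maps (rmap), count the row lengths (lens) from local and off-process
     data, create the sequential (S)BAIJ submatrices, and fill them in. */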
  /* Form the matrix */
  /* create col map: global col of C -> local col of submatrices */
  {
    const PetscInt *icol_i;
#if defined (PETSC_USE_CTABLE)
    ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&cmap);CHKERRQ(ierr);
    for (i=0; i<ismax; i++) {
      if (!allcolumns[i]) {
        ierr = PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);CHKERRQ(ierr);
        jmax   = ncol[i];
        icol_i = icol[i];
        cmap_i = cmap[i];
        for (j=0; j<jmax; j++) {
          ierr = PetscTableAdd(cmap_i,icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
        }
      } else {
        cmap[i] = PETSC_NULL;
      }
    }
#else
    ierr = PetscMalloc(ismax*sizeof(PetscInt*),&cmap);CHKERRQ(ierr);
    for (i=0; i<ismax; i++) {
      if (!allcolumns[i]) {
        ierr = PetscMalloc(c->Nbs*sizeof(PetscInt),&cmap[i]);CHKERRQ(ierr);
        ierr = PetscMemzero(cmap[i],c->Nbs*sizeof(PetscInt));CHKERRQ(ierr);
        jmax   = ncol[i];
        icol_i = icol[i];
        cmap_i = cmap[i];
        for (j=0; j<jmax; j++) {
          cmap_i[icol_i[j]] = j+1;
        }
      } else { /* allcolumns[i] */
        cmap[i] = PETSC_NULL;
      }
    }
#endif
  }

  /* Create lens which is required for MatCreate... */
  for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
  ierr = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ j*sizeof(PetscInt),&lens);CHKERRQ(ierr);
  lens[0] = (PetscInt*)(lens + ismax);
  ierr = PetscMemzero(lens[0],j*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }

  /* Update lens from local data */
  for (i=0; i<ismax; i++) {
    jmax = nrow[i];
    if (!allcolumns[i]) cmap_i = cmap[i];
    irow_i = irow[i];
    lens_i = lens[i];
    for (j=0; j<jmax; j++) {
      if (allrows[i]) {
        row = j;
      } else {
        row = irow_i[j];
      }
#if defined (PETSC_USE_CTABLE)
      ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
      proc = rtable[row];
#endif
      if (proc == rank) {
        /* Get indices from matA and then from matB */
        row = row - rstart;
        nzA = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
        cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
        if (!allcolumns[i]) {
#if defined (PETSC_USE_CTABLE)
          for (k=0; k<nzA; k++) {
            ierr = PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr);
            if (tt) { lens_i[j]++; }
          }
          for (k=0; k<nzB; k++) {
            ierr = PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr);
            if (tt) { lens_i[j]++; }
          }
#else
          for (k=0; k<nzA; k++) {
            if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; }
          }
          for (k=0; k<nzB; k++) {
            if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; }
          }
#endif
        } else { /* allcolumns */
          lens_i[j] = nzA + nzB;
        }
      }
    }
  }
#if defined (PETSC_USE_CTABLE)
  /* Create row map */
  ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&rmap);CHKERRQ(ierr);
  for (i=0; i<ismax; i++) {
    ierr = PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);CHKERRQ(ierr);
  }
#else
  /* Create row map */
  ierr = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ ismax*Mbs*sizeof(PetscInt),&rmap);CHKERRQ(ierr);
  rmap[0] = (PetscInt*)(rmap + ismax);
  ierr = PetscMemzero(rmap[0],ismax*Mbs*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs;}
#endif
  for (i=0; i<ismax; i++) {
    irow_i = irow[i];
    jmax   = nrow[i];
#if defined (PETSC_USE_CTABLE)
    rmap_i = rmap[i];
    for (j=0; j<jmax; j++) {
      if (allrows[i]) {
        ierr = PetscTableAdd(rmap_i,j+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
      } else {
        ierr = PetscTableAdd(rmap_i,irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
      }
    }
#else
    rmap_i = rmap[i];
    for (j=0; j<jmax; j++) {
      if (allrows[i]) {
        rmap_i[j] = j;
      } else {
        rmap_i[irow_i[j]] = j;
      }
    }
#endif
  }

  /* Update lens from offproc data */
  {
    PetscInt    *rbuf2_i,*rbuf3_i,*sbuf1_i;
    PetscMPIInt ii;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      ierr = MPI_Waitany(nrqs,r_waits3,&ii,r_status3+tmp2);CHKERRQ(ierr);
      idex    = pa[ii];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax+1;
      ct2     = 0;
      rbuf2_i = rbuf2[ii];
      rbuf3_i = rbuf3[ii];
      for (j=1; j<=jmax; j++) {
        is_no  = sbuf1_i[2*j-1];
        max1   = sbuf1_i[2*j];
        lens_i = lens[is_no];
        if (!allcolumns[is_no]) cmap_i = cmap[is_no];
        rmap_i = rmap[is_no];
        for (k=0; k<max1; k++,ct1++) {
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
#else
          row = rmap_i[sbuf1_i[ct1]]; /* the row location in the new (sub)matrix */
#endif
          max2 = rbuf2_i[ct1];
          for (l=0; l<max2; l++,ct2++) {
            if (!allcolumns[is_no]) {
#if defined (PETSC_USE_CTABLE)
              ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr);
              if (tt) {
                lens_i[row]++;
              }
#else
              if (cmap_i[rbuf3_i[ct2]]) {
                lens_i[row]++;
              }
#endif
            } else { /* allcolumns */
              lens_i[row]++;
            }
          }
        }
      }
    }
  }
  ierr = PetscFree(r_status3);CHKERRQ(ierr);
  ierr = PetscFree(r_waits3);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);}
  ierr = PetscFree(s_status3);CHKERRQ(ierr);
  ierr = PetscFree(s_waits3);CHKERRQ(ierr);

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    if (ijonly) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_REUSE_MATRIX and ijonly is not supported yet");
    /*
       Assumes new rows are same length as the old rows, hence bug!
    */
    for (i=0; i<ismax; i++) {
      mat = (Mat_SeqBAIJ*)(submats[i]->data);
      if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || C->rmap->bs != bs)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
      ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs*sizeof(PetscInt),&flag);CHKERRQ(ierr);
      if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. wrong no of nonzeros");
      /* Initialize the matrix as if it were empty */
      ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(PetscInt));CHKERRQ(ierr);
      submats[i]->factortype = C->factortype;
    }
  } else {
    PetscInt bs_tmp;
    if (ijonly) {
      bs_tmp = 1;
    } else {
      bs_tmp = bs;
    }
    for (i=0; i<ismax; i++) {
      ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
      ierr = MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,nrow[i]*bs_tmp,ncol[i]*bs_tmp);CHKERRQ(ierr);
      ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
      ierr = MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr);
      ierr = MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); /* this subroutine is used by SBAIJ routines */
    }
  }

  /* Assemble the matrices */
  /* First assemble the local rows */
  {
    PetscInt  ilen_row,*imat_ilen,*imat_j,*imat_i;
    MatScalar *imat_a = PETSC_NULL;

    for (i=0; i<ismax; i++) {
      mat       = (Mat_SeqBAIJ*)submats[i]->data;
      imat_ilen = mat->ilen;
      imat_j    = mat->j;
      imat_i    = mat->i;
      if (!ijonly) imat_a = mat->a;
      if (!allcolumns[i]) cmap_i = cmap[i];
      rmap_i = rmap[i];
      irow_i = irow[i];
      jmax   = nrow[i];
      for (j=0; j<jmax; j++) {
        if (allrows[i]) {
          row = j;
        } else {
          row = irow_i[j];
        }
#if defined (PETSC_USE_CTABLE)
        ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
#else
        proc = rtable[row];
#endif
        if (proc == rank) {
          row = row - rstart;
          nzA = a_i[row+1] - a_i[row];
          nzB = b_i[row+1] - b_i[row];
          cworkA = a_j + a_i[row];
          cworkB = b_j + b_i[row];
          if (!ijonly) {
            vworkA = a_a + a_i[row]*bs2;
            vworkB = b_a + b_i[row]*bs2;
          }
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(rmap_i,row+rstart+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
#else
          row = rmap_i[row + rstart];
#endif
          mat_i = imat_i[row];
          if (!ijonly) mat_a = imat_a + mat_i*bs2;
          mat_j    = imat_j + mat_i;
          ilen_row = imat_ilen[row];

          /* load the column indices for this row into cols */
          if (!allcolumns[i]) {
            for (l=0; l<nzB; l++) {
              if ((ctmp = bmap[cworkB[l]]) < cstart) {
#if defined (PETSC_USE_CTABLE)
                ierr = PetscTableFind(cmap_i,ctmp+1,&tcol);CHKERRQ(ierr);
                if (tcol) {
#else
                if ((tcol = cmap_i[ctmp])) {
#endif
                  *mat_j++ = tcol - 1;
                  if (!ijonly) { /* copy values only when they were requested */
                    ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                    mat_a += bs2;
                  }
                  ilen_row++;
                }
              } else break;
            }
            imark = l;
            for (l=0; l<nzA; l++) {
#if defined (PETSC_USE_CTABLE)
              ierr = PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr);
              if (tcol) {
#else
              if ((tcol = cmap_i[cstart + cworkA[l]])) {
#endif
                *mat_j++ = tcol - 1;
                if (!ijonly) {
                  ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                  mat_a += bs2;
                }
                ilen_row++;
              }
            }
            for (l=imark; l<nzB; l++) {
#if defined (PETSC_USE_CTABLE)
              ierr = PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr);
              if (tcol) {
#else
              if ((tcol = cmap_i[bmap[cworkB[l]]])) {
#endif
                *mat_j++ = tcol - 1;
                if (!ijonly) {
                  ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                  mat_a += bs2;
                }
                ilen_row++;
              }
            }
          } else { /* allcolumns */
            for (l=0; l<nzB; l++) {
              if ((ctmp = bmap[cworkB[l]]) < cstart) {
                *mat_j++ = ctmp;
                if (!ijonly) { /* copy values only when they were requested */
                  ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                  mat_a += bs2;
                }
                ilen_row++;
              } else break;
            }
            imark = l;
            for (l=0; l<nzA; l++) {
              *mat_j++ = cstart+cworkA[l];
              if (!ijonly) {
                ierr = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                mat_a += bs2;
              }
              ilen_row++;
            }
            for (l=imark; l<nzB; l++) {
              *mat_j++ = bmap[cworkB[l]];
              if (!ijonly) {
                ierr = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                mat_a += bs2;
              }
              ilen_row++;
            }
          }
          imat_ilen[row] = ilen_row;
        }
      }
    }
  }

  /* Now assemble the off-proc rows */
  {
    PetscInt    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
    PetscInt    *imat_j,*imat_i;
    MatScalar   *imat_a = PETSC_NULL,*rbuf4_i = PETSC_NULL;
    PetscMPIInt ii;

    for (tmp2=0; tmp2<nrqs; tmp2++) {
      if (ijonly) {
        ii = tmp2;
      } else {
        ierr = MPI_Waitany(nrqs,r_waits4,&ii,r_status4+tmp2);CHKERRQ(ierr);
      }
      idex    = pa[ii];
      sbuf1_i = sbuf1[idex];
      jmax    = sbuf1_i[0];
      ct1     = 2*jmax + 1;
      ct2     = 0;
      rbuf2_i = rbuf2[ii];
      rbuf3_i = rbuf3[ii];
      if (!ijonly) rbuf4_i = rbuf4[ii];
      for (j=1; j<=jmax; j++) {
        is_no = sbuf1_i[2*j-1];
        if (!allcolumns[is_no]) cmap_i = cmap[is_no];
        rmap_i    = rmap[is_no];
        mat       = (Mat_SeqBAIJ*)submats[is_no]->data;
        imat_ilen = mat->ilen;
        imat_j    = mat->j;
        imat_i    = mat->i;
        if (!ijonly) imat_a = mat->a;
        max1 = sbuf1_i[2*j];
        for (k=0; k<max1; k++,ct1++) {
          row = sbuf1_i[ct1];
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr);
          row--;
          if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
#else
          row = rmap_i[row];
#endif
          ilen  = imat_ilen[row];
          mat_i = imat_i[row];
          if (!ijonly) mat_a = imat_a + mat_i*bs2;
          mat_j = imat_j + mat_i;
          max2  = rbuf2_i[ct1];

          if (!allcolumns[is_no]) {
            for (l=0; l<max2; l++,ct2++) {
#if defined (PETSC_USE_CTABLE)
              ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
              if (tcol) {
#else
              if ((tcol = cmap_i[rbuf3_i[ct2]])) {
#endif
                *mat_j++ = tcol - 1;
                if (!ijonly) {
                  ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                  mat_a += bs2;
                }
                ilen++;
              }
            }
          } else { /* allcolumns */
            for (l=0; l<max2; l++,ct2++) {
              *mat_j++ = rbuf3_i[ct2];
              if (!ijonly) {
                ierr = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
                mat_a += bs2;
              }
              ilen++;
            }
          }
          imat_ilen[row] = ilen;
        }
      }
    }
  }
  if (!ijonly) {
    ierr = PetscFree(r_status4);CHKERRQ(ierr);
    ierr = PetscFree(r_waits4);CHKERRQ(ierr);
    if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);}
    ierr = PetscFree(s_waits4);CHKERRQ(ierr);
    ierr = PetscFree(s_status4);CHKERRQ(ierr);
  }

  /* Restore the indices */
  for (i=0; i<ismax; i++) {
    if (!allrows[i]) {
      ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
    }
    if (!allcolumns[i]) {
      ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
    }
  }

  /* Destroy allocated memory */
#if defined(PETSC_USE_CTABLE)
  ierr = PetscFree4(irow,icol,nrow,ncol);CHKERRQ(ierr);
#else
  ierr = PetscFree5(irow,icol,nrow,ncol,rtable);CHKERRQ(ierr);
#endif
  ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);
  ierr = PetscFree(pa);CHKERRQ(ierr);

  ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr); /* sbuf1 is freed here; do not free it again */
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
  }
  for (i=0; i<nrqs; ++i) {
    ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree3(sbuf2,req_size,req_source);CHKERRQ(ierr);
  ierr = PetscFree(rbuf3);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
  ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
  if (!ijonly) {
    for (i=0; i<nrqs; ++i) {ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);}
    ierr = PetscFree(rbuf4);CHKERRQ(ierr);
    ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
    ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);
  }

#if defined (PETSC_USE_CTABLE)
  for (i=0; i<ismax; i++) {
    ierr = PetscTableDestroy((PetscTable*)&rmap[i]);CHKERRQ(ierr);
  }
#endif
  ierr = PetscFree(rmap);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    if (!allcolumns[i]) {
#if defined (PETSC_USE_CTABLE)
      ierr = PetscTableDestroy((PetscTable*)&cmap[i]);CHKERRQ(ierr);
#else
      ierr = PetscFree(cmap[i]);CHKERRQ(ierr);
#endif
    }
  }
  ierr = PetscFree(cmap);CHKERRQ(ierr);
  ierr = PetscFree(lens);CHKERRQ(ierr);

  for (i=0; i<ismax; i++) {
    ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  }

  c->ijonly = PETSC_FALSE; /* set back to the default */
  PetscFunctionReturn(0);
}