xref: /petsc/src/mat/impls/baij/mpi/baijov.c (revision fdb35ca6b8bfd78facfcbaad2994e8eeeb8f8a19)
1 
2 /*
3    Routines to compute overlapping regions of a parallel MPI matrix
4   and to find submatrices that are shared across processors.
5 */
6 #include <../src/mat/impls/baij/mpi/mpibaij.h>
7 #include <petscbt.h>
8 
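/*
   Usage sketch (illustrative only, not part of this file): the routines below are
   normally reached through the type-independent interface, e.g.

       ierr = MatIncreaseOverlap(C,1,&is,2);                                  grow the index set is by an overlap of 2
       ierr = MatGetSubMatrices(C,1,&is,&is,MAT_INITIAL_MATRIX,&submats);     extract the corresponding sequential submatrix

   where C is a MATMPIBAIJ matrix and is an index set of rows of C; the driver code
   above is only an assumption about typical usage.
*/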
9 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**);
10 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*);
11 extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
12 extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
13 
14 #undef __FUNCT__
15 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ"
16 PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
17 {
18   PetscErrorCode ierr;
19   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs;
20   IS             *is_new;
21 
22   PetscFunctionBegin;
23   ierr = PetscMalloc(imax*sizeof(IS),&is_new);CHKERRQ(ierr);
24   /* Convert the indices into block format */
25   ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,imax,is,is_new);CHKERRQ(ierr);
26   if (ov < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");
27   for (i=0; i<ov; ++i) {
28     ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr);
29   }
30   for (i=0; i<imax; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
31   ierr = ISExpandIndicesGeneral(N,N,bs,imax,is_new,is);CHKERRQ(ierr);
32   for (i=0; i<imax; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
33   ierr = PetscFree(is_new);CHKERRQ(ierr);
34   PetscFunctionReturn(0);
35 }
36 
37 /*
38   Sample message format:
39   If a processor A wants processor B to process some elements corresponding
40   to index sets is[1], is[5]
41   mesg [0] = 2   (number of index sets in the mesg)
42   -----------
43   mesg [1] = 1 => is[1]
44   mesg [2] = sizeof(is[1]);
45   -----------
46   mesg [3] = 5  => is[5]
47   mesg [4] = sizeof(is[5]);
48   -----------
49   mesg [5]
50   mesg [n]  data(is[1])
51   -----------
52   mesg[n+1]
53   mesg[m]  data(is[5])
54   -----------
55 
56   Notes:
57   nrqs - number of requests sent (or to be sent out)
58   nrqr - number of requests received (which have to be, or which have been, processed)
59 */
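/*
   Concrete example (illustrative): if this process must ask processor B to handle
   rows {4,7,9} on behalf of is[1] and rows {2,11} on behalf of is[5], the outgoing
   buffer for B reads

       { 2,  1,3,  5,2,  4,7,9,  2,11 }

   i.e. the count of index sets, then one (is-number,size) pair per index set,
   followed by the concatenated row data; this matches the header layout written
   by the code below.
*/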
60 #undef __FUNCT__
61 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Once"
62 PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[])
63 {
64   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
65   const PetscInt **idx,*idx_i;
66   PetscInt       *n,*w3,*w4,**data,len;
67   PetscErrorCode ierr;
68   PetscMPIInt    size,rank,tag1,tag2,*w2,*w1,nrqr;
69   PetscInt       Mbs,i,j,k,**rbuf,row,proc=-1,nrqs,msz,**outdat,**ptr;
70   PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p;
71   PetscMPIInt    *onodes1,*olengths1,*onodes2,*olengths2;
72   PetscBT        *table;
73   MPI_Comm       comm;
74   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
75   MPI_Status     *s_status,*recv_status;
76   char           *t_p;
77 
78   PetscFunctionBegin;
79   comm = ((PetscObject)C)->comm;
80   size = c->size;
81   rank = c->rank;
82   Mbs  = c->Mbs;
83 
84   ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
85   ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
86 
87   ierr = PetscMalloc2(imax+1,const PetscInt*,&idx,imax,PetscInt,&n);CHKERRQ(ierr);
88 
89   for (i=0; i<imax; i++) {
90     ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
91     ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
92   }
93 
94   /* evaluate communication - message to whom, length of message, and buffer space
95      required. Based on this, buffers are allocated, and data is copied into them */
96   ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
97   ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
98   ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
99   ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
100   for (i=0; i<imax; i++) {
101     ierr  = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector*/
102     idx_i = idx[i];
103     len   = n[i];
104     for (j=0; j<len; j++) {
105       row = idx_i[j];
106       if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
107       ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr);
108       w4[proc]++;
109     }
110     for (j=0; j<size; j++) {
111       if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
112     }
113   }
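  /* At this point, for each destination processor j:
       w4[j] was the per-index-set count of rows owned by j (reset for every IS),
       w1[j] is the total number of row indices destined for j (over all index sets),
       w3[j] is the number of index sets containing at least one row owned by j,
     and w2[] below becomes a 0/1 flag marking the processors that must be messaged. */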
114 
115   nrqs     = 0;              /* no of outgoing messages */
116   msz      = 0;              /* total mesg length (for all procs) */
117   w1[rank] = 0;              /* no mesg sent to itself */
118   w3[rank] = 0;
119   for (i=0; i<size; i++) {
120     if (w1[i])  {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
121   }
122   /* pa - is list of processors to communicate with */
123   ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr);
124   for (i=0,j=0; i<size; i++) {
125     if (w1[i]) {pa[j] = i; j++;}
126   }
127 
128   /* Each message would have a header = 1 + 2*(no of IS) + data */
129   for (i=0; i<nrqs; i++) {
130     j      = pa[i];
131     w1[j] += w2[j] + 2*w3[j];
132     msz   += w1[j];
133   }
134 
135   /* Determine the number of messages to expect, their lengths, and the from-ids */
136   ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
137   ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);
138 
139   /* Now post the Irecvs corresponding to these messages */
140   ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);
141 
142   /* Allocate Memory for outgoing messages */
143   ierr = PetscMalloc4(size,PetscInt*,&outdat,size,PetscInt*,&ptr,msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
144   ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr);
145   ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
146   {
147     PetscInt *iptr = tmp,ict  = 0;
148     for (i=0; i<nrqs; i++) {
149       j         = pa[i];
150       iptr     +=  ict;
151       outdat[j] = iptr;
152       ict       = w1[j];
153     }
154   }
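  /* tmp is one contiguous buffer of length msz; outdat[j] points at the slice of tmp
     reserved for the message to processor j (slices are laid out in the order given
     by pa[], each of length w1[j]). */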
155 
156   /* Form the outgoing messages */
157   /*plug in the headers*/
158   for (i=0; i<nrqs; i++) {
159     j            = pa[i];
160     outdat[j][0] = 0;
161     ierr         = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
162     ptr[j]       = outdat[j] + 2*w3[j] + 1;
163   }
164 
165   /* Memory for doing local proc's work*/
166   {
167     ierr = PetscMalloc5(imax,PetscBT,&table, imax,PetscInt*,&data, imax,PetscInt,&isz,
168                         Mbs*imax,PetscInt,&d_p, (Mbs/PETSC_BITS_PER_BYTE+1)*imax,char,&t_p);CHKERRQ(ierr);
169     ierr = PetscMemzero(table,imax*sizeof(PetscBT));CHKERRQ(ierr);
170     ierr = PetscMemzero(data,imax*sizeof(PetscInt*));CHKERRQ(ierr);
171     ierr = PetscMemzero(isz,imax*sizeof(PetscInt));CHKERRQ(ierr);
172     ierr = PetscMemzero(d_p,Mbs*imax*sizeof(PetscInt));CHKERRQ(ierr);
173     ierr = PetscMemzero(t_p,(Mbs/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char));CHKERRQ(ierr);
174 
175     for (i=0; i<imax; i++) {
176       table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
177       data[i]  = d_p + (Mbs)*i;
178     }
179   }
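  /* Work space for the rows this process handles itself:
       table[i] is a bit table over the Mbs block rows marking which rows are already
                in index set i (used to avoid duplicates),
       data[i]  accumulates the block-row indices belonging to index set i,
       isz[i]   is the current number of entries in data[i]. */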
180 
181   /* Parse the IS and update local tables and the outgoing buf with the data*/
182   {
183     PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j;
184     PetscBT  table_i;
185 
186     for (i=0; i<imax; i++) {
187       ierr    = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
188       n_i     = n[i];
189       table_i = table[i];
190       idx_i   = idx[i];
191       data_i  = data[i];
192       isz_i   = isz[i];
193       for (j=0; j<n_i; j++) {   /* parse the indices of each IS */
194         row  = idx_i[j];
195         ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr);
196         if (proc != rank) { /* copy to the outgoing buffer */
197           ctr[proc]++;
198           *ptr[proc] = row;
199           ptr[proc]++;
200         } else { /* Update the local table */
201           if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row;
202         }
203       }
204       /* Update the headers for the current IS */
205       for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
206         if ((ctr_j = ctr[j])) {
207           outdat_j        = outdat[j];
208           k               = ++outdat_j[0];
209           outdat_j[2*k]   = ctr_j;
210           outdat_j[2*k-1] = i;
211         }
212       }
213       isz[i] = isz_i;
214     }
215   }
216 
217   /*  Now  post the sends */
218   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
219   for (i=0; i<nrqs; ++i) {
220     j    = pa[i];
221     ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
222   }
223 
224   /* No longer need the original indices*/
225   for (i=0; i<imax; ++i) {
226     ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
227   }
228   ierr = PetscFree2(idx,n);CHKERRQ(ierr);
229 
230   for (i=0; i<imax; ++i) {
231     ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
232   }
233 
234   /* Do Local work*/
235   ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);
236 
237   /* Receive messages*/
238   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr);
239   if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);}
240 
241   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
242   if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
243 
244   /* Phase 1 sends are complete - deallocate buffers */
245   ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr);
246   ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);
247 
248   ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&xdata);CHKERRQ(ierr);
249   ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt),&isz1);CHKERRQ(ierr);
250   ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
251   ierr = PetscFree(rbuf[0]);CHKERRQ(ierr);
252   ierr = PetscFree(rbuf);CHKERRQ(ierr);
253 
254   /* Send the data back*/
255   /* Do a global reduction to know the buffer space req for incoming messages*/
256   {
257     PetscMPIInt *rw1;
258 
259     ierr = PetscMalloc(size*sizeof(PetscInt),&rw1);CHKERRQ(ierr);
260     ierr = PetscMemzero(rw1,size*sizeof(PetscInt));CHKERRQ(ierr);
261 
262     for (i=0; i<nrqr; ++i) {
263       proc = recv_status[i].MPI_SOURCE;
264       if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
265       rw1[proc] = isz1[i];
266     }
267 
268     ierr = PetscFree(onodes1);CHKERRQ(ierr);
269     ierr = PetscFree(olengths1);CHKERRQ(ierr);
270 
271     /* Determine the lengths, and from-ids, of the reply messages to expect */
272     ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
273     ierr = PetscFree(rw1);CHKERRQ(ierr);
274   }
275   /* Now post the Irecvs corresponding to these messages */
276   ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);
277 
278   /*  Now  post the sends */
279   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
280   for (i=0; i<nrqr; ++i) {
281     j    = recv_status[i].MPI_SOURCE;
282     ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
283   }
284 
285   /* receive work done on other processors*/
286   {
287     PetscMPIInt idex;
288     PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
289     PetscBT     table_i;
290     MPI_Status  *status2;
291 
292     ierr = PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr);
293     for (i=0; i<nrqs; ++i) {
294       ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
295       /* Process the message*/
296       rbuf2_i = rbuf2[idex];
297       ct1     = 2*rbuf2_i[0]+1;
298       jmax    = rbuf2[idex][0];
299       for (j=1; j<=jmax; j++) {
300         max     = rbuf2_i[2*j];
301         is_no   = rbuf2_i[2*j-1];
302         isz_i   = isz[is_no];
303         data_i  = data[is_no];
304         table_i = table[is_no];
305         for (k=0; k<max; k++,ct1++) {
306           row = rbuf2_i[ct1];
307           if (!PetscBTLookupSet(table_i,row)) data_i[isz_i++] = row;
308         }
309         isz[is_no] = isz_i;
310       }
311     }
312     if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);}
313     ierr = PetscFree(status2);CHKERRQ(ierr);
314   }
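  /* The replies have been merged into the same table/data/isz used for the local work,
     so each index set now holds its original rows plus the one-level overlap, with
     duplicates filtered by the bit tables. */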
315 
316   for (i=0; i<imax; ++i) {
317     ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
318   }
319 
320 
321   ierr = PetscFree(onodes2);CHKERRQ(ierr);
322   ierr = PetscFree(olengths2);CHKERRQ(ierr);
323 
324   ierr = PetscFree(pa);CHKERRQ(ierr);
325   ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr);
326   ierr = PetscFree(rbuf2);CHKERRQ(ierr);
327   ierr = PetscFree(s_waits1);CHKERRQ(ierr);
328   ierr = PetscFree(r_waits1);CHKERRQ(ierr);
329   ierr = PetscFree(s_waits2);CHKERRQ(ierr);
330   ierr = PetscFree(r_waits2);CHKERRQ(ierr);
331   ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr);
332   ierr = PetscFree(s_status);CHKERRQ(ierr);
333   ierr = PetscFree(recv_status);CHKERRQ(ierr);
334   ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
335   ierr = PetscFree(xdata);CHKERRQ(ierr);
336   ierr = PetscFree(isz1);CHKERRQ(ierr);
337   PetscFunctionReturn(0);
338 }
339 
340 #undef __FUNCT__
341 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Local"
342 /*
343    MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap_MPIBAIJ_Once(), to do
344        the work on the local processor.
345 
346      Inputs:
347       C      - MATMPIBAIJ matrix;
348       imax   - total number of index sets processed at a time;
349       table  - an array of imax bit tables (PetscBT), each covering Mbs block rows.
350 
351      Output:
352       isz    - array containing the count of the solution elements corresponding
353                to each index set;
354       data   - array of arrays holding the solution (block row) indices for each index set
355 */
356 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data)
357 {
358   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
359   Mat         A  = c->A,B = c->B;
360   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
361   PetscInt    start,end,val,max,rstart,cstart,*ai,*aj;
362   PetscInt    *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
363   PetscBT     table_i;
364 
365   PetscFunctionBegin;
366   rstart = c->rstartbs;
367   cstart = c->cstartbs;
368   ai     = a->i;
369   aj     = a->j;
370   bi     = b->i;
371   bj     = b->j;
372   garray = c->garray;
373 
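  /* For every block row already in index set i, add the global block-column indices of
     all nonzero blocks in that row: columns of the diagonal part A are shifted by cstart,
     columns of the off-diagonal part B are translated through garray; the bit table
     table_i filters out indices that are already present. */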
374 
375   for (i=0; i<imax; i++) {
376     data_i  = data[i];
377     table_i = table[i];
378     isz_i   = isz[i];
379     for (j=0,max=isz[i]; j<max; j++) {
380       row   = data_i[j] - rstart;
381       start = ai[row];
382       end   = ai[row+1];
383       for (k=start; k<end; k++) { /* Amat */
384         val = aj[k] + cstart;
385         if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val;
386       }
387       start = bi[row];
388       end   = bi[row+1];
389       for (k=start; k<end; k++) { /* Bmat */
390         val = garray[bj[k]];
391         if (!PetscBTLookupSet(table_i,val)) data_i[isz_i++] = val;
392       }
393     }
394     isz[i] = isz_i;
395   }
396   PetscFunctionReturn(0);
397 }
398 #undef __FUNCT__
399 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Receive"
400 /*
401       MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
402          and return the output
403 
404          Input:
405            C    - the matrix
406            nrqr - no of messages being processed.
407            rbuf - an array of pointers to the received requests
408 
409          Output:
410            xdata - array of messages to be sent back
411            isz1  - size of each message
412 
413   For better efficiency perhaps we should malloc each xdata[i] separately;
414 then if a remalloc is required we would only need to copy the data for that one
415 row rather than for all previous rows, as happens now where a single large chunk
416 of memory is used.
417 
418 */
419 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1)
420 {
421   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
422   Mat            A  = c->A,B = c->B;
423   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
424   PetscErrorCode ierr;
425   PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
426   PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
427   PetscInt       val,max1,max2,Mbs,no_malloc =0,*tmp,new_estimate,ctr;
428   PetscInt       *rbuf_i,kmax,rbuf_0;
429   PetscBT        xtable;
430 
431   PetscFunctionBegin;
432   Mbs    = c->Mbs;
433   rstart = c->rstartbs;
434   cstart = c->cstartbs;
435   ai     = a->i;
436   aj     = a->j;
437   bi     = b->i;
438   bj     = b->j;
439   garray = c->garray;
440 
441 
442   for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
443     rbuf_i =  rbuf[i];
444     rbuf_0 =  rbuf_i[0];
445     ct    += rbuf_0;
446     for (j=1; j<=rbuf_0; j++) total_sz += rbuf_i[2*j];
447   }
448 
449   if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs;
450   else        max1 = 1;
451   mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
452   ierr         = PetscMalloc(mem_estimate*sizeof(PetscInt),&xdata[0]);CHKERRQ(ierr);
453   ++no_malloc;
454   ierr = PetscBTCreate(Mbs,&xtable);CHKERRQ(ierr);
455   ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr);
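  /* xdata[0] is a single contiguous buffer holding all reply messages back to back, with
     xdata[i] pointing at the start of the reply for request i; whenever the running count
     ct3 reaches mem_estimate the buffer is grown by a factor of 1.5, copied, and the
     xdata[i] pointers are re-derived from isz1[]. */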
456 
457   ct3 = 0;
458   for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
459     rbuf_i =  rbuf[i];
460     rbuf_0 =  rbuf_i[0];
461     ct1    =  2*rbuf_0+1;
462     ct2    =  ct1;
463     ct3   += ct1;
464     for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/
465       ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr);
466       oct2 = ct2;
467       kmax = rbuf_i[2*j];
468       for (k=0; k<kmax; k++,ct1++) {
469         row = rbuf_i[ct1];
470         if (!PetscBTLookupSet(xtable,row)) {
471           if (!(ct3 < mem_estimate)) {
472             new_estimate = (PetscInt)(1.5*mem_estimate)+1;
473             ierr         = PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);CHKERRQ(ierr);
474             ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
475             ierr         = PetscFree(xdata[0]);CHKERRQ(ierr);
476             xdata[0]     = tmp;
477             mem_estimate = new_estimate; ++no_malloc;
478             for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
479           }
480           xdata[i][ct2++] = row;
481           ct3++;
482         }
483       }
484       for (k=oct2,max2=ct2; k<max2; k++)  {
485         row   = xdata[i][k] - rstart;
486         start = ai[row];
487         end   = ai[row+1];
488         for (l=start; l<end; l++) {
489           val = aj[l] + cstart;
490           if (!PetscBTLookupSet(xtable,val)) {
491             if (!(ct3 < mem_estimate)) {
492               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
493               ierr         = PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);CHKERRQ(ierr);
494               ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
495               ierr         = PetscFree(xdata[0]);CHKERRQ(ierr);
496               xdata[0]     = tmp;
497               mem_estimate = new_estimate; ++no_malloc;
498               for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
499             }
500             xdata[i][ct2++] = val;
501             ct3++;
502           }
503         }
504         start = bi[row];
505         end   = bi[row+1];
506         for (l=start; l<end; l++) {
507           val = garray[bj[l]];
508           if (!PetscBTLookupSet(xtable,val)) {
509             if (!(ct3 < mem_estimate)) {
510               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
511               ierr         = PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);CHKERRQ(ierr);
512               ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
513               ierr         = PetscFree(xdata[0]);CHKERRQ(ierr);
514               xdata[0]     = tmp;
515               mem_estimate = new_estimate; ++no_malloc;
516               for (ctr =1; ctr <=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
517             }
518             xdata[i][ct2++] = val;
519             ct3++;
520           }
521         }
522       }
523       /* Update the header*/
524       xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
525       xdata[i][2*j-1] = rbuf_i[2*j-1];
526     }
527     xdata[i][0] = rbuf_0;
528     xdata[i+1]  = xdata[i] + ct2;
529     isz1[i]     = ct2; /* size of each message */
530   }
531   ierr = PetscBTDestroy(&xtable);CHKERRQ(ierr);
532   ierr = PetscInfo3(C,"Allocated %D bytes, required %D, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr);
533   PetscFunctionReturn(0);
534 }
535 
536 #undef __FUNCT__
537 #define __FUNCT__ "MatGetSubMatrices_MPIBAIJ"
538 PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
539 {
540   IS             *isrow_new,*iscol_new;
541   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
542   PetscErrorCode ierr;
543   PetscInt       nmax,nstages_local,nstages,i,pos,max_no,ncol,nrow,N=C->cmap->N,bs=C->rmap->bs;
544   PetscBool      colflag,*allcolumns,*allrows;
545 
546   PetscFunctionBegin;
547   /* Currently, unsorted column indices will result in inverted column indices in the resulting submatrices. */
548   for (i = 0; i < ismax; ++i) {
549     PetscBool sorted;
550     ierr = ISSorted(iscol[i], &sorted);CHKERRQ(ierr);
551     if (!sorted) SETERRQ1(((PetscObject)iscol[i])->comm, PETSC_ERR_SUP, "Column index set %D not sorted", i);
552   }
553   /* The compression and expansion should be avoided. It does not point
554      out errors and might change the indices, hence it is buggy */
555   ierr = PetscMalloc2(ismax+1,IS,&isrow_new,ismax+1,IS,&iscol_new);CHKERRQ(ierr);
556   ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,ismax,isrow,isrow_new);CHKERRQ(ierr);
557   ierr = ISCompressIndicesGeneral(N,C->cmap->n,bs,ismax,iscol,iscol_new);CHKERRQ(ierr);
558 
559   /* Check for special case: each processor gets entire matrix columns */
560   ierr = PetscMalloc2(ismax+1,PetscBool,&allcolumns,ismax+1,PetscBool,&allrows);CHKERRQ(ierr);
561   for (i=0; i<ismax; i++) {
562     ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr);
563     ierr = ISGetLocalSize(iscol[i],&ncol);CHKERRQ(ierr);
564     if (colflag && ncol == C->cmap->N) allcolumns[i] = PETSC_TRUE;
565     else allcolumns[i] = PETSC_FALSE;
566 
567     ierr = ISIdentity(isrow[i],&colflag);CHKERRQ(ierr);
568     ierr = ISGetLocalSize(isrow[i],&nrow);CHKERRQ(ierr);
569     if (colflag && nrow == C->rmap->N) allrows[i] = PETSC_TRUE;
570     else allrows[i] = PETSC_FALSE;
571   }
572 
573   /* Allocate memory to hold all the submatrices */
574   if (scall != MAT_REUSE_MATRIX) {
575     ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
576   }
577   /* Determine the number of stages through which submatrices are done */
578   nmax = 20*1000000 / (c->Nbs * sizeof(PetscInt));
579   if (!nmax) nmax = 1;
580   nstages_local = ismax/nmax + ((ismax % nmax) ? 1 : 0);
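  /* nmax caps the number of index sets handled per stage so that per-stage work arrays of
     length c->Nbs (such as the column maps) stay around 20 MB; the allreduce below makes
     every process execute the same number of stages, since the local routine is collective. */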
581 
582   /* Make sure every processor loops through the nstages */
583   ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);
584   for (i=0,pos=0; i<nstages; i++) {
585     if (pos+nmax <= ismax) max_no = nmax;
586     else if (pos == ismax) max_no = 0;
587     else                   max_no = ismax-pos;
588     ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,allrows+pos,allcolumns+pos,*submat+pos);CHKERRQ(ierr);
589     pos += max_no;
590   }
591 
592   for (i=0; i<ismax; i++) {
593     ierr = ISDestroy(&isrow_new[i]);CHKERRQ(ierr);
594     ierr = ISDestroy(&iscol_new[i]);CHKERRQ(ierr);
595   }
596   ierr = PetscFree2(isrow_new,iscol_new);CHKERRQ(ierr);
597   ierr = PetscFree2(allcolumns,allrows);CHKERRQ(ierr);
598   PetscFunctionReturn(0);
599 }
600 
601 #if defined(PETSC_USE_CTABLE)
602 #undef __FUNCT__
603 #define __FUNCT__ "PetscGetProc"
604 PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank)
605 {
606   PetscInt       nGlobalNd = proc_gnode[size];
607   PetscMPIInt    fproc;
608   PetscErrorCode ierr;
609 
610   PetscFunctionBegin;
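  /* Guess the owner by assuming block rows are distributed roughly evenly (proportional
     estimate), then correct the guess by walking the ownership ranges in proc_gnode[]
     until proc_gnode[fproc] <= row < proc_gnode[fproc+1]. */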
611   ierr = PetscMPIIntCast((PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)),&fproc);CHKERRQ(ierr);
612   if (fproc > size) fproc = size;
613   while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) {
614     if (row < proc_gnode[fproc]) fproc--;
615     else                         fproc++;
616   }
617   *rank = fproc;
618   PetscFunctionReturn(0);
619 }
620 #endif
621 
622 /* -------------------------------------------------------------------------*/
623 /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
624 #undef __FUNCT__
625 #define __FUNCT__ "MatGetSubMatrices_MPIBAIJ_local"
626 PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allrows,PetscBool *allcolumns,Mat *submats)
627 {
628   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
629   Mat            A  = c->A;
630   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
631   const PetscInt **irow,**icol,*irow_i;
632   PetscInt       *nrow,*ncol,*w3,*w4,start;
633   PetscErrorCode ierr;
634   PetscMPIInt    size,tag0,tag1,tag2,tag3,*w1,*w2,nrqr,idex,end,proc;
635   PetscInt       **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,**rbuf1,row;
636   PetscInt       nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol;
637   PetscInt       **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
638   PetscInt       **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax;
639   PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
640   PetscInt       bs     =C->rmap->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
641   PetscInt       cstart = c->cstartbs,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
642   PetscInt       *bmap  = c->garray,ctmp,rstart=c->rstartbs;
643   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3,*s_waits3;
644   MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2,*r_status3;
645   MPI_Comm       comm;
646   PetscBool      flag;
647   PetscMPIInt    *onodes1,*olengths1;
648   PetscBool      ijonly=c->ijonly; /* private flag indicates only matrix data structures are requested */
649 
650   /* variables below are used for the matrix numerical values - case of !ijonly */
651   MPI_Request *r_waits4,*s_waits4;
652   MPI_Status  *r_status4,*s_status4;
653   MatScalar   **rbuf4,**sbuf_aa,*vals,*mat_a = PETSC_NULL,*sbuf_aa_i,*vworkA = PETSC_NULL,*vworkB = PETSC_NULL;
654   MatScalar   *a_a=a->a,*b_a=b->a;
655 
656 #if defined(PETSC_USE_CTABLE)
657   PetscInt   tt;
658   PetscTable *rmap,*cmap,rmap_i,cmap_i=PETSC_NULL;
659 #else
660   PetscInt **cmap,*cmap_i=PETSC_NULL,*rtable,*rmap_i,**rmap, Mbs = c->Mbs;
661 #endif
662 
663   PetscFunctionBegin;
664   comm = ((PetscObject)C)->comm;
665   tag0 = ((PetscObject)C)->tag;
666   size = c->size;
667   rank = c->rank;
668 
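  /* Communication plan (one phase per tag):
       tag0: send each owner the list of requested block rows (sbuf1/rbuf1),
       tag1: owners reply with the number of nonzero blocks in every requested row (sbuf2/rbuf2),
       tag2: owners send the column indices of those rows (sbuf_aj/rbuf3),
       tag3: owners send the numerical block values (sbuf_aa/rbuf4), skipped when ijonly is set. */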
669   /* Get some new tags to keep the communication clean */
670   ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
671   ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
672   ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);
673 
674 #if defined(PETSC_USE_CTABLE)
675   ierr = PetscMalloc4(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol);CHKERRQ(ierr);
676 #else
677   ierr = PetscMalloc5(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol,Mbs+1,PetscInt,&rtable);CHKERRQ(ierr);
678   /* Create lookup table for the mapping: row -> proc */
679   for (i=0,j=0; i<size; i++) {
680     jmax = C->rmap->range[i+1]/bs;
681     for (; j<jmax; j++) rtable[j] = i;
682   }
683 #endif
684 
685   for (i=0; i<ismax; i++) {
686     if (allrows[i]) {
687       irow[i] = PETSC_NULL;
688       nrow[i] = C->rmap->N/bs;
689     } else {
690       ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
691       ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
692     }
693 
694     if (allcolumns[i]) {
695       icol[i] = PETSC_NULL;
696       ncol[i] = C->cmap->N/bs;
697     } else {
698       ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
699       ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
700     }
701   }
702 
703   /* evaluate communication - message to whom, length of message, and buffer space
704      required. Based on this, buffers are allocated, and data is copied into them */
705   ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
706   ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
707   ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
708   ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
709   for (i=0; i<ismax; i++) {
710     ierr   = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector*/
711     jmax   = nrow[i];
712     irow_i = irow[i];
713     for (j=0; j<jmax; j++) {
714       if (allrows[i]) row = j;
715       else row = irow_i[j];
716 
717 #if defined(PETSC_USE_CTABLE)
718       ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
719 #else
720       proc = rtable[row];
721 #endif
722       w4[proc]++;
723     }
724     for (j=0; j<size; j++) {
725       if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
726     }
727   }
728 
729   nrqs     = 0;              /* no of outgoing messages */
730   msz      = 0;              /* total mesg length for all procs */
731   w1[rank] = 0;              /* no mesg sent to itself */
732   w3[rank] = 0;
733   for (i=0; i<size; i++) {
734     if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
735   }
736   ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr); /*(proc -array)*/
737   for (i=0,j=0; i<size; i++) {
738     if (w1[i]) { pa[j] = i; j++; }
739   }
740 
741   /* Each message would have a header = 1 + 2*(no of IS) + data */
742   for (i=0; i<nrqs; i++) {
743     j     = pa[i];
744     w1[j] += w2[j] + 2* w3[j];
745     msz   += w1[j];
746   }
747 
748   /* Determine the number of messages to expect, their lengths, and the from-ids */
749   ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
750   ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);
751 
752   /* Now post the Irecvs corresponding to these messages */
753   ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);
754 
755   ierr = PetscFree(onodes1);CHKERRQ(ierr);
756   ierr = PetscFree(olengths1);CHKERRQ(ierr);
757 
758   /* Allocate Memory for outgoing messages */
759   ierr = PetscMalloc4(size,PetscInt*,&sbuf1,size,PetscInt*,&ptr,2*msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
760   ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr);
761   ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
762   {
763     PetscInt *iptr = tmp,ict = 0;
764     for (i=0; i<nrqs; i++) {
765       j        = pa[i];
766       iptr    += ict;
767       sbuf1[j] = iptr;
768       ict      = w1[j];
769     }
770   }
771 
772   /* Form the outgoing messages */
773   /* Initialise the header space */
774   for (i=0; i<nrqs; i++) {
775     j           = pa[i];
776     sbuf1[j][0] = 0;
777     ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
778     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
779   }
780 
781   /* Parse the isrow and copy data into outbuf */
782   for (i=0; i<ismax; i++) {
783     ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
784     irow_i = irow[i];
785     jmax   = nrow[i];
786     for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
787       if (allrows[i]) row = j;
788       else row = irow_i[j];
789 
790 #if defined(PETSC_USE_CTABLE)
791       ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
792 #else
793       proc = rtable[row];
794 #endif
795       if (proc != rank) { /* copy to the outgoing buf*/
796         ctr[proc]++;
797         *ptr[proc] = row;
798         ptr[proc]++;
799       }
800     }
801     /* Update the headers for the current IS */
802     for (j=0; j<size; j++) { /* Can Optimise this loop too */
803       if ((ctr_j = ctr[j])) {
804         sbuf1_j        = sbuf1[j];
805         k              = ++sbuf1_j[0];
806         sbuf1_j[2*k]   = ctr_j;
807         sbuf1_j[2*k-1] = i;
808       }
809     }
810   }
811 
812   /*  Now  post the sends */
813   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
814   for (i=0; i<nrqs; ++i) {
815     j    = pa[i];
816     ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
817   }
818 
819   /* Post Receives to capture the buffer size */
820   ierr     = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
821   ierr     = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf2);CHKERRQ(ierr);
822   rbuf2[0] = tmp + msz;
823   for (i=1; i<nrqs; ++i) {
824     j        = pa[i];
825     rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
826   }
827   for (i=0; i<nrqs; ++i) {
828     j    = pa[i];
829     ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
830   }
831 
832   /* Send to other procs the buf size they should allocate */
833 
834   /* Receive messages*/
835   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
836   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
837   ierr = PetscMalloc3(nrqr+1,PetscInt*,&sbuf2,nrqr,PetscInt,&req_size,nrqr,PetscInt,&req_source);CHKERRQ(ierr);
838   {
839     Mat_SeqBAIJ *sA  = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
840     PetscInt    *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;
841 
842     for (i=0; i<nrqr; ++i) {
843       ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
844 
845       req_size[idex] = 0;
846       rbuf1_i        = rbuf1[idex];
847       start          = 2*rbuf1_i[0] + 1;
848       ierr           = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
849       ierr           = PetscMalloc(end*sizeof(PetscInt),&sbuf2[idex]);CHKERRQ(ierr);
850       sbuf2_i        = sbuf2[idex];
851       for (j=start; j<end; j++) {
852         id              = rbuf1_i[j] - rstart;
853         ncols           = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
854         sbuf2_i[j]      = ncols;
855         req_size[idex] += ncols;
856       }
857       req_source[idex] = r_status1[i].MPI_SOURCE;
858       /* form the header */
859       sbuf2_i[0] = req_size[idex];
860       for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j];
861       ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr);
862     }
863   }
864   ierr = PetscFree(r_status1);CHKERRQ(ierr);
865   ierr = PetscFree(r_waits1);CHKERRQ(ierr);
866 
867   /*  recv buffer sizes */
868   /* Receive messages*/
869   ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf3);CHKERRQ(ierr);
870   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr);
871   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);
872   if (!ijonly) {
873     ierr = PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);CHKERRQ(ierr);
874     ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr);
875   }
876 
877   for (i=0; i<nrqs; ++i) {
878     ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr);
879     ierr = PetscMalloc(rbuf2[idex][0]*sizeof(PetscInt),&rbuf3[idex]);CHKERRQ(ierr);
880     ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPIU_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr);
881     if (!ijonly) {
882       ierr = PetscMalloc(rbuf2[idex][0]*bs2*sizeof(MatScalar),&rbuf4[idex]);CHKERRQ(ierr);
883       ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr);
884     }
885   }
886   ierr = PetscFree(r_status2);CHKERRQ(ierr);
887   ierr = PetscFree(r_waits2);CHKERRQ(ierr);
888 
889   /* Wait on sends1 and sends2 */
890   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
891   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);
892 
893   if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
894   if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
895   ierr = PetscFree(s_status1);CHKERRQ(ierr);
896   ierr = PetscFree(s_status2);CHKERRQ(ierr);
897   ierr = PetscFree(s_waits1);CHKERRQ(ierr);
898   ierr = PetscFree(s_waits2);CHKERRQ(ierr);
899 
900   /* Now allocate buffers for a->j, and send them off */
901   ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&sbuf_aj);CHKERRQ(ierr);
902   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
903   ierr = PetscMalloc((j+1)*sizeof(PetscInt),&sbuf_aj[0]);CHKERRQ(ierr);
904   for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
905 
906   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr);
907   {
908     for (i=0; i<nrqr; i++) {
909       rbuf1_i   = rbuf1[i];
910       sbuf_aj_i = sbuf_aj[i];
911       ct1       = 2*rbuf1_i[0] + 1;
912       ct2       = 0;
913       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
914         kmax = rbuf1[i][2*j];
915         for (k=0; k<kmax; k++,ct1++) {
916           row    = rbuf1_i[ct1] - rstart;
917           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
918           ncols  = nzA + nzB;
919           cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
920 
921           /* load the column indices for this row into cols*/
922           cols = sbuf_aj_i + ct2;
923           for (l=0; l<nzB; l++) {
924             if ((ctmp = bmap[cworkB[l]]) < cstart) cols[l] = ctmp;
925             else break;
926           }
927           imark = l;
928           for (l=0; l<nzA; l++)   cols[imark+l] = cstart + cworkA[l];
929           for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]];
930           ct2 += ncols;
931         }
932       }
933       ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
934     }
935   }
936   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr);
937   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr);
938 
939   /* Allocate buffers for a->a, and send them off */
940   if (!ijonly) {
941     ierr = PetscMalloc((nrqr+1)*sizeof(MatScalar*),&sbuf_aa);CHKERRQ(ierr);
942     for (i=0,j=0; i<nrqr; i++) j += req_size[i];
943     ierr = PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);CHKERRQ(ierr);
944     for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;
945 
946     ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr);
947     {
948       for (i=0; i<nrqr; i++) {
949         rbuf1_i   = rbuf1[i];
950         sbuf_aa_i = sbuf_aa[i];
951         ct1       = 2*rbuf1_i[0]+1;
952         ct2       = 0;
953         for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
954           kmax = rbuf1_i[2*j];
955           for (k=0; k<kmax; k++,ct1++) {
956             row    = rbuf1_i[ct1] - rstart;
957             nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
958             ncols  = nzA + nzB;
959             cworkA = a_j + a_i[row];     cworkB = b_j + b_i[row];
960             vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;
961 
962             /* load the column values for this row into vals*/
963             vals = sbuf_aa_i+ct2*bs2;
964             for (l=0; l<nzB; l++) {
965               if ((bmap[cworkB[l]]) < cstart) {
966                 ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
967               } else break;
968             }
969             imark = l;
970             for (l=0; l<nzA; l++) {
971               ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
972             }
973             for (l=imark; l<nzB; l++) {
974               ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
975             }
976             ct2 += ncols;
977           }
978         }
979         ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
980       }
981     }
982     ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
983     ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
984   }
985   ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
986   ierr = PetscFree(rbuf1);CHKERRQ(ierr);
987 
988   /* Form the matrix */
989   /* create col map: global col of C -> local col of submatrices */
990   {
991     const PetscInt *icol_i;
992 #if defined(PETSC_USE_CTABLE)
993     ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&cmap);CHKERRQ(ierr);
994     for (i=0; i<ismax; i++) {
995       if (!allcolumns[i]) {
996         ierr   = PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);CHKERRQ(ierr);
997         jmax   = ncol[i];
998         icol_i = icol[i];
999         cmap_i = cmap[i];
1000         for (j=0; j<jmax; j++) {
1001           ierr = PetscTableAdd(cmap_i,icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
1002         }
1003       } else {
1004         cmap[i] = PETSC_NULL;
1005       }
1006     }
1007 #else
1008     ierr = PetscMalloc(ismax*sizeof(PetscInt*),&cmap);CHKERRQ(ierr);
1009     for (i=0; i<ismax; i++) {
1010       if (!allcolumns[i]) {
1011         ierr   = PetscMalloc(c->Nbs*sizeof(PetscInt),&cmap[i]);CHKERRQ(ierr);
1012         ierr   = PetscMemzero(cmap[i],c->Nbs*sizeof(PetscInt));CHKERRQ(ierr);
1013         jmax   = ncol[i];
1014         icol_i = icol[i];
1015         cmap_i = cmap[i];
1016         for (j=0; j<jmax; j++) {
1017           cmap_i[icol_i[j]] = j+1;
1018         }
1019       } else { /* allcolumns[i] */
1020         cmap[i] = PETSC_NULL;
1021       }
1022     }
1023 #endif
1024   }
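  /* cmap[i] maps a global block column of C to (local column + 1) in submatrix i, with 0 (or a
     missing table entry) meaning the column is not selected; it is PETSC_NULL when all columns
     are requested, in which case global and local column numbers coincide. */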
1025 
1026   /* Create lens which is required for MatCreate... */
1027   for (i=0,j=0; i<ismax; i++) j += nrow[i];
1028   ierr    = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ j*sizeof(PetscInt),&lens);CHKERRQ(ierr);
1029   lens[0] = (PetscInt*)(lens + ismax);
1030   ierr    = PetscMemzero(lens[0],j*sizeof(PetscInt));CHKERRQ(ierr);
1031   for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1];
1032 
1033   /* Update lens from local data */
1034   for (i=0; i<ismax; i++) {
1035     jmax = nrow[i];
1036     if (!allcolumns[i]) cmap_i = cmap[i];
1037     irow_i = irow[i];
1038     lens_i = lens[i];
1039     for (j=0; j<jmax; j++) {
1040       if (allrows[i]) row = j;
1041       else row = irow_i[j];
1042 
1043 #if defined(PETSC_USE_CTABLE)
1044       ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
1045 #else
1046       proc = rtable[row];
1047 #endif
1048       if (proc == rank) {
1049         /* Get indices from matA and then from matB */
1050         row    = row - rstart;
1051         nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1052         cworkA =  a_j + a_i[row]; cworkB = b_j + b_i[row];
1053         if (!allcolumns[i]) {
1054 #if defined(PETSC_USE_CTABLE)
1055           for (k=0; k<nzA; k++) {
1056             ierr = PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr);
1057             if (tt) lens_i[j]++;
1058           }
1059           for (k=0; k<nzB; k++) {
1060             ierr = PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr);
1061             if (tt) lens_i[j]++;
1062           }
1063 
1064 #else
1065           for (k=0; k<nzA; k++) {
1066             if (cmap_i[cstart + cworkA[k]]) lens_i[j]++;
1067           }
1068           for (k=0; k<nzB; k++) {
1069             if (cmap_i[bmap[cworkB[k]]]) lens_i[j]++;
1070           }
1071 #endif
1072         } else { /* allcolumns */
1073           lens_i[j] = nzA + nzB;
1074         }
1075       }
1076     }
1077   }
1078 #if defined(PETSC_USE_CTABLE)
1079   /* Create row map*/
1080   ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&rmap);CHKERRQ(ierr);
1081   for (i=0; i<ismax; i++) {
1082     ierr = PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);CHKERRQ(ierr);
1083   }
1084 #else
1085   /* Create row map*/
1086   ierr    = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ ismax*Mbs*sizeof(PetscInt),&rmap);CHKERRQ(ierr);
1087   rmap[0] = (PetscInt*)(rmap + ismax);
1088   ierr    = PetscMemzero(rmap[0],ismax*Mbs*sizeof(PetscInt));CHKERRQ(ierr);
1089   for (i=1; i<ismax; i++) rmap[i] = rmap[i-1] + Mbs;
1090 #endif
1091   for (i=0; i<ismax; i++) {
1092     irow_i = irow[i];
1093     jmax   = nrow[i];
1094 #if defined(PETSC_USE_CTABLE)
1095     rmap_i = rmap[i];
1096     for (j=0; j<jmax; j++) {
1097       if (allrows[i]) {
1098         ierr = PetscTableAdd(rmap_i,j+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
1099       } else {
1100         ierr = PetscTableAdd(rmap_i,irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
1101       }
1102     }
1103 #else
1104     rmap_i = rmap[i];
1105     for (j=0; j<jmax; j++) {
1106       if (allrows[i]) rmap_i[j] = j;
1107       else rmap_i[irow_i[j]] = j;
1108     }
1109 #endif
1110   }
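  /* rmap[i] maps a global block row of C to its local row in submatrix i: the plain array version
     stores the local row directly, while the PetscTable version stores (local row + 1) and the
     lookups below subtract 1 after PetscTableFind(). */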
1111 
1112   /* Update lens from offproc data */
1113   {
1114     PetscInt    *rbuf2_i,*rbuf3_i,*sbuf1_i;
1115     PetscMPIInt ii;
1116 
1117     for (tmp2=0; tmp2<nrqs; tmp2++) {
1118       ierr    = MPI_Waitany(nrqs,r_waits3,&ii,r_status3+tmp2);CHKERRQ(ierr);
1119       idex    = pa[ii];
1120       sbuf1_i = sbuf1[idex];
1121       jmax    = sbuf1_i[0];
1122       ct1     = 2*jmax+1;
1123       ct2     = 0;
1124       rbuf2_i = rbuf2[ii];
1125       rbuf3_i = rbuf3[ii];
1126       for (j=1; j<=jmax; j++) {
1127         is_no  = sbuf1_i[2*j-1];
1128         max1   = sbuf1_i[2*j];
1129         lens_i = lens[is_no];
1130         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1131         rmap_i = rmap[is_no];
1132         for (k=0; k<max1; k++,ct1++) {
1133 #if defined(PETSC_USE_CTABLE)
1134           ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
1135           row--;
1136           if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
1137 #else
1138           row = rmap_i[sbuf1_i[ct1]];  /* the val in the new matrix to be */
1139 #endif
1140           max2 = rbuf2_i[ct1];
1141           for (l=0; l<max2; l++,ct2++) {
1142             if (!allcolumns[is_no]) {
1143 #if defined(PETSC_USE_CTABLE)
1144               ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr);
1145               if (tt) lens_i[row]++;
1146 #else
1147               if (cmap_i[rbuf3_i[ct2]]) lens_i[row]++;
1148 #endif
1149             } else { /* allcolumns */
1150               lens_i[row]++;
1151             }
1152           }
1153         }
1154       }
1155     }
1156   }
1157   ierr = PetscFree(r_status3);CHKERRQ(ierr);
1158   ierr = PetscFree(r_waits3);CHKERRQ(ierr);
1159   if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);}
1160   ierr = PetscFree(s_status3);CHKERRQ(ierr);
1161   ierr = PetscFree(s_waits3);CHKERRQ(ierr);
1162 
1163   /* Create the submatrices */
1164   if (scall == MAT_REUSE_MATRIX) {
1165     if (ijonly) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MAT_REUSE_MATRIX and ijonly are not supported yet");
1166     /*
1167         Assumes new rows are same length as the old rows, hence bug!
1168     */
1169     for (i=0; i<ismax; i++) {
1170       mat = (Mat_SeqBAIJ*)(submats[i]->data);
1171       if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || C->rmap->bs != bs)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
1172       ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs *sizeof(PetscInt),&flag);CHKERRQ(ierr);
1173       if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. wrong no of nonzeros");
1174       /* Initialize matrix as if empty */
1175       ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(PetscInt));CHKERRQ(ierr);
1176 
1177       submats[i]->factortype = C->factortype;
1178     }
1179   } else {
1180     PetscInt bs_tmp;
1181     if (ijonly) bs_tmp = 1;
1182     else        bs_tmp = bs;
1183     for (i=0; i<ismax; i++) {
1184       ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
1185       ierr = MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,nrow[i]*bs_tmp,ncol[i]*bs_tmp);CHKERRQ(ierr);
1186       ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
1187       ierr = MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr);
1188       ierr = MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); /* this subroutine is used by SBAIJ routines */
1189     }
1190   }
1191 
1192   /* Assemble the matrices */
1193   /* First assemble the local rows */
1194   {
1195     PetscInt  ilen_row,*imat_ilen,*imat_j,*imat_i;
1196     MatScalar *imat_a = PETSC_NULL;
1197 
1198     for (i=0; i<ismax; i++) {
1199       mat       = (Mat_SeqBAIJ*)submats[i]->data;
1200       imat_ilen = mat->ilen;
1201       imat_j    = mat->j;
1202       imat_i    = mat->i;
1203       if (!ijonly) imat_a = mat->a;
1204       if (!allcolumns[i]) cmap_i = cmap[i];
1205       rmap_i = rmap[i];
1206       irow_i = irow[i];
1207       jmax   = nrow[i];
1208       for (j=0; j<jmax; j++) {
1209         if (allrows[i]) row = j;
1210         else row = irow_i[j];
1211 
1212 #if defined(PETSC_USE_CTABLE)
1213         ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
1214 #else
1215         proc = rtable[row];
1216 #endif
1217         if (proc == rank) {
1218           row    = row - rstart;
1219           nzA    = a_i[row+1] - a_i[row];
1220           nzB    = b_i[row+1] - b_i[row];
1221           cworkA = a_j + a_i[row];
1222           cworkB = b_j + b_i[row];
1223           if (!ijonly) {
1224             vworkA = a_a + a_i[row]*bs2;
1225             vworkB = b_a + b_i[row]*bs2;
1226           }
1227 #if defined(PETSC_USE_CTABLE)
1228           ierr = PetscTableFind(rmap_i,row+rstart+1,&row);CHKERRQ(ierr);
1229           row--;
1230           if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
1231 #else
1232           row = rmap_i[row + rstart];
1233 #endif
1234           mat_i = imat_i[row];
1235           if (!ijonly) mat_a = imat_a + mat_i*bs2;
1236           mat_j    = imat_j + mat_i;
1237           ilen_row = imat_ilen[row];
1238 
1239           /* load the column indices for this row into cols*/
1240           if (!allcolumns[i]) {
1241             for (l=0; l<nzB; l++) {
1242               if ((ctmp = bmap[cworkB[l]]) < cstart) {
1243 #if defined(PETSC_USE_CTABLE)
1244                 ierr = PetscTableFind(cmap_i,ctmp+1,&tcol);CHKERRQ(ierr);
1245                 if (tcol) {
1246 #else
1247                 if ((tcol = cmap_i[ctmp])) {
1248 #endif
1249                   *mat_j++ = tcol - 1;
1250                   ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1251                   mat_a   += bs2;
1252                   ilen_row++;
1253                 }
1254               } else break;
1255             }
1256             imark = l;
1257             for (l=0; l<nzA; l++) {
1258 #if defined(PETSC_USE_CTABLE)
1259               ierr = PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr);
1260               if (tcol) {
1261 #else
1262               if ((tcol = cmap_i[cstart + cworkA[l]])) {
1263 #endif
1264                 *mat_j++ = tcol - 1;
1265                 if (!ijonly) {
1266                   ierr   = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1267                   mat_a += bs2;
1268                 }
1269                 ilen_row++;
1270               }
1271             }
1272             for (l=imark; l<nzB; l++) {
1273 #if defined(PETSC_USE_CTABLE)
1274               ierr = PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr);
1275               if (tcol) {
1276 #else
1277               if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1278 #endif
1279                 *mat_j++ = tcol - 1;
1280                 if (!ijonly) {
1281                   ierr   = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1282                   mat_a += bs2;
1283                 }
1284                 ilen_row++;
1285               }
1286             }
1287           } else { /* allcolumns */
1288             for (l=0; l<nzB; l++) {
1289               if ((ctmp = bmap[cworkB[l]]) < cstart) {
1290                 *mat_j++ = ctmp;
1291                 ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1292                 mat_a   += bs2;
1293                 ilen_row++;
1294               } else break;
1295             }
1296             imark = l;
1297             for (l=0; l<nzA; l++) {
1298               *mat_j++ = cstart+cworkA[l];
1299               if (!ijonly) {
1300                 ierr   = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1301                 mat_a += bs2;
1302               }
1303               ilen_row++;
1304             }
1305             for (l=imark; l<nzB; l++) {
1306               *mat_j++ = bmap[cworkB[l]];
1307               if (!ijonly) {
1308                 ierr   = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1309                 mat_a += bs2;
1310               }
1311               ilen_row++;
1312             }
1313           }
1314           imat_ilen[row] = ilen_row;
1315         }
1316       }
1317     }
1318   }
1319 
1320   /*   Now assemble the off proc rows*/
1321   {
1322     PetscInt    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
1323     PetscInt    *imat_j,*imat_i;
1324     MatScalar   *imat_a = PETSC_NULL,*rbuf4_i = PETSC_NULL;
1325     PetscMPIInt ii;
1326 
1327     for (tmp2=0; tmp2<nrqs; tmp2++) {
1328       if (ijonly) ii = tmp2;
1329       else {
1330         ierr = MPI_Waitany(nrqs,r_waits4,&ii,r_status4+tmp2);CHKERRQ(ierr);
1331       }
1332       idex    = pa[ii];
1333       sbuf1_i = sbuf1[idex];
1334       jmax    = sbuf1_i[0];
1335       ct1     = 2*jmax + 1;
1336       ct2     = 0;
1337       rbuf2_i = rbuf2[ii];
1338       rbuf3_i = rbuf3[ii];
1339       if (!ijonly) rbuf4_i = rbuf4[ii];
1340       for (j=1; j<=jmax; j++) {
1341         is_no = sbuf1_i[2*j-1];
1342         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1343         rmap_i    = rmap[is_no];
1344         mat       = (Mat_SeqBAIJ*)submats[is_no]->data;
1345         imat_ilen = mat->ilen;
1346         imat_j    = mat->j;
1347         imat_i    = mat->i;
1348         if (!ijonly) imat_a = mat->a;
1349         max1 = sbuf1_i[2*j];
1350         for (k=0; k<max1; k++,ct1++) {
1351           row = sbuf1_i[ct1];
1352 #if defined(PETSC_USE_CTABLE)
1353           ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr);
1354           row--;
1355           if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
1356 #else
1357           row = rmap_i[row];
1358 #endif
1359           ilen  = imat_ilen[row];
1360           mat_i = imat_i[row];
1361           if (!ijonly) mat_a = imat_a + mat_i*bs2;
1362           mat_j = imat_j + mat_i;
1363           max2  = rbuf2_i[ct1];
1364 
1365           if (!allcolumns[is_no]) {
1366             for (l=0; l<max2; l++,ct2++) {
1367 #if defined(PETSC_USE_CTABLE)
1368               ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
1369               if (tcol) {
1370 #else
1371               if ((tcol = cmap_i[rbuf3_i[ct2]])) {
1372 #endif
1373                 *mat_j++ = tcol - 1;
1374                 if (!ijonly) {
1375                   ierr   = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1376                   mat_a += bs2;
1377                 }
1378                 ilen++;
1379               }
1380             }
1381           } else { /* allcolumns */
1382             for (l=0; l<max2; l++,ct2++) {
1383               *mat_j++ = rbuf3_i[ct2];
1384               if (!ijonly) {
1385                 ierr   = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1386                 mat_a += bs2;
1387               }
1388               ilen++;
1389             }
1390           }
1391           imat_ilen[row] = ilen;
1392         }
1393       }
1394     }
1395   }
1396   if (!ijonly) {
1397     ierr = PetscFree(r_status4);CHKERRQ(ierr);
1398     ierr = PetscFree(r_waits4);CHKERRQ(ierr);
1399     if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);}
1400     ierr = PetscFree(s_waits4);CHKERRQ(ierr);
1401     ierr = PetscFree(s_status4);CHKERRQ(ierr);
1402   }
1403 
1404   /* Restore the indices */
1405   for (i=0; i<ismax; i++) {
1406     if (!allrows[i]) {
1407       ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
1408     }
1409     if (!allcolumns[i]) {
1410       ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
1411     }
1412   }
1413 
1414   /* Destroy allocated memory */
1415 #if defined(PETSC_USE_CTABLE)
1416   ierr = PetscFree4(irow,icol,nrow,ncol);CHKERRQ(ierr);
1417 #else
1418   ierr = PetscFree5(irow,icol,nrow,ncol,rtable);CHKERRQ(ierr);
1419 #endif
1420   ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);
1421   ierr = PetscFree(pa);CHKERRQ(ierr);
1422 
1423   ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr);
1424   ierr = PetscFree(sbuf1);CHKERRQ(ierr);
1425   ierr = PetscFree(rbuf2);CHKERRQ(ierr);
1426   for (i=0; i<nrqr; ++i) {
1427     ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
1428   }
1429   for (i=0; i<nrqs; ++i) {
1430     ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
1431   }
1432   ierr = PetscFree3(sbuf2,req_size,req_source);CHKERRQ(ierr);
1433   ierr = PetscFree(rbuf3);CHKERRQ(ierr);
1434   ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
1435   ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
1436   if (!ijonly) {
1437     for (i=0; i<nrqs; ++i) {ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);}
1438     ierr = PetscFree(rbuf4);CHKERRQ(ierr);
1439     ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
1440     ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);
1441   }
1442 
1443 #if defined(PETSC_USE_CTABLE)
1444   for (i=0; i<ismax; i++) {
1445     ierr = PetscTableDestroy((PetscTable*)&rmap[i]);CHKERRQ(ierr);
1446   }
1447 #endif
1448   ierr = PetscFree(rmap);CHKERRQ(ierr);
1449 
1450   for (i=0; i<ismax; i++) {
1451     if (!allcolumns[i]) {
1452 #if defined(PETSC_USE_CTABLE)
1453       ierr = PetscTableDestroy((PetscTable*)&cmap[i]);CHKERRQ(ierr);
1454 #else
1455       ierr = PetscFree(cmap[i]);CHKERRQ(ierr);
1456 #endif
1457     }
1458   }
1459   ierr = PetscFree(cmap);CHKERRQ(ierr);
1460   ierr = PetscFree(lens);CHKERRQ(ierr);
1461 
1462   for (i=0; i<ismax; i++) {
1463     ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1464     ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1465   }
1466 
1467   c->ijonly = PETSC_FALSE; /* set back to the default */
1468   PetscFunctionReturn(0);
1469 }
1470 
1471