xref: /petsc/src/mat/impls/baij/mpi/baijov.c (revision 794bee337f0d5af71ccfe93f8f7f88922f7fb150)
1 
2 /*
3    Routines to compute overlapping regions of a parallel MPI matrix
4    and to find submatrices that are spread across processors.
5 */
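/*
   A minimal usage sketch of the public interface implemented here (illustrative
   only - matrix/index-set creation and error handling are elided):

       Mat C;       parallel MATMPIBAIJ matrix, already assembled
       IS  is[2];   one index set of global row indices per local subdomain

       ierr = MatIncreaseOverlap(C,2,is,1);CHKERRQ(ierr);

   After the call each is[i] has been enlarged by one level of overlap, i.e. by the
   rows coupled to it through the nonzero structure of C.
*/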
6 #include <../src/mat/impls/baij/mpi/mpibaij.h>
7 #include <petscbt.h>
8 
9 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat,PetscInt,char **,PetscInt*,PetscInt**);
10 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat,PetscInt,PetscInt **,PetscInt**,PetscInt*);
11 extern PetscErrorCode MatGetRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
12 extern PetscErrorCode MatRestoreRow_MPIBAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
13 
14 #undef __FUNCT__
15 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ"
16 PetscErrorCode MatIncreaseOverlap_MPIBAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
17 {
18   PetscErrorCode ierr;
19   PetscInt       i,N=C->cmap->N, bs=C->rmap->bs;
20   IS             *is_new;
21 
22   PetscFunctionBegin;
23   ierr = PetscMalloc(imax*sizeof(IS),&is_new);CHKERRQ(ierr);
24   /* Convert the indices into block format */
25   ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,imax,is,is_new);CHKERRQ(ierr);
26   if (ov < 0){ SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified\n");}
27   for (i=0; i<ov; ++i) {
28     ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr);
29   }
30   for (i=0; i<imax; i++) {ierr = ISDestroy(&is[i]);CHKERRQ(ierr);}
31   ierr = ISExpandIndicesGeneral(N,N,bs,imax,is_new,is);CHKERRQ(ierr);
32   for (i=0; i<imax; i++) {ierr = ISDestroy(&is_new[i]);CHKERRQ(ierr);}
33   ierr = PetscFree(is_new);CHKERRQ(ierr);
34   PetscFunctionReturn(0);
35 }
36 
37 /*
38   Sample message format:
39   If processor A wants processor B to process some elements corresponding
40   to index sets is[1] and is[5], the message is laid out as
41   mesg[0] = 2   (number of index sets in the message)
42   -----------
43   mesg[1] = 1  => is[1]
44   mesg[2] = size of is[1];
45   -----------
46   mesg[3] = 5  => is[5]
47   mesg[4] = size of is[5];
48   -----------
49   mesg[5]
50   mesg[n]  data(is[1])
51   -----------
52   mesg[n+1]
53   mesg[m]  data(is[5])
54   -----------
55   i.e. a header of length 1 + 2*(number of index sets) followed by the data.
56   Notes:
57   nrqs - number of requests sent (or to be sent out)
58   nrqr - number of requests received (which have been or are yet to be processed)
59 */
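/*
  A concrete (made-up) instance of the layout above: suppose processor A asks
  processor B to process block rows {3,7,9} for is[1] and block rows {11,12}
  for is[5].  The outgoing buffer for B then contains the 10 entries

     mesg = { 2,          two index sets in this message
              1, 3,       is[1], 3 entries
              5, 2,       is[5], 2 entries
              3, 7, 9,    data(is[1])
              11, 12 }    data(is[5])

  which matches the length w1[B] = 5 (data) + w2[B] (=1) + 2*w3[B] (=4) computed below.
*/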
60 #undef __FUNCT__
61 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Once"
62 PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Once(Mat C,PetscInt imax,IS is[])
63 {
64   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
65   const PetscInt **idx,*idx_i;
66   PetscInt       *n,*w3,*w4,**data,len;
67   PetscErrorCode ierr;
68   PetscMPIInt    size,rank,tag1,tag2,*w2,*w1,nrqr;
69   PetscInt       Mbs,i,j,k,**rbuf,row,proc=-1,nrqs,msz,**outdat,**ptr;
70   PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2,*d_p;
71   PetscMPIInt    *onodes1,*olengths1,*onodes2,*olengths2;
72   PetscBT        *table;
73   MPI_Comm       comm;
74   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
75   MPI_Status     *s_status,*recv_status;
76   char           *t_p;
77 
78   PetscFunctionBegin;
79   comm   = ((PetscObject)C)->comm;
80   size   = c->size;
81   rank   = c->rank;
82   Mbs    = c->Mbs;
83 
84   ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
85   ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
86 
87   ierr = PetscMalloc2(imax+1,const PetscInt*,&idx,imax,PetscInt,&n);CHKERRQ(ierr);
88 
89   for (i=0; i<imax; i++) {
90     ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
91     ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
92   }
93 
94   /* Evaluate communication - to whom each message goes, the length of each message, and the
95      buffer space required. Based on this, buffers are allocated and data copied into them. */
96   ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
97   ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
98   ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
99   ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
100   for (i=0; i<imax; i++) {
101     ierr  = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector*/
102     idx_i = idx[i];
103     len   = n[i];
104     for (j=0; j<len; j++) {
105       row  = idx_i[j];
106       if (row < 0) {
107         SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
108       }
109       ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr);
110       w4[proc]++;
111     }
112     for (j=0; j<size; j++){
113       if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
114     }
115   }
116 
117   nrqs     = 0;              /* no of outgoing messages */
118   msz      = 0;              /* total mesg length (for all procs) */
119   w1[rank] = 0;              /* no mesg sent to itself */
120   w3[rank] = 0;
121   for (i=0; i<size; i++) {
122     if (w1[i])  {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
123   }
124   /* pa - is list of processors to communicate with */
125   ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr);
126   for (i=0,j=0; i<size; i++) {
127     if (w1[i]) {pa[j] = i; j++;}
128   }
129 
130   /* Each message would have a header = 1 + 2*(no of IS) + data */
131   for (i=0; i<nrqs; i++) {
132     j      = pa[i];
133     w1[j] += w2[j] + 2*w3[j];
134     msz   += w1[j];
135   }
136 
137   /* Determine the number of messages to expect, their lengths, and their from-ids */
138   ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
139   ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);
140 
141   /* Now post the Irecvs corresponding to these messages */
142   ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);
143 
144   /* Allocate Memory for outgoing messages */
145   ierr = PetscMalloc4(size,PetscInt*,&outdat,size,PetscInt*,&ptr,msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
146   ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr);
147   ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
148   {
149     PetscInt *iptr = tmp,ict  = 0;
150     for (i=0; i<nrqs; i++) {
151       j         = pa[i];
152       iptr     +=  ict;
153       outdat[j] = iptr;
154       ict       = w1[j];
155     }
156   }
157 
158   /* Form the outgoing messages */
159   /*plug in the headers*/
160   for (i=0; i<nrqs; i++) {
161     j            = pa[i];
162     outdat[j][0] = 0;
163     ierr = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
164     ptr[j]       = outdat[j] + 2*w3[j] + 1;
165   }
166 
167   /* Memory for doing local proc's work*/
168   {
169     ierr = PetscMalloc5(imax,PetscBT,&table, imax,PetscInt*,&data, imax,PetscInt,&isz,
170                         Mbs*imax,PetscInt,&d_p, (Mbs/PETSC_BITS_PER_BYTE+1)*imax,char,&t_p);CHKERRQ(ierr);
171     ierr = PetscMemzero(table,imax*sizeof(PetscBT));CHKERRQ(ierr);
172     ierr = PetscMemzero(data,imax*sizeof(PetscInt*));CHKERRQ(ierr);
173     ierr = PetscMemzero(isz,imax*sizeof(PetscInt));CHKERRQ(ierr);
174     ierr = PetscMemzero(d_p,Mbs*imax*sizeof(PetscInt));CHKERRQ(ierr);
175     ierr = PetscMemzero(t_p,(Mbs/PETSC_BITS_PER_BYTE+1)*imax*sizeof(char));CHKERRQ(ierr);
176 
177     for (i=0; i<imax; i++) {
178       table[i] = t_p + (Mbs/PETSC_BITS_PER_BYTE+1)*i;
179       data[i]  = d_p + (Mbs)*i;
180     }
181   }
182 
183   /* Parse the IS and update local tables and the outgoing buf with the data*/
184   {
185     PetscInt n_i,*data_i,isz_i,*outdat_j,ctr_j;
186     PetscBT  table_i;
187 
188     for (i=0; i<imax; i++) {
189       ierr    = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
190       n_i     = n[i];
191       table_i = table[i];
192       idx_i   = idx[i];
193       data_i  = data[i];
194       isz_i   = isz[i];
195       for (j=0;  j<n_i; j++) {  /* parse the indices of each IS */
196         row  = idx_i[j];
197         ierr = PetscLayoutFindOwner(C->rmap,row*C->rmap->bs,&proc);CHKERRQ(ierr);
198         if (proc != rank) { /* copy to the outgoing buffer */
199           ctr[proc]++;
200           *ptr[proc] = row;
201           ptr[proc]++;
202         } else { /* Update the local table */
203           if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
204         }
205       }
206       /* Update the headers for the current IS */
207       for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
208         if ((ctr_j = ctr[j])) {
209           outdat_j        = outdat[j];
210           k               = ++outdat_j[0];
211           outdat_j[2*k]   = ctr_j;
212           outdat_j[2*k-1] = i;
213         }
214       }
215       isz[i] = isz_i;
216     }
217   }
218 
219   /*  Now  post the sends */
220   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
221   for (i=0; i<nrqs; ++i) {
222     j    = pa[i];
223     ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
224   }
225 
226   /* No longer need the original indices*/
227   for (i=0; i<imax; ++i) {
228     ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
229   }
230   ierr = PetscFree2(idx,n);CHKERRQ(ierr);
231 
232   for (i=0; i<imax; ++i) {
233     ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
234   }
235 
236   /* Do Local work*/
237   ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);
238 
239   /* Receive messages*/
240   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&recv_status);CHKERRQ(ierr);
241   if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);}
242 
243   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status);CHKERRQ(ierr);
244   if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}
245 
246   /* Phase 1 sends are complete - deallocate buffers */
247   ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr);
248   ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);
249 
250   ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&xdata);CHKERRQ(ierr);
251   ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt),&isz1);CHKERRQ(ierr);
252   ierr = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
253   ierr = PetscFree(rbuf[0]);CHKERRQ(ierr);
254   ierr = PetscFree(rbuf);CHKERRQ(ierr);
255 
256   /* Send the data back*/
257   /* Do a global reduction to know the buffer space req for incoming messages*/
258   {
259     PetscMPIInt *rw1;
260 
261     ierr = PetscMalloc(size*sizeof(PetscInt),&rw1);CHKERRQ(ierr);
262     ierr = PetscMemzero(rw1,size*sizeof(PetscInt));CHKERRQ(ierr);
263 
264     for (i=0; i<nrqr; ++i) {
265       proc      = recv_status[i].MPI_SOURCE;
266       if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
267       rw1[proc] = isz1[i];
268     }
269 
270     ierr = PetscFree(onodes1);CHKERRQ(ierr);
271     ierr = PetscFree(olengths1);CHKERRQ(ierr);
272 
273     /* Determine the number of messages to expect, their lengths, and their from-ids */
274     ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
275     ierr = PetscFree(rw1);CHKERRQ(ierr);
276   }
277   /* Now post the Irecvs corresponding to these messages */
278   ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);
279 
280   /*  Now  post the sends */
281   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
282   for (i=0; i<nrqr; ++i) {
283     j    = recv_status[i].MPI_SOURCE;
284     ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
285   }
286 
287   /* receive work done on other processors*/
288   {
289     PetscMPIInt idex;
290     PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
291     PetscBT     table_i;
292     MPI_Status  *status2;
293 
294     ierr = PetscMalloc((PetscMax(nrqr,nrqs)+1)*sizeof(MPI_Status),&status2);CHKERRQ(ierr);
295     for (i=0; i<nrqs; ++i) {
296       ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
297       /* Process the message*/
298       rbuf2_i = rbuf2[idex];
299       ct1     = 2*rbuf2_i[0]+1;
300       jmax    = rbuf2[idex][0];
301       for (j=1; j<=jmax; j++) {
302         max     = rbuf2_i[2*j];
303         is_no   = rbuf2_i[2*j-1];
304         isz_i   = isz[is_no];
305         data_i  = data[is_no];
306         table_i = table[is_no];
307         for (k=0; k<max; k++,ct1++) {
308           row = rbuf2_i[ct1];
309           if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
310         }
311         isz[is_no] = isz_i;
312       }
313     }
314     if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);}
315     ierr = PetscFree(status2);CHKERRQ(ierr);
316   }
317 
318   for (i=0; i<imax; ++i) {
319     ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
320   }
321 
322 
323   ierr = PetscFree(onodes2);CHKERRQ(ierr);
324   ierr = PetscFree(olengths2);CHKERRQ(ierr);
325 
326   ierr = PetscFree(pa);CHKERRQ(ierr);
327   ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr);
328   ierr = PetscFree(rbuf2);CHKERRQ(ierr);
329   ierr = PetscFree(s_waits1);CHKERRQ(ierr);
330   ierr = PetscFree(r_waits1);CHKERRQ(ierr);
331   ierr = PetscFree(s_waits2);CHKERRQ(ierr);
332   ierr = PetscFree(r_waits2);CHKERRQ(ierr);
333   ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr);
334   ierr = PetscFree(s_status);CHKERRQ(ierr);
335   ierr = PetscFree(recv_status);CHKERRQ(ierr);
336   ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
337   ierr = PetscFree(xdata);CHKERRQ(ierr);
338   ierr = PetscFree(isz1);CHKERRQ(ierr);
339   PetscFunctionReturn(0);
340 }
341 
342 #undef __FUNCT__
343 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Local"
344 /*
345    MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap_MPIBAIJ_Once to do
346        the work on the local processor.
347 
348      Inputs:
349       C      - a MATMPIBAIJ matrix;
350       imax   - total number of index sets processed at a time;
351       table  - an array of PetscBT bit tables, one per index set, each Mbs bits long.
352 
353      Output:
354       isz    - array containing the count of the solution elements corresponding
355                to each index set;
356       data   - pointer to the solutions (the block-row indices gathered for each index set)
357 */
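/*
   The deduplication pattern used below (and in MatIncreaseOverlap_MPIBAIJ_Receive):
   PetscBTLookupSet(table_i,val) sets bit val and returns its previous state, so each
   block index is appended to data_i at most once:

       if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val; }
*/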
358 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data)
359 {
360   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
361   Mat         A = c->A,B = c->B;
362   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
363   PetscInt    start,end,val,max,rstart,cstart,*ai,*aj;
364   PetscInt    *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
365   PetscBT     table_i;
366 
367   PetscFunctionBegin;
368   rstart = c->rstartbs;
369   cstart = c->cstartbs;
370   ai     = a->i;
371   aj     = a->j;
372   bi     = b->i;
373   bj     = b->j;
374   garray = c->garray;
375 
376 
377   for (i=0; i<imax; i++) {
378     data_i  = data[i];
379     table_i = table[i];
380     isz_i   = isz[i];
381     for (j=0,max=isz[i]; j<max; j++) {
382       row   = data_i[j] - rstart;
383       start = ai[row];
384       end   = ai[row+1];
385       for (k=start; k<end; k++) { /* Amat */
386         val = aj[k] + cstart;
387         if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
388       }
389       start = bi[row];
390       end   = bi[row+1];
391       for (k=start; k<end; k++) { /* Bmat */
392         val = garray[bj[k]];
393         if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
394       }
395     }
396     isz[i] = isz_i;
397   }
398   PetscFunctionReturn(0);
399 }
400 #undef __FUNCT__
401 #define __FUNCT__ "MatIncreaseOverlap_MPIBAIJ_Receive"
402 /*
403       MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
404          and return the output
405 
406          Input:
407            C    - the matrix
408            nrqr - number of messages being processed.
409            rbuf - an array of pointers to the received requests
410 
411          Output:
412            xdata - array of messages to be sent back
413            isz1  - size of each message
414 
415   For better efficiency perhaps we should malloc each xdata[i] separately; then,
416 if a reallocation is required, we would only need to copy the data for that one
417 message rather than for all previous ones, as happens now where a single large
418 chunk of memory is used.
419 
420 */
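/*
   The reply xdata[i] uses the same layout as the request: xdata[i][0] is the number of
   index sets, xdata[i][2*j-1] is the index-set number copied from the request and
   xdata[i][2*j] the number of entries produced for it, followed by the data itself.
   All replies share one contiguous allocation (xdata[i+1] = xdata[i] + isz1[i]); when
   the initial size estimate is exceeded the buffer is grown by a factor of 1.5 and the
   xdata[ctr] pointers are rebased onto the new allocation.
*/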
421 static PetscErrorCode MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt * isz1)
422 {
423   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
424   Mat            A = c->A,B = c->B;
425   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
426   PetscErrorCode ierr;
427   PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
428   PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
429   PetscInt       val,max1,max2,Mbs,no_malloc =0,*tmp,new_estimate,ctr;
430   PetscInt       *rbuf_i,kmax,rbuf_0;
431   PetscBT        xtable;
432 
433   PetscFunctionBegin;
434   Mbs    = c->Mbs;
435   rstart = c->rstartbs;
436   cstart = c->cstartbs;
437   ai     = a->i;
438   aj     = a->j;
439   bi     = b->i;
440   bj     = b->j;
441   garray = c->garray;
442 
443 
444   for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
445     rbuf_i  =  rbuf[i];
446     rbuf_0  =  rbuf_i[0];
447     ct     += rbuf_0;
448     for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
449   }
450 
451   if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs;
452   else        max1 = 1;
453   mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
454   ierr         = PetscMalloc(mem_estimate*sizeof(PetscInt),&xdata[0]);CHKERRQ(ierr);
455   ++no_malloc;
456   ierr         = PetscBTCreate(Mbs,xtable);CHKERRQ(ierr);
457   ierr         = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr);
458 
459   ct3 = 0;
460   for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
461     rbuf_i =  rbuf[i];
462     rbuf_0 =  rbuf_i[0];
463     ct1    =  2*rbuf_0+1;
464     ct2    =  ct1;
465     ct3    += ct1;
466     for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/
467       ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr);
468       oct2 = ct2;
469       kmax = rbuf_i[2*j];
470       for (k=0; k<kmax; k++,ct1++) {
471         row = rbuf_i[ct1];
472         if (!PetscBTLookupSet(xtable,row)) {
473           if (!(ct3 < mem_estimate)) {
474             new_estimate = (PetscInt)(1.5*mem_estimate)+1;
475             ierr = PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);CHKERRQ(ierr);
476             ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
477             ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
478             xdata[0]     = tmp;
479             mem_estimate = new_estimate; ++no_malloc;
480             for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
481           }
482           xdata[i][ct2++] = row;
483           ct3++;
484         }
485       }
486       for (k=oct2,max2=ct2; k<max2; k++)  {
487         row   = xdata[i][k] - rstart;
488         start = ai[row];
489         end   = ai[row+1];
490         for (l=start; l<end; l++) {
491           val = aj[l] + cstart;
492           if (!PetscBTLookupSet(xtable,val)) {
493             if (!(ct3 < mem_estimate)) {
494               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
495               ierr = PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);CHKERRQ(ierr);
496               ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
497               ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
498               xdata[0]     = tmp;
499               mem_estimate = new_estimate; ++no_malloc;
500               for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
501             }
502             xdata[i][ct2++] = val;
503             ct3++;
504           }
505         }
506         start = bi[row];
507         end   = bi[row+1];
508         for (l=start; l<end; l++) {
509           val = garray[bj[l]];
510           if (!PetscBTLookupSet(xtable,val)) {
511             if (!(ct3 < mem_estimate)) {
512               new_estimate = (PetscInt)(1.5*mem_estimate)+1;
513               ierr = PetscMalloc(new_estimate * sizeof(PetscInt),&tmp);CHKERRQ(ierr);
514               ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
515               ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
516               xdata[0]     = tmp;
517               mem_estimate = new_estimate; ++no_malloc;
518               for (ctr =1; ctr <=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
519             }
520             xdata[i][ct2++] = val;
521             ct3++;
522           }
523         }
524       }
525       /* Update the header*/
526       xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
527       xdata[i][2*j-1] = rbuf_i[2*j-1];
528     }
529     xdata[i][0] = rbuf_0;
530     xdata[i+1]  = xdata[i] + ct2;
531     isz1[i]     = ct2; /* size of each message */
532   }
533   ierr = PetscBTDestroy(xtable);CHKERRQ(ierr);
534   ierr = PetscInfo3(C,"Allocated %D bytes, required %D, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr);
535   PetscFunctionReturn(0);
536 }
537 
538 #undef __FUNCT__
539 #define __FUNCT__ "MatGetSubMatrices_MPIBAIJ"
540 PetscErrorCode MatGetSubMatrices_MPIBAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
541 {
542   IS             *isrow_new,*iscol_new;
543   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
544   PetscErrorCode ierr;
545   PetscInt       nmax,nstages_local,nstages,i,pos,max_no,ncol,nrow,N=C->cmap->N,bs=C->rmap->bs;
546   PetscBool      colflag,*allcolumns,*allrows;
547 
548   PetscFunctionBegin;
549   /* Ideally the index compression and expansion below should be avoided; it does not
550      flag errors and may silently change the indices, hence it is bug-prone. */
551 
552   ierr = PetscMalloc2(ismax+1,IS,&isrow_new,ismax+1,IS,&iscol_new);CHKERRQ(ierr);
553   ierr = ISCompressIndicesGeneral(N,C->rmap->n,bs,ismax,isrow,isrow_new);CHKERRQ(ierr);
554   ierr = ISCompressIndicesGeneral(N,C->cmap->n,bs,ismax,iscol,iscol_new);CHKERRQ(ierr);
555 
556   /* Check for special case: each processor gets entire matrix columns */
557   ierr = PetscMalloc2(ismax+1,PetscBool,&allcolumns,ismax+1,PetscBool,&allrows);CHKERRQ(ierr);
558   for (i=0; i<ismax; i++) {
559     ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr);
560     ierr = ISGetLocalSize(iscol[i],&ncol);CHKERRQ(ierr);
561     if (colflag && ncol == C->cmap->N){
562       allcolumns[i] = PETSC_TRUE;
563     } else {
564       allcolumns[i] = PETSC_FALSE;
565     }
566 
567     ierr = ISIdentity(isrow[i],&colflag);CHKERRQ(ierr);
568     ierr = ISGetLocalSize(isrow[i],&nrow);CHKERRQ(ierr);
569     if (colflag && nrow == C->rmap->N){
570       allrows[i] = PETSC_TRUE;
571     } else {
572       allrows[i] = PETSC_FALSE;
573     }
574   }
575 
576   /* Allocate memory to hold all the submatrices */
577   if (scall != MAT_REUSE_MATRIX) {
578     ierr = PetscMalloc((ismax+1)*sizeof(Mat),submat);CHKERRQ(ierr);
579   }
580   /* Determine the number of stages through which submatrices are done (limits the workspace per stage to roughly 20 MB) */
581   nmax          = 20*1000000 / (c->Nbs * sizeof(PetscInt));
582   if (!nmax) nmax = 1;
583   nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
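  /*
     Illustrative arithmetic (made-up sizes): with c->Nbs = 1000000 block columns and
     8-byte PetscInt, nmax = 20000000/(1000000*8) = 2, so ismax = 5 index sets are
     processed in nstages_local = 5/2 + 1 = 3 stages of sizes 2, 2 and 1.
  */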
584 
585   /* Make sure every processor loops through the nstages */
586   ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPIU_INT,MPI_MAX,((PetscObject)C)->comm);CHKERRQ(ierr);
587   for (i=0,pos=0; i<nstages; i++) {
588     if (pos+nmax <= ismax) max_no = nmax;
589     else if (pos == ismax) max_no = 0;
590     else                   max_no = ismax-pos;
591     ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,allrows+pos,allcolumns+pos,*submat+pos);CHKERRQ(ierr);
592     pos += max_no;
593   }
594 
595   for (i=0; i<ismax; i++) {
596     ierr = ISDestroy(&isrow_new[i]);CHKERRQ(ierr);
597     ierr = ISDestroy(&iscol_new[i]);CHKERRQ(ierr);
598   }
599   ierr = PetscFree2(isrow_new,iscol_new);CHKERRQ(ierr);
600   ierr = PetscFree2(allcolumns,allrows);CHKERRQ(ierr);
601   PetscFunctionReturn(0);
602 }
603 
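/*
   A minimal usage sketch (illustrative only) of the interface the routine above backs:
   extract one sequential submatrix per index set, e.g. after growing the index sets
   with MatIncreaseOverlap():

       Mat C;          parallel MATMPIBAIJ matrix
       IS  is[1];      rows (and columns) wanted by this process
       Mat *submat;

       ierr = MatGetSubMatrices(C,1,is,is,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
       ...             use submat[0], a sequential BAIJ matrix
       ierr = MatDestroyMatrices(1,&submat);CHKERRQ(ierr);
*/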
604 #if defined (PETSC_USE_CTABLE)
605 #undef __FUNCT__
606 #define __FUNCT__ "PetscGetProc"
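/*
   PetscGetProc - Given a global block row and the array proc_gnode[] of length size+1
      holding the block-row ownership ranges (process p owns block rows proc_gnode[p]
      up to proc_gnode[p+1]-1), return the owning rank in *rank.  An interpolated
      initial guess is refined by a short linear search.
*/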
607 PetscErrorCode PetscGetProc(const PetscInt row, const PetscMPIInt size, const PetscInt proc_gnode[], PetscMPIInt *rank)
608 {
609   PetscInt    nGlobalNd = proc_gnode[size];
610   PetscMPIInt fproc = PetscMPIIntCast( (PetscInt)(((float)row * (float)size / (float)nGlobalNd + 0.5)));
611 
612   PetscFunctionBegin;
613   if (fproc > size) fproc = size;
614   while (row < proc_gnode[fproc] || row >= proc_gnode[fproc+1]) {
615     if (row < proc_gnode[fproc]) fproc--;
616     else                         fproc++;
617   }
618   *rank = fproc;
619   PetscFunctionReturn(0);
620 }
621 #endif
622 
623 /* -------------------------------------------------------------------------*/
624 /* This code is used for BAIJ and SBAIJ matrices (unfortunate dependency) */
625 #undef __FUNCT__
626 #define __FUNCT__ "MatGetSubMatrices_MPIBAIJ_local"
627 PetscErrorCode MatGetSubMatrices_MPIBAIJ_local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allrows,PetscBool *allcolumns,Mat *submats)
628 {
629   Mat_MPIBAIJ    *c = (Mat_MPIBAIJ*)C->data;
630   Mat            A = c->A;
631   Mat_SeqBAIJ    *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
632   const PetscInt **irow,**icol,*irow_i;
633   PetscInt       *nrow,*ncol,*w3,*w4,start;
634   PetscErrorCode ierr;
635   PetscMPIInt    size,tag0,tag1,tag2,tag3,*w1,*w2,nrqr,idex,end,proc;
636   PetscInt       **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,**rbuf1,row;
637   PetscInt       nrqs,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol;
638   PetscInt       **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
639   PetscInt       **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax;
640   PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
641   PetscInt       bs=C->rmap->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
642   PetscInt       cstart = c->cstartbs,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
643   PetscInt       *bmap = c->garray,ctmp,rstart=c->rstartbs;
644   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3,*s_waits3;
645   MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2,*r_status3;
646   MPI_Comm       comm;
647   PetscBool      flag;
648   PetscMPIInt    *onodes1,*olengths1;
649   PetscBool      ijonly=c->ijonly; /* private flag indicates only matrix data structures are requested */
650   /* variables below are used for the matrix numerical values - case of !ijonly */
651   MPI_Request    *r_waits4,*s_waits4;
652   MPI_Status     *r_status4,*s_status4;
653   MatScalar      **rbuf4,**sbuf_aa,*vals,*mat_a = PETSC_NULL,*sbuf_aa_i,*vworkA = PETSC_NULL,*vworkB = PETSC_NULL;
654   MatScalar      *a_a=a->a,*b_a=b->a;
655 
656 #if defined (PETSC_USE_CTABLE)
657   PetscInt       tt;
658   PetscTable     *rmap,*cmap,rmap_i,cmap_i=PETSC_NULL;
659 #else
660   PetscInt       **cmap,*cmap_i=PETSC_NULL,*rtable,*rmap_i,**rmap, Mbs = c->Mbs;
661 #endif
662 
663   PetscFunctionBegin;
664   comm   = ((PetscObject)C)->comm;
665   tag0   = ((PetscObject)C)->tag;
666   size   = c->size;
667   rank   = c->rank;
668 
669   /* Get some new tags to keep the communication clean */
670   ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
671   ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
672   ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);
673 
674 #if defined(PETSC_USE_CTABLE)
675   ierr = PetscMalloc4(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol);CHKERRQ(ierr);
676 #else
677   ierr = PetscMalloc5(ismax,const PetscInt*,&irow,ismax,const PetscInt*,&icol,ismax,PetscInt,&nrow,ismax,PetscInt,&ncol,Mbs+1,PetscInt,&rtable);CHKERRQ(ierr);
678   /* Create a lookup table for the mapping: block row -> owning process */
679   for (i=0,j=0; i<size; i++) {
680     jmax = C->rmap->range[i+1]/bs;
681     for (; j<jmax; j++) {
682       rtable[j] = i;
683     }
684   }
685 #endif
686 
687   for (i=0; i<ismax; i++) {
688     if (allrows[i]){
689       irow[i] = PETSC_NULL;
690       nrow[i] = C->rmap->N/bs;
691     } else {
692       ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
693       ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
694     }
695 
696     if (allcolumns[i]){
697       icol[i] = PETSC_NULL;
698       ncol[i] = C->cmap->N/bs;
699     } else {
700       ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
701       ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
702     }
703   }
704 
705   /* Evaluate communication - to whom each message goes, the length of each message, and the
706      buffer space required. Based on this, buffers are allocated and data copied into them. */
707   ierr = PetscMalloc4(size,PetscMPIInt,&w1,size,PetscMPIInt,&w2,size,PetscInt,&w3,size,PetscInt,&w4);CHKERRQ(ierr);
708   ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
709   ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
710   ierr = PetscMemzero(w3,size*sizeof(PetscInt));CHKERRQ(ierr);
711   for (i=0; i<ismax; i++) {
712     ierr   = PetscMemzero(w4,size*sizeof(PetscInt));CHKERRQ(ierr); /* initialise work vector*/
713     jmax   = nrow[i];
714     irow_i = irow[i];
715     for (j=0; j<jmax; j++) {
716       if (allrows[i]) {
717         row = j;
718       } else {
719         row  = irow_i[j];
720       }
721 #if defined (PETSC_USE_CTABLE)
722       ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
723 #else
724       proc = rtable[row];
725 #endif
726       w4[proc]++;
727     }
728     for (j=0; j<size; j++) {
729       if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
730     }
731   }
732 
733   nrqs     = 0;              /* no of outgoing messages */
734   msz      = 0;              /* total mesg length for all proc */
735   w1[rank] = 0;              /* no mesg sent to itself */
736   w3[rank] = 0;
737   for (i=0; i<size; i++) {
738     if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
739   }
740   ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt),&pa);CHKERRQ(ierr); /*(proc -array)*/
741   for (i=0,j=0; i<size; i++) {
742     if (w1[i]) { pa[j] = i; j++; }
743   }
744 
745   /* Each message would have a header = 1 + 2*(no of IS) + data */
746   for (i=0; i<nrqs; i++) {
747     j     = pa[i];
748     w1[j] += w2[j] + 2* w3[j];
749     msz   += w1[j];
750   }
751 
752   /* Determine the number of messages to expect, their lengths, and their from-ids */
753   ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
754   ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);
755 
756   /* Now post the Irecvs corresponding to these messages */
757   ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);
758 
759   ierr = PetscFree(onodes1);CHKERRQ(ierr);
760   ierr = PetscFree(olengths1);CHKERRQ(ierr);
761 
762   /* Allocate Memory for outgoing messages */
763   ierr = PetscMalloc4(size,PetscInt*,&sbuf1,size,PetscInt*,&ptr,2*msz,PetscInt,&tmp,size,PetscInt,&ctr);CHKERRQ(ierr);
764   ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr);
765   ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
766   {
767     PetscInt *iptr = tmp,ict = 0;
768     for (i=0; i<nrqs; i++) {
769       j         = pa[i];
770       iptr     += ict;
771       sbuf1[j]  = iptr;
772       ict       = w1[j];
773     }
774   }
775 
776   /* Form the outgoing messages */
777   /* Initialise the header space */
778   for (i=0; i<nrqs; i++) {
779     j           = pa[i];
780     sbuf1[j][0] = 0;
781     ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
782     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
783   }
784 
785   /* Parse the isrow and copy data into outbuf */
786   for (i=0; i<ismax; i++) {
787     ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
788     irow_i = irow[i];
789     jmax   = nrow[i];
790     for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
791       if (allrows[i]){
792         row = j;
793       } else {
794         row  = irow_i[j];
795       }
796 #if defined (PETSC_USE_CTABLE)
797       ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
798 #else
799       proc = rtable[row];
800 #endif
801       if (proc != rank) { /* copy to the outgoing buf*/
802         ctr[proc]++;
803         *ptr[proc] = row;
804         ptr[proc]++;
805       }
806     }
807     /* Update the headers for the current IS */
808     for (j=0; j<size; j++) { /* Can Optimise this loop too */
809       if ((ctr_j = ctr[j])) {
810         sbuf1_j        = sbuf1[j];
811         k              = ++sbuf1_j[0];
812         sbuf1_j[2*k]   = ctr_j;
813         sbuf1_j[2*k-1] = i;
814       }
815     }
816   }
817 
818   /*  Now  post the sends */
819   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&s_waits1);CHKERRQ(ierr);
820   for (i=0; i<nrqs; ++i) {
821     j = pa[i];
822     ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
823   }
824 
825   /* Post receives to capture the buffer sizes */
826   ierr     = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits2);CHKERRQ(ierr);
827   ierr     = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf2);CHKERRQ(ierr);
828   rbuf2[0] = tmp + msz;
829   for (i=1; i<nrqs; ++i) {
830     j        = pa[i];
831     rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
832   }
833   for (i=0; i<nrqs; ++i) {
834     j    = pa[i];
835     ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
836   }
837 
838   /* Send to other procs the buf size they should allocate */
839 
840   /* Receive messages*/
841   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits2);CHKERRQ(ierr);
842   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&r_status1);CHKERRQ(ierr);
843   ierr = PetscMalloc3(nrqr+1,PetscInt*,&sbuf2,nrqr,PetscInt,&req_size,nrqr,PetscInt,&req_source);CHKERRQ(ierr);
844   {
845     Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
846     PetscInt    *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;
847 
848     for (i=0; i<nrqr; ++i) {
849       ierr = MPI_Waitany(nrqr,r_waits1,&idex,r_status1+i);CHKERRQ(ierr);
850       req_size[idex] = 0;
851       rbuf1_i         = rbuf1[idex];
852       start           = 2*rbuf1_i[0] + 1;
853       ierr            = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
854       ierr            = PetscMalloc(end*sizeof(PetscInt),&sbuf2[idex]);CHKERRQ(ierr);
855       sbuf2_i         = sbuf2[idex];
856       for (j=start; j<end; j++) {
857         id               = rbuf1_i[j] - rstart;
858         ncols            = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
859         sbuf2_i[j]       = ncols;
860         req_size[idex] += ncols;
861       }
862       req_source[idex] = r_status1[i].MPI_SOURCE;
863       /* form the header */
864       sbuf2_i[0]   = req_size[idex];
865       for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
866       ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source[idex],tag1,comm,s_waits2+i);CHKERRQ(ierr);
867     }
868   }
869   ierr = PetscFree(r_status1);CHKERRQ(ierr);
870   ierr = PetscFree(r_waits1);CHKERRQ(ierr);
871 
872   /*  recv buffer sizes */
873   /* Receive messages*/
874   ierr = PetscMalloc((nrqs+1)*sizeof(PetscInt*),&rbuf3);CHKERRQ(ierr);
875   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits3);CHKERRQ(ierr);
876   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status2);CHKERRQ(ierr);
877   if (!ijonly){
878     ierr = PetscMalloc((nrqs+1)*sizeof(MatScalar*),&rbuf4);CHKERRQ(ierr);
879     ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Request),&r_waits4);CHKERRQ(ierr);
880   }
881 
882   for (i=0; i<nrqs; ++i) {
883     ierr = MPI_Waitany(nrqs,r_waits2,&idex,r_status2+i);CHKERRQ(ierr);
884     ierr = PetscMalloc(rbuf2[idex][0]*sizeof(PetscInt),&rbuf3[idex]);CHKERRQ(ierr);
885     ierr = MPI_Irecv(rbuf3[idex],rbuf2[idex][0],MPIU_INT,r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+idex);CHKERRQ(ierr);
886     if (!ijonly){
887       ierr = PetscMalloc(rbuf2[idex][0]*bs2*sizeof(MatScalar),&rbuf4[idex]);CHKERRQ(ierr);
888       ierr = MPI_Irecv(rbuf4[idex],rbuf2[idex][0]*bs2,MPIU_MATSCALAR,r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+idex);CHKERRQ(ierr);
889     }
890   }
891   ierr = PetscFree(r_status2);CHKERRQ(ierr);
892   ierr = PetscFree(r_waits2);CHKERRQ(ierr);
893 
894   /* Wait on sends1 and sends2 */
895   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&s_status1);CHKERRQ(ierr);
896   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status2);CHKERRQ(ierr);
897 
898   if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
899   if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
900   ierr = PetscFree(s_status1);CHKERRQ(ierr);
901   ierr = PetscFree(s_status2);CHKERRQ(ierr);
902   ierr = PetscFree(s_waits1);CHKERRQ(ierr);
903   ierr = PetscFree(s_waits2);CHKERRQ(ierr);
904 
905   /* Now allocate buffers for a->j, and send them off */
906   ierr = PetscMalloc((nrqr+1)*sizeof(PetscInt*),&sbuf_aj);CHKERRQ(ierr);
907   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
908   ierr = PetscMalloc((j+1)*sizeof(PetscInt),&sbuf_aj[0]);CHKERRQ(ierr);
909   for (i=1; i<nrqr; i++)  sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
910 
911   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits3);CHKERRQ(ierr);
912   {
913      for (i=0; i<nrqr; i++) {
914       rbuf1_i   = rbuf1[i];
915       sbuf_aj_i = sbuf_aj[i];
916       ct1       = 2*rbuf1_i[0] + 1;
917       ct2       = 0;
918       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
919         kmax = rbuf1[i][2*j];
920         for (k=0; k<kmax; k++,ct1++) {
921           row    = rbuf1_i[ct1] - rstart;
922           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
923           ncols  = nzA + nzB;
924           cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
925 
926           /* load the column indices for this row into cols*/
927           cols  = sbuf_aj_i + ct2;
928           for (l=0; l<nzB; l++) {
929             if ((ctmp = bmap[cworkB[l]]) < cstart)  cols[l] = ctmp;
930             else break;
931           }
932           imark = l;
933           for (l=0; l<nzA; l++)   cols[imark+l] = cstart + cworkA[l];
934           for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]];
935           ct2 += ncols;
936         }
937       }
938       ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
939     }
940   }
941   ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status3);CHKERRQ(ierr);
942   ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status3);CHKERRQ(ierr);
943 
944   /* Allocate buffers for a->a, and send them off */
945   if (!ijonly){
946     ierr = PetscMalloc((nrqr+1)*sizeof(MatScalar *),&sbuf_aa);CHKERRQ(ierr);
947     for (i=0,j=0; i<nrqr; i++) j += req_size[i];
948     ierr = PetscMalloc((j+1)*bs2*sizeof(MatScalar),&sbuf_aa[0]);CHKERRQ(ierr);
949     for (i=1; i<nrqr; i++)  sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;
950 
951     ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Request),&s_waits4);CHKERRQ(ierr);
952     {
953       for (i=0; i<nrqr; i++) {
954         rbuf1_i   = rbuf1[i];
955         sbuf_aa_i = sbuf_aa[i];
956         ct1       = 2*rbuf1_i[0]+1;
957         ct2       = 0;
958         for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
959           kmax = rbuf1_i[2*j];
960           for (k=0; k<kmax; k++,ct1++) {
961             row    = rbuf1_i[ct1] - rstart;
962             nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
963             ncols  = nzA + nzB;
964             cworkA = a_j + a_i[row];     cworkB = b_j + b_i[row];
965             vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;
966 
967             /* load the column values for this row into vals*/
968             vals  = sbuf_aa_i+ct2*bs2;
969             for (l=0; l<nzB; l++) {
970               if ((bmap[cworkB[l]]) < cstart) {
971                 ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
972               }
973               else break;
974             }
975             imark = l;
976             for (l=0; l<nzA; l++) {
977               ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
978             }
979             for (l=imark; l<nzB; l++) {
980               ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
981             }
982             ct2 += ncols;
983           }
984         }
985         ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
986       }
987     }
988     ierr = PetscMalloc((nrqs+1)*sizeof(MPI_Status),&r_status4);CHKERRQ(ierr);
989     ierr = PetscMalloc((nrqr+1)*sizeof(MPI_Status),&s_status4);CHKERRQ(ierr);
990   }
991   ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
992   ierr = PetscFree(rbuf1);CHKERRQ(ierr);
993 
994   /* Form the matrix */
995   /* create col map: global col of C -> local col of submatrices */
996   {
997     const PetscInt *icol_i;
998 #if defined (PETSC_USE_CTABLE)
999     ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&cmap);CHKERRQ(ierr);
1000     for (i=0; i<ismax; i++) {
1001       if (!allcolumns[i]){
1002         ierr = PetscTableCreate(ncol[i]+1,c->Nbs+1,&cmap[i]);CHKERRQ(ierr);
1003         jmax   = ncol[i];
1004         icol_i = icol[i];
1005         cmap_i = cmap[i];
1006         for (j=0; j<jmax; j++) {
1007           ierr = PetscTableAdd(cmap_i,icol_i[j]+1,j+1);CHKERRQ(ierr);
1008         }
1009       } else {
1010         cmap[i] = PETSC_NULL;
1011       }
1012     }
1013 #else
1014     ierr    = PetscMalloc(ismax*sizeof(PetscInt*),&cmap);CHKERRQ(ierr);
1015     for (i=0; i<ismax; i++) {
1016       if (!allcolumns[i]){
1017         ierr = PetscMalloc(c->Nbs*sizeof(PetscInt),&cmap[i]);CHKERRQ(ierr);
1018         ierr = PetscMemzero(cmap[i],c->Nbs*sizeof(PetscInt));CHKERRQ(ierr);
1019         jmax   = ncol[i];
1020         icol_i = icol[i];
1021         cmap_i = cmap[i];
1022         for (j=0; j<jmax; j++) {
1023           cmap_i[icol_i[j]] = j+1;
1024         }
1025       } else { /* allcolumns[i] */
1026         cmap[i] = PETSC_NULL;
1027       }
1028     }
1029 #endif
1030   }
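  /*
     Example of the column map built above (illustrative values, non-CTABLE case):
     if icol[i] = {4,7,9} then cmap[i][4] = 1, cmap[i][7] = 2, cmap[i][9] = 3 and all
     other entries are 0, so 0 means "global block column not in submatrix i" and a
     nonzero entry t means local block column t-1.  The CTABLE variant stores the same
     shifted (key+1 -> value+1) pairs via PetscTableAdd()/PetscTableFind().
  */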
1031 
1032   /* Create lens which is required for MatCreate... */
1033   for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
1034   ierr    = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ j*sizeof(PetscInt),&lens);CHKERRQ(ierr);
1035   lens[0] = (PetscInt*)(lens + ismax);
1036   ierr    = PetscMemzero(lens[0],j*sizeof(PetscInt));CHKERRQ(ierr);
1037   for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }
1038 
1039   /* Update lens from local data */
1040   for (i=0; i<ismax; i++) {
1041     jmax   = nrow[i];
1042     if (!allcolumns[i]) cmap_i = cmap[i];
1043     irow_i = irow[i];
1044     lens_i = lens[i];
1045     for (j=0; j<jmax; j++) {
1046       if (allrows[i]){
1047         row = j;
1048       } else {
1049         row  = irow_i[j];
1050       }
1051 #if defined (PETSC_USE_CTABLE)
1052       ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
1053 #else
1054       proc = rtable[row];
1055 #endif
1056       if (proc == rank) {
1057         /* Get indices from matA and then from matB */
1058         row    = row - rstart;
1059         nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1060         cworkA =  a_j + a_i[row]; cworkB = b_j + b_i[row];
1061         if (!allcolumns[i]){
1062 #if defined (PETSC_USE_CTABLE)
1063           for (k=0; k<nzA; k++) {
1064             ierr = PetscTableFind(cmap_i,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr);
1065             if (tt) { lens_i[j]++; }
1066           }
1067           for (k=0; k<nzB; k++) {
1068             ierr = PetscTableFind(cmap_i,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr);
1069             if (tt) { lens_i[j]++; }
1070           }
1071 
1072 #else
1073           for (k=0; k<nzA; k++) {
1074             if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; }
1075           }
1076           for (k=0; k<nzB; k++) {
1077             if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; }
1078           }
1079 #endif
1080         } else {/* allcolumns */
1081           lens_i[j] = nzA + nzB;
1082         }
1083       }
1084     }
1085   }
1086 #if defined (PETSC_USE_CTABLE)
1087   /* Create row map*/
1088   ierr = PetscMalloc((1+ismax)*sizeof(PetscTable),&rmap);CHKERRQ(ierr);
1089   for (i=0; i<ismax; i++){
1090     ierr = PetscTableCreate(nrow[i]+1,c->Mbs+1,&rmap[i]);CHKERRQ(ierr);
1091   }
1092 #else
1093   /* Create row map*/
1094   ierr    = PetscMalloc((1+ismax)*sizeof(PetscInt*)+ ismax*Mbs*sizeof(PetscInt),&rmap);CHKERRQ(ierr);
1095   rmap[0] = (PetscInt*)(rmap + ismax);
1096   ierr    = PetscMemzero(rmap[0],ismax*Mbs*sizeof(PetscInt));CHKERRQ(ierr);
1097   for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs;}
1098 #endif
1099   for (i=0; i<ismax; i++) {
1100     irow_i = irow[i];
1101     jmax   = nrow[i];
1102 #if defined (PETSC_USE_CTABLE)
1103     rmap_i = rmap[i];
1104     for (j=0; j<jmax; j++) {
1105       if (allrows[i]){
1106         ierr = PetscTableAdd(rmap_i,j+1,j+1);CHKERRQ(ierr);
1107       } else {
1108         ierr = PetscTableAdd(rmap_i,irow_i[j]+1,j+1);CHKERRQ(ierr);
1109       }
1110     }
1111 #else
1112     rmap_i = rmap[i];
1113     for (j=0; j<jmax; j++) {
1114       if (allrows[i]){
1115         rmap_i[j] = j;
1116       } else {
1117         rmap_i[irow_i[j]] = j;
1118       }
1119     }
1120 #endif
1121   }
1122 
1123   /* Update lens from offproc data */
1124   {
1125     PetscInt    *rbuf2_i,*rbuf3_i,*sbuf1_i;
1126     PetscMPIInt ii;
1127 
1128     for (tmp2=0; tmp2<nrqs; tmp2++) {
1129       ierr    = MPI_Waitany(nrqs,r_waits3,&ii,r_status3+tmp2);CHKERRQ(ierr);
1130       idex   = pa[ii];
1131       sbuf1_i = sbuf1[idex];
1132       jmax    = sbuf1_i[0];
1133       ct1     = 2*jmax+1;
1134       ct2     = 0;
1135       rbuf2_i = rbuf2[ii];
1136       rbuf3_i = rbuf3[ii];
1137       for (j=1; j<=jmax; j++) {
1138         is_no   = sbuf1_i[2*j-1];
1139         max1    = sbuf1_i[2*j];
1140         lens_i  = lens[is_no];
1141         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1142 	rmap_i = rmap[is_no];
1143         for (k=0; k<max1; k++,ct1++) {
1144 #if defined (PETSC_USE_CTABLE)
1145 	  ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
1146           row--;
1147           if (row < 0) { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); }
1148 #else
1149           row  = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
1150 #endif
1151           max2 = rbuf2_i[ct1];
1152           for (l=0; l<max2; l++,ct2++) {
1153             if (!allcolumns[is_no]){
1154 #if defined (PETSC_USE_CTABLE)
1155               ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr);
1156               if (tt) {
1157                 lens_i[row]++;
1158               }
1159 #else
1160               if (cmap_i[rbuf3_i[ct2]]) {
1161                 lens_i[row]++;
1162               }
1163 #endif
1164             } else { /* allcolumns */
1165               lens_i[row]++;
1166             }
1167           }
1168         }
1169       }
1170     }
1171   }
1172   ierr = PetscFree(r_status3);CHKERRQ(ierr);
1173   ierr = PetscFree(r_waits3);CHKERRQ(ierr);
1174   if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);}
1175   ierr = PetscFree(s_status3);CHKERRQ(ierr);
1176   ierr = PetscFree(s_waits3);CHKERRQ(ierr);
1177 
1178   /* Create the submatrices */
1179   if (scall == MAT_REUSE_MATRIX) {
1180     if (ijonly) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP," MAT_REUSE_MATRIX and ijonly is not supported yet");
1181     /*
1182         Assumes the new rows are the same length as the old rows, which need not hold - hence a potential bug!
1183     */
1184     for (i=0; i<ismax; i++) {
1185       mat = (Mat_SeqBAIJ *)(submats[i]->data);
1186       if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || C->rmap->bs != bs)) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. wrong size");
1187       ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs *sizeof(PetscInt),&flag);CHKERRQ(ierr);
1188       if (!flag) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Cannot reuse matrix. wrong no of nonzeros");
1189       /* Initial matrix as if empty */
1190       ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(PetscInt));CHKERRQ(ierr);
1191       submats[i]->factortype = C->factortype;
1192     }
1193   } else {
1194     PetscInt bs_tmp;
1195     if (ijonly){
1196       bs_tmp = 1;
1197     } else {
1198       bs_tmp = bs;
1199     }
1200     for (i=0; i<ismax; i++) {
1201       ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
1202       ierr = MatSetSizes(submats[i],nrow[i]*bs_tmp,ncol[i]*bs_tmp,nrow[i]*bs_tmp,ncol[i]*bs_tmp);CHKERRQ(ierr);
1203       ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
1204       ierr = MatSeqBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr);
1205       ierr = MatSeqSBAIJSetPreallocation(submats[i],bs_tmp,0,lens[i]);CHKERRQ(ierr); /* this subroutine is used by SBAIJ routines */
1206     }
1207   }
1208 
1209   /* Assemble the matrices */
1210   /* First assemble the local rows */
1211   {
1212     PetscInt  ilen_row,*imat_ilen,*imat_j,*imat_i;
1213     MatScalar *imat_a = PETSC_NULL;
1214 
1215     for (i=0; i<ismax; i++) {
1216       mat       = (Mat_SeqBAIJ*)submats[i]->data;
1217       imat_ilen = mat->ilen;
1218       imat_j    = mat->j;
1219       imat_i    = mat->i;
1220       if (!ijonly) imat_a = mat->a;
1221       if (!allcolumns[i]) cmap_i = cmap[i];
1222       rmap_i = rmap[i];
1223       irow_i    = irow[i];
1224       jmax      = nrow[i];
1225       for (j=0; j<jmax; j++) {
1226         if (allrows[i]){
1227           row = j;
1228         } else {
1229           row      = irow_i[j];
1230         }
1231 #if defined (PETSC_USE_CTABLE)
1232 	ierr = PetscGetProc(row,size,c->rangebs,&proc);CHKERRQ(ierr);
1233 #else
1234 	proc = rtable[row];
1235 #endif
1236         if (proc == rank) {
1237           row      = row - rstart;
1238           nzA      = a_i[row+1] - a_i[row];
1239           nzB      = b_i[row+1] - b_i[row];
1240           cworkA   = a_j + a_i[row];
1241           cworkB   = b_j + b_i[row];
1242           if (!ijonly){
1243             vworkA = a_a + a_i[row]*bs2;
1244             vworkB = b_a + b_i[row]*bs2;
1245           }
1246 #if defined (PETSC_USE_CTABLE)
1247 	  ierr = PetscTableFind(rmap_i,row+rstart+1,&row);CHKERRQ(ierr);
1248           row--;
1249           if (row < 0) { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); }
1250 #else
1251           row      = rmap_i[row + rstart];
1252 #endif
1253           mat_i    = imat_i[row];
1254           if (!ijonly) mat_a = imat_a + mat_i*bs2;
1255           mat_j    = imat_j + mat_i;
1256           ilen_row = imat_ilen[row];
1257 
1258           /* load the column indices for this row into cols*/
1259           if (!allcolumns[i]){
1260             for (l=0; l<nzB; l++) {
1261               if ((ctmp = bmap[cworkB[l]]) < cstart) {
1262 #if defined (PETSC_USE_CTABLE)
1263                 ierr = PetscTableFind(cmap_i,ctmp+1,&tcol);CHKERRQ(ierr);
1264                 if (tcol) {
1265 #else
1266                 if ((tcol = cmap_i[ctmp])) {
1267 #endif
1268                   *mat_j++ = tcol - 1;
1269                   ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1270                   mat_a   += bs2;
1271                   ilen_row++;
1272                 }
1273               } else break;
1274             }
1275             imark = l;
1276             for (l=0; l<nzA; l++) {
1277 #if defined (PETSC_USE_CTABLE)
1278               ierr = PetscTableFind(cmap_i,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr);
1279               if (tcol) {
1280 #else
1281               if ((tcol = cmap_i[cstart + cworkA[l]])) {
1282 #endif
1283                 *mat_j++ = tcol - 1;
1284                 if (!ijonly){
1285                   ierr   = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1286                   mat_a += bs2;
1287                 }
1288                 ilen_row++;
1289               }
1290             }
1291             for (l=imark; l<nzB; l++) {
1292 #if defined (PETSC_USE_CTABLE)
1293               ierr = PetscTableFind(cmap_i,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr);
1294               if (tcol) {
1295 #else
1296               if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1297 #endif
1298                 *mat_j++ = tcol - 1;
1299                 if (!ijonly){
1300                   ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1301                   mat_a   += bs2;
1302                 }
1303                 ilen_row++;
1304               }
1305             }
1306           } else { /* allcolumns */
1307             for (l=0; l<nzB; l++) {
1308               if ((ctmp = bmap[cworkB[l]]) < cstart) {
1309                 *mat_j++ = ctmp;
1310                 ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1311                 mat_a   += bs2;
1312                 ilen_row++;
1313               } else break;
1314             }
1315             imark = l;
1316             for (l=0; l<nzA; l++) {
1317               *mat_j++ = cstart+cworkA[l];
1318               if (!ijonly){
1319                 ierr   = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1320                 mat_a += bs2;
1321               }
1322               ilen_row++;
1323             }
1324             for (l=imark; l<nzB; l++) {
1325               *mat_j++ = bmap[cworkB[l]];
1326               if (!ijonly){
1327                 ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1328                 mat_a   += bs2;
1329               }
1330               ilen_row++;
1331             }
1332           }
1333           imat_ilen[row] = ilen_row;
1334         }
1335       }
1336     }
1337   }
1338 
1339   /*   Now assemble the off proc rows*/
1340   {
1341     PetscInt    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
1342     PetscInt    *imat_j,*imat_i;
1343     MatScalar   *imat_a = PETSC_NULL,*rbuf4_i = PETSC_NULL;
1344     PetscMPIInt ii;
1345 
1346     for (tmp2=0; tmp2<nrqs; tmp2++) {
1347       if (ijonly){
1348         ii = tmp2;
1349       } else {
1350         ierr = MPI_Waitany(nrqs,r_waits4,&ii,r_status4+tmp2);CHKERRQ(ierr);
1351       }
1352       idex   = pa[ii];
1353       sbuf1_i = sbuf1[idex];
1354       jmax    = sbuf1_i[0];
1355       ct1     = 2*jmax + 1;
1356       ct2     = 0;
1357       rbuf2_i = rbuf2[ii];
1358       rbuf3_i = rbuf3[ii];
1359       if (!ijonly) rbuf4_i = rbuf4[ii];
1360       for (j=1; j<=jmax; j++) {
1361         is_no     = sbuf1_i[2*j-1];
1362         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
1363 	rmap_i    = rmap[is_no];
1364         mat       = (Mat_SeqBAIJ*)submats[is_no]->data;
1365         imat_ilen = mat->ilen;
1366         imat_j    = mat->j;
1367         imat_i    = mat->i;
1368         if (!ijonly) imat_a = mat->a;
1369         max1      = sbuf1_i[2*j];
1370         for (k=0; k<max1; k++,ct1++) {
1371           row   = sbuf1_i[ct1];
1372 #if defined (PETSC_USE_CTABLE)
1373 	  ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr);
1374           row--;
1375           if(row < 0) { SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table"); }
1376 #else
1377           row   = rmap_i[row];
1378 #endif
1379           ilen  = imat_ilen[row];
1380           mat_i = imat_i[row];
1381           if (!ijonly) mat_a = imat_a + mat_i*bs2;
1382           mat_j = imat_j + mat_i;
1383           max2 = rbuf2_i[ct1];
1384 
1385           if (!allcolumns[is_no]){
1386             for (l=0; l<max2; l++,ct2++) {
1387 #if defined (PETSC_USE_CTABLE)
1388               ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
1389               if (tcol) {
1390 #else
1391               if ((tcol = cmap_i[rbuf3_i[ct2]])) {
1392 #endif
1393                 *mat_j++    = tcol - 1;
1394                 if (!ijonly){
1395                   ierr   = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1396                   mat_a += bs2;
1397                 }
1398                 ilen++;
1399               }
1400             }
1401           } else { /* allcolumns */
1402             for (l=0; l<max2; l++,ct2++) {
1403               *mat_j++    = rbuf3_i[ct2];
1404               if (!ijonly){
1405                 ierr   = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1406                 mat_a += bs2;
1407               }
1408               ilen++;
1409             }
1410           }
1411           imat_ilen[row] = ilen;
1412         }
1413       }
1414     }
1415   }
1416   if (!ijonly){
1417     ierr = PetscFree(r_status4);CHKERRQ(ierr);
1418     ierr = PetscFree(r_waits4);CHKERRQ(ierr);
1419     if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);}
1420     ierr = PetscFree(s_waits4);CHKERRQ(ierr);
1421     ierr = PetscFree(s_status4);CHKERRQ(ierr);
1422   }
1423 
1424   /* Restore the indices */
1425   for (i=0; i<ismax; i++) {
1426     if (!allrows[i]){
1427       ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
1428     }
1429     if (!allcolumns[i]){
1430       ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
1431     }
1432   }
1433 
1434   /* Destroy allocated memory */
1435 #if defined(PETSC_USE_CTABLE)
1436   ierr = PetscFree4(irow,icol,nrow,ncol);CHKERRQ(ierr);
1437 #else
1438   ierr = PetscFree5(irow,icol,nrow,ncol,rtable);CHKERRQ(ierr);
1439 #endif
1440   ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);
1441   ierr = PetscFree(pa);CHKERRQ(ierr);
1442 
1443   ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr);
1444   ierr = PetscFree(sbuf1);CHKERRQ(ierr);
1445   ierr = PetscFree(rbuf2);CHKERRQ(ierr);
1446   for (i=0; i<nrqr; ++i) {
1447     ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
1448   }
1449   for (i=0; i<nrqs; ++i) {
1450     ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
1451   }
1452   ierr = PetscFree3(sbuf2,req_size,req_source);CHKERRQ(ierr);
1453   ierr = PetscFree(rbuf3);CHKERRQ(ierr);
1454   ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
1455   ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
1456   if (!ijonly) {
1457     for (i=0; i<nrqs; ++i) {ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);}
1458     ierr = PetscFree(rbuf4);CHKERRQ(ierr);
1459     ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
1460     ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);
1461   }
1462 
1463 #if defined (PETSC_USE_CTABLE)
1464   for (i=0; i<ismax; i++) {
1465     ierr = PetscTableDestroy((PetscTable*)&rmap[i]);CHKERRQ(ierr);
1466   }
1467 #endif
1468   ierr = PetscFree(rmap);CHKERRQ(ierr);
1469 
1470   for (i=0; i<ismax; i++){
1471     if (!allcolumns[i]){
1472 #if defined (PETSC_USE_CTABLE)
1473       ierr = PetscTableDestroy((PetscTable*)&cmap[i]);CHKERRQ(ierr);
1474 #else
1475       ierr = PetscFree(cmap[i]);CHKERRQ(ierr);
1476 #endif
1477     }
1478   }
1479   ierr = PetscFree(cmap);CHKERRQ(ierr);
1480   ierr = PetscFree(lens);CHKERRQ(ierr);
1481 
1482   for (i=0; i<ismax; i++) {
1483     ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1484     ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1485   }
1486 
1487   c->ijonly = PETSC_FALSE; /* set back to the default */
1488   PetscFunctionReturn(0);
1489 }
1490 
1491