xref: /petsc/src/mat/impls/baij/mpi/baijov.c (revision a58ccc280d486ecd8cd7e51bcabfb966bbde3bc0)
1 /*$Id: baijov.c,v 1.53 2000/09/18 14:20:35 bsmith Exp bsmith $*/
2 
3 /*
4    Routines to compute overlapping regions of a parallel MPI matrix
5   and to find the submatrices that are shared across processors.
6 */
7 #include "src/mat/impls/baij/mpi/mpibaij.h"
8 #include "petscbt.h"
9 
10 static int MatIncreaseOverlap_MPIBAIJ_Once(Mat,int,IS *);
11 static int MatIncreaseOverlap_MPIBAIJ_Local(Mat,int,char **,int*,int**);
12 static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat,int,int **,int**,int*);
13 EXTERN int MatGetRow_MPIBAIJ(Mat,int,int*,int**,Scalar**);
14 EXTERN int MatRestoreRow_MPIBAIJ(Mat,int,int*,int**,Scalar**);
15 
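/*
   MatCompressIndicesGeneral_MPIBAIJ (below) converts index sets given in point
   (scalar) indices into index sets of block indices: each index is divided by
   the block size and duplicates are dropped. A small illustration with made-up
   values: with bs = 2, an input IS containing {0,1,4,5} compresses to the
   block IS {0,2}.
*/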
16 #undef __FUNC__
17 #define __FUNC__ /*<a name=""></a>*/"MatCompressIndicesGeneral_MPIBAIJ"
18 static int MatCompressIndicesGeneral_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out)
19 {
20   Mat_MPIBAIJ  *baij = (Mat_MPIBAIJ*)C->data;
21   int          ierr,isz,bs = baij->bs,n,i,j,*idx,ival;
22 #if defined (PETSC_USE_CTABLE)
23   PetscTable   gid1_lid1;
24   int tt, gid1, *nidx;
25   PetscTablePosition tpos;
26 #else
27   int          Nbs,*nidx;
28   PetscBT      table;
29 #endif
30 
31   PetscFunctionBegin;
32 #if defined (PETSC_USE_CTABLE)
33   ierr = PetscTableCreate(baij->mbs,&gid1_lid1);CHKERRQ(ierr);
34 #else
35   Nbs   = baij->Nbs;
36   nidx  = (int*)PetscMalloc((Nbs+1)*sizeof(int));CHKPTRQ(nidx);
37   ierr  = PetscBTCreate(Nbs,table);CHKERRQ(ierr);
38 #endif
39   for (i=0; i<imax; i++) {
40     isz  = 0;
41 #if defined (PETSC_USE_CTABLE)
42     ierr = PetscTableRemoveAll(gid1_lid1);CHKERRQ(ierr);
43 #else
44     ierr = PetscBTMemzero(Nbs,table);CHKERRQ(ierr);
45 #endif
46     ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr);
47     ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr);
48     for (j=0; j<n ; j++) {
49       ival = idx[j]/bs; /* convert the indices into block indices */
50 #if defined (PETSC_USE_CTABLE)
51       ierr = PetscTableFind(gid1_lid1,ival+1,&tt);CHKERRQ(ierr);
52       if (!tt) {
53 	ierr = PetscTableAdd(gid1_lid1,ival+1,isz+1);CHKERRQ(ierr);
54         isz++;
55       }
56 #else
57       if (ival>Nbs) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,0,"index greater than mat-dim");
58       if(!PetscBTLookupSet(table,ival)) { nidx[isz++] = ival;}
59 #endif
60     }
61     ierr = ISRestoreIndices(is_in[i],&idx);CHKERRQ(ierr);
62 #if defined (PETSC_USE_CTABLE)
63     nidx = (int*)PetscMalloc((isz+1)*sizeof(int));CHKPTRQ(nidx);
64     ierr = PetscTableGetHeadPosition(gid1_lid1,&tpos);CHKERRQ(ierr);
65     j = 0;
66     while (tpos) {
67       ierr = PetscTableGetNext(gid1_lid1,&tpos,&gid1,&tt);CHKERRQ(ierr);
68       if (tt-- > isz) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,0,"index greater than array-dim"); }
69       nidx[tt] = gid1 - 1;
70       j++;
71     }
72     if (j != isz) { SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,0,"table error: jj != isz"); }
73     ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,(is_out+i));CHKERRQ(ierr);
74     ierr = PetscFree(nidx);CHKERRQ(ierr);
75 #else
76     ierr = ISCreateGeneral(PETSC_COMM_SELF,isz,nidx,(is_out+i));CHKERRQ(ierr);
77 #endif
78   }
79 #if defined (PETSC_USE_CTABLE)
80   ierr = PetscTableDelete(gid1_lid1);CHKERRQ(ierr);
81 #else
82   ierr = PetscBTDestroy(table);CHKERRQ(ierr);
83   ierr = PetscFree(nidx);CHKERRQ(ierr);
84 #endif
85   PetscFunctionReturn(0);
86 }
87 
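/*
   MatCompressIndicesSorted_MPIBAIJ (below) performs the same point-to-block
   compression for index sets that are already sorted and cover whole blocks;
   it errors out otherwise. Illustration with made-up values: with bs = 2 the
   sorted IS {4,5,8,9} compresses to the block IS {2,4}, while an IS such as
   {4,6} is rejected because it does not contain complete blocks.
*/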
88 #undef __FUNC__
89 #define __FUNC__ /*<a name=""></a>*/"MatCompressIndicesSorted_MPIBAIJ"
90 static int MatCompressIndicesSorted_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out)
91 {
92   Mat_MPIBAIJ  *baij = (Mat_MPIBAIJ*)C->data;
93   int          ierr,bs=baij->bs,i,j,k,val,n,*idx,*nidx,*idx_local;
94   PetscTruth   flg;
95 #if defined (PETSC_USE_CTABLE)
96   int maxsz;
97 #else
98   int Nbs=baij->Nbs;
99 #endif
100   PetscFunctionBegin;
101   for (i=0; i<imax; i++) {
102     ierr = ISSorted(is_in[i],&flg);CHKERRQ(ierr);
103     if (!flg) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,0,"Indices are not sorted");
104   }
105 #if defined (PETSC_USE_CTABLE)
106   /* Now check max size */
107   for (i=0,maxsz=0; i<imax; i++) {
108     ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr);
109     ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr);
110     if (n%bs !=0) SETERRA(1,0,"Indices are not block ordered");
111     n = n/bs; /* The reduced index size */
112     if (n > maxsz) maxsz = n;
113   }
114   nidx  = (int*)PetscMalloc((maxsz+1)*sizeof(int));CHKPTRQ(nidx);
115 #else
116   nidx  = (int*)PetscMalloc((Nbs+1)*sizeof(int));CHKPTRQ(nidx);
117 #endif
118   /* Now check if the indices are in block order */
119   for (i=0; i<imax; i++) {
120     ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr);
121     ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr);
122     if (n%bs !=0) SETERRA(1,0,"Indices are not block ordered");
123 
124     n = n/bs; /* The reduced index size */
125     idx_local = idx;
126     for (j=0; j<n ; j++) {
127       val = idx_local[0];
128       if (val%bs != 0) SETERRA(1,0,"Indices are not block ordered");
129       for (k=0; k<bs; k++) {
130         if (val+k != idx_local[k]) SETERRA(1,0,"Indices are not block ordered");
131       }
132       nidx[j] = val/bs;
133       idx_local +=bs;
134     }
135     ierr = ISRestoreIndices(is_in[i],&idx);CHKERRQ(ierr);
136     ierr = ISCreateGeneral(PETSC_COMM_SELF,n,nidx,(is_out+i));CHKERRQ(ierr);
137   }
138   ierr = PetscFree(nidx);CHKERRQ(ierr);
139 
140   PetscFunctionReturn(0);
141 }
142 
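/*
   MatExpandIndices_MPIBAIJ (below) is the inverse operation: each block index
   is expanded into its bs point indices. Illustration with made-up values:
   with bs = 2 the block IS {0,2} expands to the point IS {0,1,4,5}.
*/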
143 #undef __FUNC__
144 #define __FUNC__ /*<a name=""></a>*/"MatExpandIndices_MPIBAIJ"
145 static int MatExpandIndices_MPIBAIJ(Mat C,int imax,IS *is_in,IS *is_out)
146 {
147   Mat_MPIBAIJ  *baij = (Mat_MPIBAIJ*)C->data;
148   int          ierr,bs = baij->bs,n,i,j,k,*idx,*nidx;
149 #if defined (PETSC_USE_CTABLE)
150   int maxsz;
151 #else
152   int Nbs = baij->Nbs;
153 #endif
154   PetscFunctionBegin;
155 
156 #if defined (PETSC_USE_CTABLE)
157   /* Now check max size */
158   for (i=0,maxsz=0; i<imax; i++) {
159     ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr);
160     ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr);
161     if (n*bs > maxsz) maxsz = n*bs;
162   }
163   nidx  = (int*)PetscMalloc((maxsz+1)*sizeof(int));CHKPTRQ(nidx);
164 #else
165   nidx  = (int*)PetscMalloc((Nbs*bs+1)*sizeof(int));CHKPTRQ(nidx);
166 #endif
167 
168   for (i=0; i<imax; i++) {
169     ierr = ISGetIndices(is_in[i],&idx);CHKERRQ(ierr);
170     ierr = ISGetLocalSize(is_in[i],&n);CHKERRQ(ierr);
171     for (j=0; j<n ; ++j){
172       for (k=0; k<bs; k++)
173         nidx[j*bs+k] = idx[j]*bs+k;
174     }
175     ierr = ISRestoreIndices(is_in[i],&idx);CHKERRQ(ierr);
176     ierr = ISCreateGeneral(PETSC_COMM_SELF,n*bs,nidx,is_out+i);CHKERRQ(ierr);
177   }
178   ierr = PetscFree(nidx);CHKERRQ(ierr);
179   PetscFunctionReturn(0);
180 }
181 
182 
183 #undef __FUNC__
184 #define __FUNC__ /*<a name=""></a>*/"MatIncreaseOverlap_MPIBAIJ"
185 int MatIncreaseOverlap_MPIBAIJ(Mat C,int imax,IS *is,int ov)
186 {
187   int i,ierr;
188   IS  *is_new;
189 
190   PetscFunctionBegin;
191   is_new = (IS *)PetscMalloc(imax*sizeof(IS));CHKPTRQ(is_new);
192   /* Convert the indices into block format */
193   ierr = MatCompressIndicesGeneral_MPIBAIJ(C,imax,is,is_new);CHKERRQ(ierr);
194   if (ov < 0){ SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,0,"Negative overlap specified\n");}
195   for (i=0; i<ov; ++i) {
196     ierr = MatIncreaseOverlap_MPIBAIJ_Once(C,imax,is_new);CHKERRQ(ierr);
197   }
198   for (i=0; i<imax; i++) {ierr = ISDestroy(is[i]);CHKERRQ(ierr);}
199   ierr = MatExpandIndices_MPIBAIJ(C,imax,is_new,is);CHKERRQ(ierr);
200   for (i=0; i<imax; i++) {ierr = ISDestroy(is_new[i]);CHKERRQ(ierr);}
201   ierr = PetscFree(is_new);CHKERRQ(ierr);
202   PetscFunctionReturn(0);
203 }
204 
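/*
   A minimal calling sketch (A, rows, and the overlap value are made-up
   caller-side names, not part of this file); the public MatIncreaseOverlap()
   dispatches to MatIncreaseOverlap_MPIBAIJ() above for MPIBAIJ matrices:

     IS  is;
     int rows[] = {0,1,2,3},ierr;
     ierr = ISCreateGeneral(PETSC_COMM_SELF,4,rows,&is);CHKERRQ(ierr);
     ierr = MatIncreaseOverlap(A,1,&is,2);CHKERRQ(ierr);    increase the overlap by 2 levels
     ierr = ISDestroy(is);CHKERRQ(ierr);
*/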
205 /*
206   Sample message format:
207   If a processor A wants processor B to process some elements corresponding
208   to index sets is[1] and is[5], the message is laid out as:
209   mesg [0] = 2   (no of index sets in the mesg)
210   -----------
211   mesg [1] = 1 => is[1]
212   mesg [2] = size of is[1]
213   -----------
214   mesg [3] = 5 => is[5]
215   mesg [4] = size of is[5]
216   -----------
217   mesg [5]
218   ...  mesg [n] = data of is[1]
219   -----------
220   mesg [n+1]
221   ...  mesg [m] = data of is[5]
222   -----------
223 
224   Notes:
225   nrqs - no of requests sent (or to be sent out)
226   nrqr - no of requests received (which have to be, or have been, processed)
227 */
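/*
   Concrete instance of the above format (hypothetical values): if A asks B to
   process rows {3,7} belonging to is[1] and row {12} belonging to is[5], the
   message sent is  {2, 1,2, 5,1, 3,7, 12}.
*/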
228 #undef __FUNC__
229 #define __FUNC__ /*<a name=""></a>*/"MatIncreaseOverlap_MPIBAIJ_Once"
230 static int MatIncreaseOverlap_MPIBAIJ_Once(Mat C,int imax,IS *is)
231 {
232   Mat_MPIBAIJ  *c = (Mat_MPIBAIJ*)C->data;
233   int         **idx,*n,*w1,*w2,*w3,*w4,*rtable,**data,len,*idx_i;
234   int         size,rank,Mbs,i,j,k,ierr,**rbuf,row,proc,nrqs,msz,**outdat,**ptr;
235   int         *ctr,*pa,tag,*tmp,bsz,nrqr,*isz,*isz1,**xdata,bsz1,**rbuf2;
236   PetscBT     *table;
237   MPI_Comm    comm;
238   MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2;
239   MPI_Status  *s_status,*recv_status;
240 
241   PetscFunctionBegin;
242   comm   = C->comm;
243   tag    = C->tag;
244   size   = c->size;
245   rank   = c->rank;
246   Mbs    = c->Mbs;
247 
248   len    = (imax+1)*sizeof(int*)+ (imax + Mbs)*sizeof(int);
249   idx    = (int**)PetscMalloc(len);CHKPTRQ(idx);
250   n      = (int*)(idx + imax);
251   rtable = n + imax;
252 
253   for (i=0; i<imax; i++) {
254     ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
255     ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
256   }
257 
258   /* Create hash table for the mapping :row -> proc*/
259   for (i=0,j=0; i<size; i++) {
260     len = c->rowners[i+1];
261     for (; j<len; j++) {
262       rtable[j] = i;
263     }
264   }
265 
266   /* evaluate communication - to whom each mesg goes, length of each mesg, and buffer space
267      required. Based on this, buffers are allocated, and data copied into them */
268   w1   = (int *)PetscMalloc(size*4*sizeof(int));CHKPTRQ(w1);/*  mesg size */
269   w2   = w1 + size;       /* if w2[i] marked, then a message to proc i*/
270   w3   = w2 + size;       /* no of IS that needs to be sent to proc i */
271   w4   = w3 + size;       /* temp work space used in determining w1, w2, w3 */
272   ierr = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/
273   for (i=0; i<imax; i++) {
274     ierr  = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/
275     idx_i = idx[i];
276     len   = n[i];
277     for (j=0; j<len; j++) {
278       row  = idx_i[j];
279       if (row < 0) {
280         SETERRQ(1,1,"Index set cannot have negative entries");
281       }
282       proc = rtable[row];
283       w4[proc]++;
284     }
285     for (j=0; j<size; j++){
286       if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
287     }
288   }
289 
290   nrqs     = 0;              /* no of outgoing messages */
291   msz      = 0;              /* total mesg length (for all procs) */
292   w1[rank] = 0;              /* no mesg sent to itself */
293   w3[rank] = 0;
294   for (i=0; i<size; i++) {
295     if (w1[i])  {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
296   }
297   /* pa - is list of processors to communicate with */
298   pa = (int *)PetscMalloc((nrqs+1)*sizeof(int));CHKPTRQ(pa);
299   for (i=0,j=0; i<size; i++) {
300     if (w1[i]) {pa[j] = i; j++;}
301   }
302 
303   /* Each message would have a header = 1 + 2*(no of IS) + data */
304   for (i=0; i<nrqs; i++) {
305     j      = pa[i];
306     w1[j] += w2[j] + 2*w3[j];
307     msz   += w1[j];
308   }
309 
310 
311   /* Do a global reduction to determine how many messages to expect*/
312   {
313     int *rw1;
314     rw1   = (int*)PetscMalloc(2*size*sizeof(int));CHKPTRQ(rw1);
315     ierr  = MPI_Allreduce(w1,rw1,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
316     bsz   = rw1[rank];
317     nrqr  = rw1[size+rank];
318     ierr  = PetscFree(rw1);CHKERRQ(ierr);
319   }
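  /* After the reduction (assuming PetscMaxSum_Op takes the elementwise maximum over
     the first size entries and sums the 0/1 flags in the second size entries):
     bsz  is the length of the longest message this process will receive, and
     nrqr is the number of requests it will receive. */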
320 
321   /* Allocate memory for recv buffers (probably none needed if nrqr = 0) */
322   len     = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
323   rbuf    = (int**)PetscMalloc(len);CHKPTRQ(rbuf);
324   rbuf[0] = (int*)(rbuf + nrqr);
325   for (i=1; i<nrqr; ++i) rbuf[i] = rbuf[i-1] + bsz;
326 
327   /* Post the receives */
328   r_waits1 = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(r_waits1);
329   for (i=0; i<nrqr; ++i) {
330     ierr = MPI_Irecv(rbuf[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits1+i);CHKERRQ(ierr);
331   }
332 
333   /* Allocate Memory for outgoing messages */
334   len    = 2*size*sizeof(int*) + (size+msz)*sizeof(int);
335   outdat = (int **)PetscMalloc(len);CHKPTRQ(outdat);
336   ptr    = outdat + size;     /* Pointers to the data in outgoing buffers */
337   ierr   = PetscMemzero(outdat,2*size*sizeof(int*));CHKERRQ(ierr);
338   tmp    = (int*)(outdat + 2*size);
339   ctr    = tmp + msz;
340 
341   {
342     int *iptr = tmp,ict  = 0;
343     for (i=0; i<nrqs; i++) {
344       j         = pa[i];
345       iptr     +=  ict;
346       outdat[j] = iptr;
347       ict       = w1[j];
348     }
349   }
350 
351   /* Form the outgoing messages */
352   /*plug in the headers*/
353   for (i=0; i<nrqs; i++) {
354     j            = pa[i];
355     outdat[j][0] = 0;
356     ierr = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
357     ptr[j]       = outdat[j] + 2*w3[j] + 1;
358   }
359 
360   /* Memory for doing local proc's work*/
361   {
362     int  *d_p;
363     char *t_p;
364 
365     len   = (imax)*(sizeof(PetscBT) + sizeof(int*)+ sizeof(int)) +
366       (Mbs)*imax*sizeof(int)  + (Mbs/BITSPERBYTE+1)*imax*sizeof(char) + 1;
367     table = (PetscBT *)PetscMalloc(len);CHKPTRQ(table);
368     ierr  = PetscMemzero(table,len);CHKERRQ(ierr);
369     data  = (int **)(table + imax);
370     isz   = (int  *)(data  + imax);
371     d_p   = (int  *)(isz   + imax);
372     t_p   = (char *)(d_p   + Mbs*imax);
373     for (i=0; i<imax; i++) {
374       table[i] = t_p + (Mbs/BITSPERBYTE+1)*i;
375       data[i]  = d_p + (Mbs)*i;
376     }
377   }
378 
379   /* Parse the IS and update local tables and the outgoing buf with the data*/
380   {
381     int     n_i,*data_i,isz_i,*outdat_j,ctr_j;
382     PetscBT table_i;
383 
384     for (i=0; i<imax; i++) {
385       ierr    = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
386       n_i     = n[i];
387       table_i = table[i];
388       idx_i   = idx[i];
389       data_i  = data[i];
390       isz_i   = isz[i];
391       for (j=0;  j<n_i; j++) {  /* parse the indices of each IS */
392         row  = idx_i[j];
393         proc = rtable[row];
394         if (proc != rank) { /* copy to the outgoing buffer */
395           ctr[proc]++;
396           *ptr[proc] = row;
397           ptr[proc]++;
398         }
399         else { /* Update the local table */
400           if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
401         }
402       }
403       /* Update the headers for the current IS */
404       for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
405         if ((ctr_j = ctr[j])) {
406           outdat_j        = outdat[j];
407           k               = ++outdat_j[0];
408           outdat_j[2*k]   = ctr_j;
409           outdat_j[2*k-1] = i;
410         }
411       }
412       isz[i] = isz_i;
413     }
414   }
415 
416   /*  Now  post the sends */
417   s_waits1 = (MPI_Request*)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(s_waits1);
418   for (i=0; i<nrqs; ++i) {
419     j    = pa[i];
420     ierr = MPI_Isend(outdat[j],w1[j],MPI_INT,j,tag,comm,s_waits1+i);CHKERRQ(ierr);
421   }
422 
423   /* No longer need the original indices*/
424   for (i=0; i<imax; ++i) {
425     ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
426   }
427   ierr = PetscFree(idx);CHKERRQ(ierr);
428 
429   for (i=0; i<imax; ++i) {
430     ierr = ISDestroy(is[i]);CHKERRQ(ierr);
431   }
432 
433   /* Do Local work*/
434   ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);
435 
436   /* Receive messages*/
437   {
438     int        index;
439 
440     recv_status = (MPI_Status*)PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(recv_status);
441     for (i=0; i<nrqr; ++i) {
442       ierr = MPI_Waitany(nrqr,r_waits1,&index,recv_status+i);CHKERRQ(ierr);
443     }
444 
445     s_status = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(s_status);
446     ierr     = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);
447   }
448 
449   /* Phase 1 sends are complete - deallocate buffers */
450   ierr = PetscFree(outdat);CHKERRQ(ierr);
451   ierr = PetscFree(w1);CHKERRQ(ierr);
452 
453   xdata = (int **)PetscMalloc((nrqr+1)*sizeof(int *));CHKPTRQ(xdata);
454   isz1  = (int *)PetscMalloc((nrqr+1)*sizeof(int));CHKPTRQ(isz1);
455   ierr  = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
456   ierr  = PetscFree(rbuf);CHKERRQ(ierr);
457 
458   /* Send the data back*/
459   /* Do a global reduction to know the buffer space req for incoming messages*/
460   {
461     int *rw1,*rw2;
462 
463     rw1  = (int *)PetscMalloc(2*size*sizeof(int));CHKPTRQ(rw1);
464     ierr = PetscMemzero(rw1,2*size*sizeof(int));CHKERRQ(ierr);
465     rw2  = rw1+size;
466     for (i=0; i<nrqr; ++i) {
467       proc      = recv_status[i].MPI_SOURCE;
468       rw1[proc] = isz1[i];
469     }
470 
471     ierr   = MPI_Allreduce(rw1,rw2,size,MPI_INT,MPI_MAX,comm);CHKERRQ(ierr);
472     bsz1   = rw2[rank];
473     ierr = PetscFree(rw1);CHKERRQ(ierr);
474   }
475 
476   /* Allocate buffers*/
477 
478   /* Allocate memory for recv buffers (probably none needed if nrqs = 0) */
479   len      = (nrqs+1)*sizeof(int*) + nrqs*bsz1*sizeof(int);
480   rbuf2    = (int**)PetscMalloc(len);CHKPTRQ(rbuf2);
481   rbuf2[0] = (int*)(rbuf2 + nrqs);
482   for (i=1; i<nrqs; ++i) rbuf2[i] = rbuf2[i-1] + bsz1;
483 
484   /* Post the receives */
485   r_waits2 = (MPI_Request *)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits2);
486   for (i=0; i<nrqs; ++i) {
487     ierr = MPI_Irecv(rbuf2[i],bsz1,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits2+i);CHKERRQ(ierr);
488   }
489 
490   /*  Now  post the sends */
491   s_waits2 = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits2);
492   for (i=0; i<nrqr; ++i) {
493     j    = recv_status[i].MPI_SOURCE;
494     ierr = MPI_Isend(xdata[i],isz1[i],MPI_INT,j,tag,comm,s_waits2+i);CHKERRQ(ierr);
495   }
496 
497   /* receive work done on other processors*/
498   {
499     int         index,is_no,ct1,max,*rbuf2_i,isz_i,*data_i,jmax;
500     PetscBT     table_i;
501     MPI_Status  *status2;
502 
503     status2 = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(status2);
504 
505     for (i=0; i<nrqs; ++i) {
506       ierr = MPI_Waitany(nrqs,r_waits2,&index,status2+i);CHKERRQ(ierr);
507       /* Process the message*/
508       rbuf2_i = rbuf2[index];
509       ct1     = 2*rbuf2_i[0]+1;
510       jmax    = rbuf2[index][0];
511       for (j=1; j<=jmax; j++) {
512         max     = rbuf2_i[2*j];
513         is_no   = rbuf2_i[2*j-1];
514         isz_i   = isz[is_no];
515         data_i  = data[is_no];
516         table_i = table[is_no];
517         for (k=0; k<max; k++,ct1++) {
518           row = rbuf2_i[ct1];
519           if (!PetscBTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
520         }
521         isz[is_no] = isz_i;
522       }
523     }
524     ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);
525     ierr = PetscFree(status2);CHKERRQ(ierr);
526   }
527 
528   for (i=0; i<imax; ++i) {
529     ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],is+i);CHKERRQ(ierr);
530   }
531 
532   ierr = PetscFree(pa);CHKERRQ(ierr);
533   ierr = PetscFree(rbuf2);CHKERRQ(ierr);
534   ierr = PetscFree(s_waits1);CHKERRQ(ierr);
535   ierr = PetscFree(r_waits1);CHKERRQ(ierr);
536   ierr = PetscFree(s_waits2);CHKERRQ(ierr);
537   ierr = PetscFree(r_waits2);CHKERRQ(ierr);
538   ierr = PetscFree(table);CHKERRQ(ierr);
539   ierr = PetscFree(s_status);CHKERRQ(ierr);
540   ierr = PetscFree(recv_status);CHKERRQ(ierr);
541   ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
542   ierr = PetscFree(xdata);CHKERRQ(ierr);
543   ierr = PetscFree(isz1);CHKERRQ(ierr);
544   PetscFunctionReturn(0);
545 }
546 
547 #undef __FUNC__
548 #define __FUNC__ /*<a name=""></a>*/"MatIncreaseOverlap_MPIBAIJ_Local"
549 /*
550    MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap, to do
551        the work on the local processor.
552 
553      Inputs:
554       C      - MAT_MPIBAIJ;
555       imax - total no of index sets processed at a time;
556       table  - an array of PetscBT bit tables, each of size Mbs bits.
557 
558      Output:
559       isz    - array containing the count of the solution elements corresponding
560                to each index set;
561       data   - pointer to the solutions
562 */
563 static int MatIncreaseOverlap_MPIBAIJ_Local(Mat C,int imax,PetscBT *table,int *isz,int **data)
564 {
565   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
566   Mat         A = c->A,B = c->B;
567   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
568   int         start,end,val,max,rstart,cstart,*ai,*aj;
569   int         *bi,*bj,*garray,i,j,k,row,*data_i,isz_i;
570   PetscBT     table_i;
571 
572   PetscFunctionBegin;
573   rstart = c->rstart;
574   cstart = c->cstart;
575   ai     = a->i;
576   aj     = a->j;
577   bi     = b->i;
578   bj     = b->j;
579   garray = c->garray;
580 
581 
582   for (i=0; i<imax; i++) {
583     data_i  = data[i];
584     table_i = table[i];
585     isz_i   = isz[i];
586     for (j=0,max=isz[i]; j<max; j++) {
587       row   = data_i[j] - rstart;
588       start = ai[row];
589       end   = ai[row+1];
590       for (k=start; k<end; k++) { /* Amat */
591         val = aj[k] + cstart;
592         if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
593       }
594       start = bi[row];
595       end   = bi[row+1];
596       for (k=start; k<end; k++) { /* Bmat */
597         val = garray[bj[k]];
598         if (!PetscBTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
599       }
600     }
601     isz[i] = isz_i;
602   }
603   PetscFunctionReturn(0);
604 }
605 #undef __FUNC__
606 #define __FUNC__ /*<a name=""></a>*/"MatIncreaseOverlap_MPIBAIJ_Receive"
607 /*
608       MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
609          and return the output
610 
611          Input:
612            C    - the matrix
613            nrqr - no of messages being processed.
614            rbuf - an array of pointers to the received requests
615 
616          Output:
617            xdata - array of messages to be sent back
618            isz1  - size of each message
619 
620   For better efficiency perhaps we should malloc each xdata[i] separately;
621 then if a realloc is required we need only copy the data for that one row
622 rather than for all previous rows, as happens now where a single large chunk of
623 memory is used.
624 
625 */
626 static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,int nrqr,int **rbuf,int **xdata,int * isz1)
627 {
628   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
629   Mat         A = c->A,B = c->B;
630   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
631   int         rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
632   int         row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
633   int         val,max1,max2,rank,Mbs,no_malloc =0,*tmp,new_estimate,ctr;
634   int         *rbuf_i,kmax,rbuf_0,ierr;
635   PetscBT     xtable;
636 
637   PetscFunctionBegin;
638   rank   = c->rank;
639   Mbs    = c->Mbs;
640   rstart = c->rstart;
641   cstart = c->cstart;
642   ai     = a->i;
643   aj     = a->j;
644   bi     = b->i;
645   bj     = b->j;
646   garray = c->garray;
647 
648 
649   for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
650     rbuf_i  =  rbuf[i];
651     rbuf_0  =  rbuf_i[0];
652     ct     += rbuf_0;
653     for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
654   }
655 
656   if (c->Mbs) max1 = ct*(a->nz +b->nz)/c->Mbs;
657   else        max1 = 1;
658   mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
659   xdata[0]     = (int*)PetscMalloc(mem_estimate*sizeof(int));CHKPTRQ(xdata[0]);
660   ++no_malloc;
661   ierr         = PetscBTCreate(Mbs,xtable);CHKERRQ(ierr);
662   ierr         = PetscMemzero(isz1,nrqr*sizeof(int));CHKERRQ(ierr);
663 
664   ct3 = 0;
665   for (i=0; i<nrqr; i++) { /* for each mesg from proc i */
666     rbuf_i =  rbuf[i];
667     rbuf_0 =  rbuf_i[0];
668     ct1    =  2*rbuf_0+1;
669     ct2    =  ct1;
670     ct3    += ct1;
671     for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/
672       ierr = PetscBTMemzero(Mbs,xtable);CHKERRQ(ierr);
673       oct2 = ct2;
674       kmax = rbuf_i[2*j];
675       for (k=0; k<kmax; k++,ct1++) {
676         row = rbuf_i[ct1];
677         if (!PetscBTLookupSet(xtable,row)) {
678           if (!(ct3 < mem_estimate)) {
679             new_estimate = (int)(1.5*mem_estimate)+1;
680             tmp          = (int*)PetscMalloc(new_estimate * sizeof(int));CHKPTRQ(tmp);
681             ierr = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
682             ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
683             xdata[0]     = tmp;
684             mem_estimate = new_estimate; ++no_malloc;
685             for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
686           }
687           xdata[i][ct2++] = row;
688           ct3++;
689         }
690       }
691       for (k=oct2,max2=ct2; k<max2; k++)  {
692         row   = xdata[i][k] - rstart;
693         start = ai[row];
694         end   = ai[row+1];
695         for (l=start; l<end; l++) {
696           val = aj[l] + cstart;
697           if (!PetscBTLookupSet(xtable,val)) {
698             if (!(ct3 < mem_estimate)) {
699               new_estimate = (int)(1.5*mem_estimate)+1;
700               tmp          = (int*)PetscMalloc(new_estimate * sizeof(int));CHKPTRQ(tmp);
701               ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
702               ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
703               xdata[0]     = tmp;
704               mem_estimate = new_estimate; ++no_malloc;
705               for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
706             }
707             xdata[i][ct2++] = val;
708             ct3++;
709           }
710         }
711         start = bi[row];
712         end   = bi[row+1];
713         for (l=start; l<end; l++) {
714           val = garray[bj[l]];
715           if (!PetscBTLookupSet(xtable,val)) {
716             if (!(ct3 < mem_estimate)) {
717               new_estimate = (int)(1.5*mem_estimate)+1;
718               tmp          = (int*)PetscMalloc(new_estimate * sizeof(int));CHKPTRQ(tmp);
719               ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));CHKERRQ(ierr);
720               ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
721               xdata[0]     = tmp;
722               mem_estimate = new_estimate; ++no_malloc;
723               for (ctr =1; ctr <=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
724             }
725             xdata[i][ct2++] = val;
726             ct3++;
727           }
728         }
729       }
730       /* Update the header*/
731       xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
732       xdata[i][2*j-1] = rbuf_i[2*j-1];
733     }
734     xdata[i][0] = rbuf_0;
735     xdata[i+1]  = xdata[i] + ct2;
736     isz1[i]     = ct2; /* size of each message */
737   }
738   ierr = PetscBTDestroy(xtable);CHKERRQ(ierr);
739   PLogInfo(0,"MatIncreaseOverlap_MPIBAIJ:[%d] Allocated %d bytes, required %d, no of mallocs = %d\n",rank,mem_estimate,ct3,no_malloc);
740   PetscFunctionReturn(0);
741 }
742 
743 static int MatGetSubMatrices_MPIBAIJ_local(Mat,int,IS *,IS *,MatReuse,Mat *);
744 
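/*
   A minimal calling sketch (A, bs = 2, and the index values are made-up
   caller-side assumptions); the public MatGetSubMatrices() dispatches here for
   MPIBAIJ matrices. Both the row and column index sets must be sorted and must
   cover whole blocks:

     IS  isrow,iscol;
     Mat *submat;
     int idx[] = {0,1,2,3},ierr;
     ierr = ISCreateGeneral(PETSC_COMM_SELF,4,idx,&isrow);CHKERRQ(ierr);
     ierr = ISCreateGeneral(PETSC_COMM_SELF,4,idx,&iscol);CHKERRQ(ierr);
     ierr = MatGetSubMatrices(A,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
*/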
745 #undef __FUNC__
746 #define __FUNC__ /*<a name=""></a>*/"MatGetSubMatrices_MPIBAIJ"
747 int MatGetSubMatrices_MPIBAIJ(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat **submat)
748 {
749   IS          *isrow_new,*iscol_new;
750   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
751   int         nmax,nstages_local,nstages,i,pos,max_no,ierr;
752 
753   PetscFunctionBegin;
754   /* The compression and expansion should be avoided. They do not point
755      out errors and might change the indices, hence this is bug-prone */
756 
757   isrow_new = (IS *)PetscMalloc(2*(ismax+1)*sizeof(IS));CHKPTRQ(isrow_new);
758   iscol_new = isrow_new + ismax;
759   ierr = MatCompressIndicesSorted_MPIBAIJ(C,ismax,isrow,isrow_new);CHKERRQ(ierr);
760   ierr = MatCompressIndicesSorted_MPIBAIJ(C,ismax,iscol,iscol_new);CHKERRQ(ierr);
761 
762   /* Allocate memory to hold all the submatrices */
763   if (scall != MAT_REUSE_MATRIX) {
764     *submat = (Mat *)PetscMalloc((ismax+1)*sizeof(Mat));CHKPTRQ(*submat);
765   }
766   /* Determine the number of stages through which submatrices are done */
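  /* (This apparently caps the per-stage integer workspace, which grows as nmax*Nbs ints, at roughly 20 MB.) */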
767   nmax          = 20*1000000 / (c->Nbs * sizeof(int));
768   if (!nmax) nmax = 1;
769   nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
770 
771   /* Make sure every processor loops through the nstages */
772   ierr = MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);CHKERRQ(ierr);
773   for (i=0,pos=0; i<nstages; i++) {
774     if (pos+nmax <= ismax) max_no = nmax;
775     else if (pos == ismax) max_no = 0;
776     else                   max_no = ismax-pos;
777     ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,*submat+pos);CHKERRQ(ierr);
778     pos += max_no;
779   }
780 
781   for (i=0; i<ismax; i++) {
782     ierr = ISDestroy(isrow_new[i]);CHKERRQ(ierr);
783     ierr = ISDestroy(iscol_new[i]);CHKERRQ(ierr);
784   }
785   ierr = PetscFree(isrow_new);CHKERRQ(ierr);
786   PetscFunctionReturn(0);
787 }
788 
789 #if defined (PETSC_USE_CTABLE)
790 #undef __FUNC__
791 #define __FUNC__ "PetscGetProc"
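/*
   PetscGetProc - maps a global block row gid to the owning process, given the
   ownership ranges proc_gnode[] (process p owns block rows proc_gnode[p] up to
   proc_gnode[p+1]-1). It starts from a proportional guess and walks up or down.
   Illustration with made-up values: numprocs = 3, proc_gnode = {0,4,8,10},
   gid = 5 gives the initial guess fproc = 2, which the loop corrects down to
   process 1.
*/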
792 int PetscGetProc(const int gid, const int numprocs, const int proc_gnode[], int *proc)
793 {
794   int nGlobalNd = proc_gnode[numprocs];
795   int fproc = (int) ((float)gid * (float)numprocs / (float)nGlobalNd + 0.5);
796 
797   PetscFunctionBegin;
798   /* if(fproc < 0) SETERRQ(1,1,"fproc < 0");*/
799   if (fproc > numprocs) fproc = numprocs;
800   while (gid < proc_gnode[fproc] || gid >= proc_gnode[fproc+1]) {
801     if (gid < proc_gnode[fproc]) fproc--;
802     else                         fproc++;
803   }
804   /* if(fproc<0 || fproc>=numprocs) { SETERRQ(1,1,"fproc < 0 || fproc >= numprocs"); }*/
805   *proc = fproc;
806   PetscFunctionReturn(0);
807 }
808 #endif
809 
810 /* -------------------------------------------------------------------------*/
811 #undef __FUNC__
812 #define __FUNC__ /*<a name=""></a>*/"MatGetSubMatrices_MPIBAIJ_local"
813 static int MatGetSubMatrices_MPIBAIJ_local(Mat C,int ismax,IS *isrow,IS *iscol,MatReuse scall,Mat *submats)
814 {
815   Mat_MPIBAIJ *c = (Mat_MPIBAIJ*)C->data;
816   Mat         A = c->A;
817   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)c->B->data,*mat;
818   int         **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,start,end,size;
819   int         **sbuf1,**sbuf2,rank,i,j,k,l,ct1,ct2,ierr,**rbuf1,row,proc;
820   int         nrqs,msz,**ptr,index,*req_size,*ctr,*pa,*tmp,tcol,bsz,nrqr;
821   int         **rbuf3,*req_source,**sbuf_aj,**rbuf2,max1,max2;
822   int         **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i;
823   int         len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
824   int         bs=c->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA,*cworkB;
825   int         cstart = c->cstart,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
826   int         *bmap = c->garray,ctmp,rstart=c->rstart,tag0,tag1,tag2,tag3;
827   MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
828   MPI_Request *r_waits4,*s_waits3,*s_waits4;
829   MPI_Status  *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
830   MPI_Status  *r_status3,*r_status4,*s_status4;
831   MPI_Comm    comm;
832   MatScalar   **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i,*vworkA,*vworkB;
833   MatScalar   *a_a=a->a,*b_a=b->a;
834   PetscTruth  flag;
835   int *rw1;
836 #if defined (PETSC_USE_CTABLE)
837   int tt;
838   PetscTable  *rowmaps,*colmaps,lrow1_grow1,lcol1_gcol1;
839 #else
840   int         **cmap,*cmap_i,*rtable,*rmap_i,**rmap, Mbs = c->Mbs;
841 #endif
842 
843   PetscFunctionBegin;
844   comm   = C->comm;
845   tag0   = C->tag;
846   size   = c->size;
847   rank   = c->rank;
848 
849   /* Get some new tags to keep the communication clean */
850   ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
851   ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
852   ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);
853 
854   /* Check if the col indices are sorted */
855   for (i=0; i<ismax; i++) {
856     ierr = ISSorted(iscol[i],(PetscTruth*)&j);CHKERRQ(ierr);
857     if (!j) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,0,"IS is not sorted");
858   }
859 
860   len    = (2*ismax+1)*(sizeof(int*)+ sizeof(int));
861 #if !defined (PETSC_USE_CTABLE)
862   len    += (Mbs+1)*sizeof(int);
863 #endif
864   irow   = (int **)PetscMalloc(len);CHKPTRQ(irow);
865   icol   = irow + ismax;
866   nrow   = (int*)(icol + ismax);
867   ncol   = nrow + ismax;
868 #if !defined (PETSC_USE_CTABLE)
869   rtable = ncol + ismax;
870   /* Create hash table for the mapping :row -> proc*/
871   for (i=0,j=0; i<size; i++) {
872     jmax = c->rowners[i+1];
873     for (; j<jmax; j++) {
874       rtable[j] = i;
875     }
876   }
877 #endif
878 
879   for (i=0; i<ismax; i++) {
880     ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
881     ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
882     ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
883     ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
884   }
885 
886   /* evaluate communication - to whom each mesg goes, length of each mesg, and buffer space
887      required. Based on this, buffers are allocated, and data copied into them */
888   w1     = (int *)PetscMalloc(size*4*sizeof(int));CHKPTRQ(w1); /* mesg size */
889   w2     = w1 + size;      /* if w2[i] marked, then a message to proc i*/
890   w3     = w2 + size;      /* no of IS that needs to be sent to proc i */
891   w4     = w3 + size;      /* temp work space used in determining w1, w2, w3 */
892   ierr   = PetscMemzero(w1,size*3*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/
893   for (i=0; i<ismax; i++) {
894     ierr   = PetscMemzero(w4,size*sizeof(int));CHKERRQ(ierr); /* initialise work vector*/
895     jmax   = nrow[i];
896     irow_i = irow[i];
897     for (j=0; j<jmax; j++) {
898       row  = irow_i[j];
899 #if defined (PETSC_USE_CTABLE)
900       ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
901 #else
902       proc = rtable[row];
903 #endif
904       w4[proc]++;
905     }
906     for (j=0; j<size; j++) {
907       if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
908     }
909   }
910 
911   nrqs     = 0;              /* no of outgoing messages */
912   msz      = 0;              /* total mesg length for all proc */
913   w1[rank] = 0;              /* no mesg sent to intself */
914   w3[rank] = 0;
915   for (i=0; i<size; i++) {
916     if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
917   }
918   pa = (int *)PetscMalloc((nrqs+1)*sizeof(int));CHKPTRQ(pa); /*(proc -array)*/
919   for (i=0,j=0; i<size; i++) {
920     if (w1[i]) { pa[j] = i; j++; }
921   }
922 
923   /* Each message would have a header = 1 + 2*(no of IS) + data */
924   for (i=0; i<nrqs; i++) {
925     j     = pa[i];
926     w1[j] += w2[j] + 2* w3[j];
927     msz   += w1[j];
928   }
929   /* Do a global reduction to determine how many messages to expect*/
930   rw1  = (int *)PetscMalloc(2*size*sizeof(int));CHKPTRQ(rw1);
931   ierr = MPI_Allreduce(w1,rw1,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
932   bsz  = rw1[rank];
933   nrqr = rw1[size+rank];
934   ierr = PetscFree(rw1);CHKERRQ(ierr);
935 
936   /* Allocate memory for recv buffers (probably none needed if nrqr = 0) */
937   len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
938   rbuf1    = (int**)PetscMalloc(len);CHKPTRQ(rbuf1);
939   rbuf1[0] = (int*)(rbuf1 + nrqr);
940   for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;
941 
942   /* Post the receives */
943   r_waits1 = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(r_waits1);
944   for (i=0; i<nrqr; ++i) {
945     ierr = MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);CHKERRQ(ierr);
946   }
947 
948   /* Allocate Memory for outgoing messages */
949   len      = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
950   sbuf1    = (int **)PetscMalloc(len);CHKPTRQ(sbuf1);
951   ptr      = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
952   ierr     = PetscMemzero(sbuf1,2*size*sizeof(int*));CHKERRQ(ierr);
953   /* allocate memory for outgoing data + buf to receive the first reply */
954   tmp      = (int*)(ptr + size);
955   ctr      = tmp + 2*msz;
956 
957   {
958     int *iptr = tmp,ict = 0;
959     for (i=0; i<nrqs; i++) {
960       j         = pa[i];
961       iptr     += ict;
962       sbuf1[j]  = iptr;
963       ict       = w1[j];
964     }
965   }
966 
967   /* Form the outgoing messages */
968   /* Initialise the header space */
969   for (i=0; i<nrqs; i++) {
970     j           = pa[i];
971     sbuf1[j][0] = 0;
972     ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(int));CHKERRQ(ierr);
973     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
974   }
975 
976   /* Parse the isrow and copy data into outbuf */
977   for (i=0; i<ismax; i++) {
978     ierr   = PetscMemzero(ctr,size*sizeof(int));CHKERRQ(ierr);
979     irow_i = irow[i];
980     jmax   = nrow[i];
981     for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
982       row  = irow_i[j];
983 #if defined (PETSC_USE_CTABLE)
984       ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
985 #else
986       proc = rtable[row];
987 #endif
988       if (proc != rank) { /* copy to the outgoing buf*/
989         ctr[proc]++;
990         *ptr[proc] = row;
991         ptr[proc]++;
992       }
993     }
994     /* Update the headers for the current IS */
995     for (j=0; j<size; j++) { /* Can Optimise this loop too */
996       if ((ctr_j = ctr[j])) {
997         sbuf1_j        = sbuf1[j];
998         k              = ++sbuf1_j[0];
999         sbuf1_j[2*k]   = ctr_j;
1000         sbuf1_j[2*k-1] = i;
1001       }
1002     }
1003   }
1004 
1005   /*  Now  post the sends */
1006   s_waits1 = (MPI_Request*)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(s_waits1);
1007   for (i=0; i<nrqs; ++i) {
1008     j = pa[i];
1009     ierr = MPI_Isend(sbuf1[j],w1[j],MPI_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
1010   }
1011 
1012   /* Post Receives to capture the buffer size */
1013   r_waits2 = (MPI_Request*)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits2);
1014   rbuf2    = (int**)PetscMalloc((nrqs+1)*sizeof(int *));CHKPTRQ(rbuf2);
1015   rbuf2[0] = tmp + msz;
1016   for (i=1; i<nrqs; ++i) {
1017     j        = pa[i];
1018     rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
1019   }
1020   for (i=0; i<nrqs; ++i) {
1021     j    = pa[i];
1022     ierr = MPI_Irecv(rbuf2[i],w1[j],MPI_INT,j,tag1,comm,r_waits2+i);CHKERRQ(ierr);
1023   }
1024 
1025   /* Send to other procs the buf size they should allocate */
1026 
1027   /* Receive messages*/
1028   s_waits2   = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits2);
1029   r_status1  = (MPI_Status*)PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(r_status1);
1030   len        = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*);
1031   sbuf2      = (int**)PetscMalloc(len);CHKPTRQ(sbuf2);
1032   req_size   = (int*)(sbuf2 + nrqr);
1033   req_source = req_size + nrqr;
1034 
1035   {
1036     Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*)c->A->data,*sB = (Mat_SeqBAIJ*)c->B->data;
1037     int        *sAi = sA->i,*sBi = sB->i,id,*sbuf2_i;
1038 
1039     for (i=0; i<nrqr; ++i) {
1040       ierr = MPI_Waitany(nrqr,r_waits1,&index,r_status1+i);CHKERRQ(ierr);
1041       req_size[index] = 0;
1042       rbuf1_i         = rbuf1[index];
1043       start           = 2*rbuf1_i[0] + 1;
1044       ierr = MPI_Get_count(r_status1+i,MPI_INT,&end);CHKERRQ(ierr);
1045       sbuf2[index] = (int *)PetscMalloc(end*sizeof(int));CHKPTRQ(sbuf2[index]);
1046       sbuf2_i      = sbuf2[index];
1047       for (j=start; j<end; j++) {
1048         id               = rbuf1_i[j] - rstart;
1049         ncols            = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
1050         sbuf2_i[j]       = ncols;
1051         req_size[index] += ncols;
1052       }
1053       req_source[index] = r_status1[i].MPI_SOURCE;
1054       /* form the header */
1055       sbuf2_i[0]   = req_size[index];
1056       for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
1057       ierr = MPI_Isend(sbuf2_i,end,MPI_INT,req_source[index],tag1,comm,s_waits2+i);CHKERRQ(ierr);
1058     }
1059   }
1060   ierr = PetscFree(r_status1);CHKERRQ(ierr);
1061   ierr = PetscFree(r_waits1);CHKERRQ(ierr);
1062 
1063   /*  recv buffer sizes */
1064   /* Receive messages*/
1065 
1066   rbuf3     = (int**)PetscMalloc((nrqs+1)*sizeof(int*));CHKPTRQ(rbuf3);
1067   rbuf4     = (MatScalar**)PetscMalloc((nrqs+1)*sizeof(MatScalar*));CHKPTRQ(rbuf4);
1068   r_waits3  = (MPI_Request*)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits3);
1069   r_waits4  = (MPI_Request*)PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits4);
1070   r_status2 = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status2);
1071 
1072   for (i=0; i<nrqs; ++i) {
1073     ierr = MPI_Waitany(nrqs,r_waits2,&index,r_status2+i);CHKERRQ(ierr);
1074     rbuf3[index] = (int *)PetscMalloc(rbuf2[index][0]*sizeof(int));CHKPTRQ(rbuf3[index]);
1075     rbuf4[index] = (MatScalar *)PetscMalloc(rbuf2[index][0]*bs2*sizeof(MatScalar));CHKPTRQ(rbuf4[index]);
1076     ierr = MPI_Irecv(rbuf3[index],rbuf2[index][0],MPI_INT,
1077                      r_status2[i].MPI_SOURCE,tag2,comm,r_waits3+index);CHKERRQ(ierr);
1078     ierr = MPI_Irecv(rbuf4[index],rbuf2[index][0]*bs2,MPIU_MATSCALAR,
1079                      r_status2[i].MPI_SOURCE,tag3,comm,r_waits4+index);CHKERRQ(ierr);
1080   }
1081   ierr = PetscFree(r_status2);CHKERRQ(ierr);
1082   ierr = PetscFree(r_waits2);CHKERRQ(ierr);
1083 
1084   /* Wait on sends1 and sends2 */
1085   s_status1 = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(s_status1);
1086   s_status2 = (MPI_Status*)PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status2);
1087 
1088   ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
1089   ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
1090   ierr = PetscFree(s_status1);CHKERRQ(ierr);
1091   ierr = PetscFree(s_status2);CHKERRQ(ierr);
1092   ierr = PetscFree(s_waits1);CHKERRQ(ierr);
1093   ierr = PetscFree(s_waits2);CHKERRQ(ierr);
1094 
1095   /* Now allocate buffers for a->j, and send them off */
1096   sbuf_aj = (int **)PetscMalloc((nrqr+1)*sizeof(int *));CHKPTRQ(sbuf_aj);
1097   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
1098   sbuf_aj[0] = (int*)PetscMalloc((j+1)*sizeof(int));CHKPTRQ(sbuf_aj[0]);
1099   for (i=1; i<nrqr; i++)  sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
1100 
1101   s_waits3 = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits3);
1102   {
1103      for (i=0; i<nrqr; i++) {
1104       rbuf1_i   = rbuf1[i];
1105       sbuf_aj_i = sbuf_aj[i];
1106       ct1       = 2*rbuf1_i[0] + 1;
1107       ct2       = 0;
1108       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
1109         kmax = rbuf1[i][2*j];
1110         for (k=0; k<kmax; k++,ct1++) {
1111           row    = rbuf1_i[ct1] - rstart;
1112           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1113           ncols  = nzA + nzB;
1114           cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
1115 
1116           /* load the column indices for this row into cols*/
1117           cols  = sbuf_aj_i + ct2;
1118           for (l=0; l<nzB; l++) {
1119             if ((ctmp = bmap[cworkB[l]]) < cstart)  cols[l] = ctmp;
1120             else break;
1121           }
1122           imark = l;
1123           for (l=0; l<nzA; l++)   cols[imark+l] = cstart + cworkA[l];
1124           for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]];
1125           ct2 += ncols;
1126         }
1127       }
1128       ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);CHKERRQ(ierr);
1129     }
1130   }
1131   r_status3 = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status3);
1132   s_status3 = (MPI_Status*)PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status3);
1133 
1134   /* Allocate buffers for a->a, and send them off */
1135   sbuf_aa = (MatScalar **)PetscMalloc((nrqr+1)*sizeof(MatScalar *));CHKPTRQ(sbuf_aa);
1136   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
1137   sbuf_aa[0] = (MatScalar*)PetscMalloc((j+1)*bs2*sizeof(MatScalar));CHKPTRQ(sbuf_aa[0]);
1138   for (i=1; i<nrqr; i++)  sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;
1139 
1140   s_waits4 = (MPI_Request*)PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits4);
1141   {
1142     for (i=0; i<nrqr; i++) {
1143       rbuf1_i   = rbuf1[i];
1144       sbuf_aa_i = sbuf_aa[i];
1145       ct1       = 2*rbuf1_i[0]+1;
1146       ct2       = 0;
1147       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
1148         kmax = rbuf1_i[2*j];
1149         for (k=0; k<kmax; k++,ct1++) {
1150           row    = rbuf1_i[ct1] - rstart;
1151           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1152           ncols  = nzA + nzB;
1153           cworkA = a_j + a_i[row];     cworkB = b_j + b_i[row];
1154           vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;
1155 
1156           /* load the column values for this row into vals*/
1157           vals  = sbuf_aa_i+ct2*bs2;
1158           for (l=0; l<nzB; l++) {
1159             if ((bmap[cworkB[l]]) < cstart) {
1160               ierr = PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1161             }
1162             else break;
1163           }
1164           imark = l;
1165           for (l=0; l<nzA; l++) {
1166             ierr = PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1167           }
1168           for (l=imark; l<nzB; l++) {
1169             ierr = PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1170           }
1171           ct2 += ncols;
1172         }
1173       }
1174       ierr = MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_MATSCALAR,req_source[i],tag3,comm,s_waits4+i);CHKERRQ(ierr);
1175     }
1176   }
1177   r_status4 = (MPI_Status*)PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status4);
1178   s_status4 = (MPI_Status*)PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status4);
1179   ierr = PetscFree(rbuf1);CHKERRQ(ierr);
1180 
1181   /* Form the matrix */
1182   /* create col map */
1183   {
1184     int *icol_i;
1185 #if defined (PETSC_USE_CTABLE)
1186     /* Create the column maps */
1187     colmaps = (PetscTable*)PetscMalloc((1+ismax)*sizeof(PetscTable));CHKPTRQ(colmaps);
1188     for (i=0; i<ismax+1; i++) {
1189       ierr = PetscTableCreate(((i<ismax) ? ncol[i] : ncol[i-1])+1,&colmaps[i]);CHKERRQ(ierr);
1190     }
1191 #else
1192     len     = (1+ismax)*sizeof(int*)+ ismax*c->Nbs*sizeof(int);
1193     cmap    = (int **)PetscMalloc(len);CHKPTRQ(cmap);
1194     cmap[0] = (int *)(cmap + ismax);
1195     ierr    = PetscMemzero(cmap[0],(1+ismax*c->Nbs)*sizeof(int));CHKERRQ(ierr);
1196     for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + c->Nbs; }
1197 #endif
1198     for (i=0; i<ismax; i++) {
1199       jmax   = ncol[i];
1200       icol_i = icol[i];
1201 #if defined (PETSC_USE_CTABLE)
1202       lcol1_gcol1 = colmaps[i];
1203       for (j=0; j<jmax; j++) {
1204 	ierr = PetscTableAdd(lcol1_gcol1,icol_i[j]+1,j+1);CHKERRQ(ierr);
1205       }
1206 #else
1207       cmap_i = cmap[i];
1208       for (j=0; j<jmax; j++) {
1209         cmap_i[icol_i[j]] = j+1;
1210       }
1211 #endif
1212     }
1213   }
1214 
1215   /* Create lens which is required for MatCreate... */
1216   for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
1217   len     = (1+ismax)*sizeof(int*)+ j*sizeof(int);
1218   lens    = (int **)PetscMalloc(len);CHKPTRQ(lens);
1219   lens[0] = (int *)(lens + ismax);
1220   ierr    = PetscMemzero(lens[0],j*sizeof(int));CHKERRQ(ierr);
1221   for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }
1222 
1223   /* Update lens from local data */
1224   for (i=0; i<ismax; i++) {
1225     jmax   = nrow[i];
1226 #if defined (PETSC_USE_CTABLE)
1227     lcol1_gcol1 = colmaps[i];
1228 #else
1229     cmap_i = cmap[i];
1230 #endif
1231     irow_i = irow[i];
1232     lens_i = lens[i];
1233     for (j=0; j<jmax; j++) {
1234       row  = irow_i[j];
1235 #if defined (PETSC_USE_CTABLE)
1236       ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
1237 #else
1238       proc = rtable[row];
1239 #endif
1240       if (proc == rank) {
1241         /* Get indices from matA and then from matB */
1242         row    = row - rstart;
1243         nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1244         cworkA =  a_j + a_i[row]; cworkB = b_j + b_i[row];
1245 #if defined (PETSC_USE_CTABLE)
1246        for (k=0; k<nzA; k++) {
1247 	 ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[k]+1,&tt);CHKERRQ(ierr);
1248           if (tt) { lens_i[j]++; }
1249         }
1250         for (k=0; k<nzB; k++) {
1251 	  ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[k]]+1,&tt);CHKERRQ(ierr);
1252           if (tt) { lens_i[j]++; }
1253         }
1254 #else
1255         for (k=0; k<nzA; k++) {
1256           if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++; }
1257         }
1258         for (k=0; k<nzB; k++) {
1259           if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++; }
1260         }
1261 #endif
1262       }
1263     }
1264   }
1265 #if defined (PETSC_USE_CTABLE)
1266   /* Create row map*/
1267   rowmaps = (PetscTable*)PetscMalloc((1+ismax)*sizeof(PetscTable));CHKPTRQ(rowmaps);
1268   for (i=0; i<ismax+1; i++){
1269     ierr = PetscTableCreate((i<ismax) ? nrow[i] : nrow[i-1],&rowmaps[i]);CHKERRQ(ierr);
1270   }
1271 #else
1272   /* Create row map*/
1273   len     = (1+ismax)*sizeof(int*)+ ismax*Mbs*sizeof(int);
1274   rmap    = (int **)PetscMalloc(len);CHKPTRQ(rmap);
1275   rmap[0] = (int *)(rmap + ismax);
1276   ierr    = PetscMemzero(rmap[0],ismax*Mbs*sizeof(int));CHKERRQ(ierr);
1277   for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + Mbs;}
1278 #endif
1279   for (i=0; i<ismax; i++) {
1280     irow_i = irow[i];
1281     jmax   = nrow[i];
1282 #if defined (PETSC_USE_CTABLE)
1283     lrow1_grow1 = rowmaps[i];
1284     for (j=0; j<jmax; j++) {
1285       ierr = PetscTableAdd(lrow1_grow1,irow_i[j]+1,j+1);CHKERRQ(ierr);
1286     }
1287 #else
1288     rmap_i = rmap[i];
1289     for (j=0; j<jmax; j++) {
1290       rmap_i[irow_i[j]] = j;
1291     }
1292 #endif
1293   }
1294 
1295   /* Update lens from offproc data */
1296   {
1297     int *rbuf2_i,*rbuf3_i,*sbuf1_i;
1298 
1299     for (tmp2=0; tmp2<nrqs; tmp2++) {
1300       ierr    = MPI_Waitany(nrqs,r_waits3,&i,r_status3+tmp2);CHKERRQ(ierr);
1301       index   = pa[i];
1302       sbuf1_i = sbuf1[index];
1303       jmax    = sbuf1_i[0];
1304       ct1     = 2*jmax+1;
1305       ct2     = 0;
1306       rbuf2_i = rbuf2[i];
1307       rbuf3_i = rbuf3[i];
1308       for (j=1; j<=jmax; j++) {
1309         is_no   = sbuf1_i[2*j-1];
1310         max1    = sbuf1_i[2*j];
1311         lens_i  = lens[is_no];
1312 #if defined (PETSC_USE_CTABLE)
1313 	lcol1_gcol1 = colmaps[is_no];
1314 	lrow1_grow1 = rowmaps[is_no];
1315 #else
1316         cmap_i  = cmap[is_no];
1317 	rmap_i  = rmap[is_no];
1318 #endif
1319         for (k=0; k<max1; k++,ct1++) {
1320 #if defined (PETSC_USE_CTABLE)
1321 	  ierr = PetscTableFind(lrow1_grow1,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
1322           row--;
1323           if(row < 0) { SETERRQ(1,1,"row not found in table"); }
1324 #else
1325           row  = rmap_i[sbuf1_i[ct1]]; /* the val in the new matrix to be */
1326 #endif
1327           max2 = rbuf2_i[ct1];
1328           for (l=0; l<max2; l++,ct2++) {
1329 #if defined (PETSC_USE_CTABLE)
1330 	    ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tt);CHKERRQ(ierr);
1331 	    if (tt) {
1332               lens_i[row]++;
1333             }
1334 #else
1335             if (cmap_i[rbuf3_i[ct2]]) {
1336               lens_i[row]++;
1337             }
1338 #endif
1339           }
1340         }
1341       }
1342     }
1343   }
1344   ierr = PetscFree(r_status3);CHKERRQ(ierr);
1345   ierr = PetscFree(r_waits3);CHKERRQ(ierr);
1346   ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);
1347   ierr = PetscFree(s_status3);CHKERRQ(ierr);
1348   ierr = PetscFree(s_waits3);CHKERRQ(ierr);
1349 
1350   /* Create the submatrices */
1351   if (scall == MAT_REUSE_MATRIX) {
1352     /*
1353         Assumes the new rows are the same length as the old rows, hence a potential bug!
1354     */
1355     for (i=0; i<ismax; i++) {
1356       mat = (Mat_SeqBAIJ *)(submats[i]->data);
1357       if ((mat->mbs != nrow[i]) || (mat->nbs != ncol[i] || mat->bs != bs)) {
1358         SETERRQ(PETSC_ERR_ARG_SIZ,0,"Cannot reuse matrix. wrong size");
1359       }
1360       ierr = PetscMemcmp(mat->ilen,lens[i],mat->mbs *sizeof(int),&flag);CHKERRQ(ierr);
1361       if (flag == PETSC_FALSE) {
1362         SETERRQ(PETSC_ERR_ARG_INCOMP,0,"Cannot reuse matrix. wrong no of nonzeros");
1363       }
1364       /* Initialize the matrix as if empty */
1365       ierr = PetscMemzero(mat->ilen,mat->mbs*sizeof(int));CHKERRQ(ierr);
1366       submats[i]->factor = C->factor;
1367     }
1368   } else {
1369     for (i=0; i<ismax; i++) {
1370       ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,a->bs,nrow[i]*bs,ncol[i]*bs,0,lens[i],submats+i);CHKERRQ(ierr);
1371     }
1372   }
1373 
1374   /* Assemble the matrices */
1375   /* First assemble the local rows */
1376   {
1377     int       ilen_row,*imat_ilen,*imat_j,*imat_i;
1378     MatScalar *imat_a;
1379 
1380     for (i=0; i<ismax; i++) {
1381       mat       = (Mat_SeqBAIJ*)submats[i]->data;
1382       imat_ilen = mat->ilen;
1383       imat_j    = mat->j;
1384       imat_i    = mat->i;
1385       imat_a    = mat->a;
1386 
1387 #if defined (PETSC_USE_CTABLE)
1388       lcol1_gcol1 = colmaps[i];
1389       lrow1_grow1 = rowmaps[i];
1390 #else
1391       cmap_i    = cmap[i];
1392       rmap_i    = rmap[i];
1393 #endif
1394       irow_i    = irow[i];
1395       jmax      = nrow[i];
1396       for (j=0; j<jmax; j++) {
1397         row      = irow_i[j];
1398 #if defined (PETSC_USE_CTABLE)
1399 	ierr = PetscGetProc(row,size,c->rowners,&proc);CHKERRQ(ierr);
1400 #else
1401 	proc = rtable[row];
1402 #endif
1403         if (proc == rank) {
1404           row      = row - rstart;
1405           nzA      = a_i[row+1] - a_i[row];
1406           nzB      = b_i[row+1] - b_i[row];
1407           cworkA   = a_j + a_i[row];
1408           cworkB   = b_j + b_i[row];
1409           vworkA   = a_a + a_i[row]*bs2;
1410           vworkB   = b_a + b_i[row]*bs2;
1411 #if defined (PETSC_USE_CTABLE)
1412 	  ierr = PetscTableFind(lrow1_grow1,row+rstart+1,&row);CHKERRQ(ierr);
1413           row--;
1414           if (row < 0) { SETERRQ(1,1,"row not found in table"); }
1415 #else
1416           row      = rmap_i[row + rstart];
1417 #endif
1418           mat_i    = imat_i[row];
1419           mat_a    = imat_a + mat_i*bs2;
1420           mat_j    = imat_j + mat_i;
1421           ilen_row = imat_ilen[row];
1422 
1423           /* load the column indices for this row into cols*/
1424           for (l=0; l<nzB; l++) {
1425 	    if ((ctmp = bmap[cworkB[l]]) < cstart) {
1426 #if defined (PETSC_USE_CTABLE)
1427 	      ierr = PetscTableFind(lcol1_gcol1,ctmp+1,&tcol);CHKERRQ(ierr);
1428 	      if (tcol) {
1429 #else
1430               if ((tcol = cmap_i[ctmp])) {
1431 #endif
1432                 *mat_j++ = tcol - 1;
1433                 ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1434                 mat_a   += bs2;
1435                 ilen_row++;
1436               }
1437             } else break;
1438           }
1439           imark = l;
1440           for (l=0; l<nzA; l++) {
1441 #if defined (PETSC_USE_CTABLE)
1442 	    ierr = PetscTableFind(lcol1_gcol1,cstart+cworkA[l]+1,&tcol);CHKERRQ(ierr);
1443 	    if (tcol) {
1444 #else
1445 	    if ((tcol = cmap_i[cstart + cworkA[l]])) {
1446 #endif
1447               *mat_j++ = tcol - 1;
1448               ierr     = PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1449               mat_a   += bs2;
1450               ilen_row++;
1451             }
1452           }
1453           for (l=imark; l<nzB; l++) {
1454 #if defined (PETSC_USE_CTABLE)
1455 	    ierr = PetscTableFind(lcol1_gcol1,bmap[cworkB[l]]+1,&tcol);CHKERRQ(ierr);
1456 	    if (tcol) {
1457 #else
1458             if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1459 #endif
1460               *mat_j++ = tcol - 1;
1461               ierr     = PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1462               mat_a   += bs2;
1463               ilen_row++;
1464             }
1465           }
1466           imat_ilen[row] = ilen_row;
1467         }
1468       }
1469 
1470     }
1471   }
1472 
1473   /*   Now assemble the off proc rows*/
1474   {
1475     int       *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
1476     int       *imat_j,*imat_i;
1477     MatScalar *imat_a,*rbuf4_i;
1478 
1479     for (tmp2=0; tmp2<nrqs; tmp2++) {
1480       ierr    = MPI_Waitany(nrqs,r_waits4,&i,r_status4+tmp2);CHKERRQ(ierr);
1481       index   = pa[i];
1482       sbuf1_i = sbuf1[index];
1483       jmax    = sbuf1_i[0];
1484       ct1     = 2*jmax + 1;
1485       ct2     = 0;
1486       rbuf2_i = rbuf2[i];
1487       rbuf3_i = rbuf3[i];
1488       rbuf4_i = rbuf4[i];
1489       for (j=1; j<=jmax; j++) {
1490         is_no     = sbuf1_i[2*j-1];
1491 #if defined (PETSC_USE_CTABLE)
1492 	lrow1_grow1 = rowmaps[is_no];
1493 	lcol1_gcol1 = colmaps[is_no];
1494 #else
1495         rmap_i    = rmap[is_no];
1496         cmap_i    = cmap[is_no];
1497 #endif
1498         mat       = (Mat_SeqBAIJ*)submats[is_no]->data;
1499         imat_ilen = mat->ilen;
1500         imat_j    = mat->j;
1501         imat_i    = mat->i;
1502         imat_a    = mat->a;
1503         max1      = sbuf1_i[2*j];
1504         for (k=0; k<max1; k++,ct1++) {
1505           row   = sbuf1_i[ct1];
1506 #if defined (PETSC_USE_CTABLE)
1507 	  ierr = PetscTableFind(lrow1_grow1,row+1,&row);CHKERRQ(ierr);
1508           row--;
1509           if(row < 0) { SETERRQ(1,1,"row not found in table"); }
1510 #else
1511           row   = rmap_i[row];
1512 #endif
1513           ilen  = imat_ilen[row];
1514           mat_i = imat_i[row];
1515           mat_a = imat_a + mat_i*bs2;
1516           mat_j = imat_j + mat_i;
1517           max2 = rbuf2_i[ct1];
1518           for (l=0; l<max2; l++,ct2++) {
1519 #if defined (PETSC_USE_CTABLE)
1520 	    ierr = PetscTableFind(lcol1_gcol1,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
1521 	    if (tcol) {
1522 #else
1523 	    if ((tcol = cmap_i[rbuf3_i[ct2]])) {
1524 #endif
1525               *mat_j++    = tcol - 1;
1526               /* *mat_a++= rbuf4_i[ct2]; */
1527               ierr        = PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(MatScalar));CHKERRQ(ierr);
1528               mat_a      += bs2;
1529               ilen++;
1530             }
1531           }
1532           imat_ilen[row] = ilen;
1533         }
1534       }
1535     }
1536   }
1537   ierr = PetscFree(r_status4);CHKERRQ(ierr);
1538   ierr = PetscFree(r_waits4);CHKERRQ(ierr);
1539   ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);
1540   ierr = PetscFree(s_waits4);CHKERRQ(ierr);
1541   ierr = PetscFree(s_status4);CHKERRQ(ierr);
1542 
1543   /* Restore the indices */
1544   for (i=0; i<ismax; i++) {
1545     ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
1546     ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
1547   }
1548 
1549   /* Destroy allocated memory */
1550   ierr = PetscFree(irow);CHKERRQ(ierr);
1551   ierr = PetscFree(w1);CHKERRQ(ierr);
1552   ierr = PetscFree(pa);CHKERRQ(ierr);
1553 
1554   ierr = PetscFree(sbuf1);CHKERRQ(ierr);
1555   ierr = PetscFree(rbuf2);CHKERRQ(ierr);
1556   for (i=0; i<nrqr; ++i) {
1557     ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
1558   }
1559   for (i=0; i<nrqs; ++i) {
1560     ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
1561     ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);
1562   }
1563 
1564   ierr = PetscFree(sbuf2);CHKERRQ(ierr);
1565   ierr = PetscFree(rbuf3);CHKERRQ(ierr);
1566   ierr = PetscFree(rbuf4);CHKERRQ(ierr);
1567   ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
1568   ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
1569   ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
1570   ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);
1571 
1572 #if defined (PETSC_USE_CTABLE)
1573   for (i=0; i<ismax+1; i++){
1574     ierr = PetscTableDelete(rowmaps[i]);CHKERRQ(ierr);
1575     ierr = PetscTableDelete(colmaps[i]);CHKERRQ(ierr);
1576   }
1577   ierr = PetscFree(colmaps);CHKERRQ(ierr);
1578   ierr = PetscFree(rowmaps);CHKERRQ(ierr);
1579   /* Mark Adams */
1580 #else
1581   ierr = PetscFree(rmap);CHKERRQ(ierr);
1582   ierr = PetscFree(cmap);CHKERRQ(ierr);
1583 #endif
1584   ierr = PetscFree(lens);CHKERRQ(ierr);
1585 
1586   for (i=0; i<ismax; i++) {
1587     ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1588     ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1589   }
1590 
1591   PetscFunctionReturn(0);
1592 }
1593 
1594