1 #ifdef PETSC_RCS_HEADER
2 static char vcid[] = "$Id: baijov.c,v 1.24 1997/10/01 22:21:04 balay Exp balay $";
3 #endif
4 /*
5    Routines to compute overlapping regions of a parallel MPI matrix
6   and to find submatrices that are shared across processors.
7 */
8 #include "src/mat/impls/baij/mpi/mpibaij.h"
9 #include "src/inline/bitarray.h"
10 
11 static int MatIncreaseOverlap_MPIBAIJ_Once(Mat, int, IS *);
12 static int MatIncreaseOverlap_MPIBAIJ_Local(Mat , int , char **,int*, int**);
13 static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat , int, int **, int**, int* );
14 extern int MatGetRow_MPIBAIJ(Mat,int,int*,int**,Scalar**);
15 extern int MatRestoreRow_MPIBAIJ(Mat,int,int*,int**,Scalar**);
16 
17 
18 #undef __FUNC__
19 #define __FUNC__ "MatCompressIndicesGeneral_MPIBAIJ"
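/*
   MatCompressIndicesGeneral_MPIBAIJ - Convert point-index sets into block-index
   sets: each index is divided by the block size and duplicates are removed.

   Illustrative example (hypothetical numbers, not taken from a particular run):
   with bs = 2 the point indices {4,5,9} map to the block indices {2,2,4}, and
   the compressed, duplicate-free set is {2,4}.
*/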
20 static int MatCompressIndicesGeneral_MPIBAIJ(Mat C, int imax, IS *is_in, IS *is_out)
21 {
22   Mat_MPIBAIJ  *baij = (Mat_MPIBAIJ *) C->data;
23   int          ierr,isz,bs = baij->bs,Nbs,n,i,j,*idx,*nidx,ival;
24   BT           table;
25 
26   Nbs   = baij->Nbs;
27   nidx  = (int *) PetscMalloc((Nbs+1)*sizeof(int)); CHKPTRQ(nidx);
28   ierr  = BTCreate(Nbs,table); CHKERRQ(ierr);
29 
30   for (i=0; i<imax; i++) {
31     isz  = 0;
32     BTMemzero(Nbs,table);
33     ierr = ISGetIndices(is_in[i],&idx);  CHKERRQ(ierr);
34     ierr = ISGetSize(is_in[i],&n);  CHKERRQ(ierr);
35     for (j=0; j<n ; j++) {
36       ival = idx[j]/bs; /* convert the indices into block indices */
37       if (ival>=Nbs) SETERRQ(1,0,"Index greater than matrix block dimension");
38       if (!BTLookupSet(table,ival)) { nidx[isz++] = ival;}
39     }
40     ierr = ISRestoreIndices(is_in[i],&idx);  CHKERRQ(ierr);
41     ierr = ISCreateGeneral(PETSC_COMM_SELF, isz, nidx, (is_out+i)); CHKERRQ(ierr);
42   }
43   BTDestroy(table);
44   PetscFree(nidx);
45   return 0;
46 }
47 
48 #undef __FUNC__
49 #define __FUNC__ "MatCompressIndicesSorted_MPIBAIJ"
50 static int MatCompressIndicesSorted_MPIBAIJ(Mat C, int imax, IS *is_in, IS *is_out)
51 {
52   Mat_MPIBAIJ  *baij = (Mat_MPIBAIJ *) C->data;
53   int          ierr,bs=baij->bs,i,j,k,val,n,*idx,*nidx,Nbs=baij->Nbs,*idx_local;
54   PetscTruth   flg;
55 
56   for (i=0; i<imax; i++) {
57     ierr = ISSorted(is_in[i],&flg); CHKERRQ(ierr);
58     if (!flg) SETERRQ(1,0,"Indices are not sorted");
59   }
60   nidx  = (int *) PetscMalloc((Nbs+1)*sizeof(int)); CHKPTRQ(nidx);
61   /* Now check if the indices are in block order */
62   for (i=0; i<imax; i++) {
63     ierr = ISGetIndices(is_in[i],&idx);  CHKERRQ(ierr);
64     ierr = ISGetSize(is_in[i],&n);  CHKERRQ(ierr);
65     if (n%bs != 0) SETERRQ(1,0,"Indices are not block ordered");
66 
67     n = n/bs; /* The reduced index size */
68     idx_local = idx;
69     for (j=0; j<n ; j++) {
70       val = idx_local[0];
71       if (val%bs != 0) SETERRQ(1,0,"Indices are not block ordered");
72       for (k=0; k<bs; k++) {
73         if (val+k != idx_local[k]) SETERRQ(1,0,"Indices are not block ordered");
74       }
75       nidx[j] = val/bs;
76       idx_local +=bs;
77     }
78     ierr = ISRestoreIndices(is_in[i],&idx);  CHKERRQ(ierr);
79     ierr = ISCreateGeneral(PETSC_COMM_SELF,n,nidx,(is_out+i)); CHKERRQ(ierr);
80   }
81   PetscFree(nidx);
82   return 0;
83 }
84 
85 #undef __FUNC__
86 #define __FUNC__ "MatExpandIndices_MPIBAIJ"
87 static int MatExpandIndices_MPIBAIJ(Mat C, int imax, IS *is_in, IS *is_out)
88 {
89   Mat_MPIBAIJ  *baij = (Mat_MPIBAIJ *) C->data;
90   int          ierr,bs = baij->bs,Nbs,n,i,j,k,*idx,*nidx;
91 
92   Nbs   = baij->Nbs;
93 
94   nidx  = (int *) PetscMalloc((Nbs*bs+1)*sizeof(int)); CHKPTRQ(nidx);
95 
96   for ( i=0; i<imax; i++ ) {
97     ierr = ISGetIndices(is_in[i],&idx);  CHKERRQ(ierr);
98     ierr = ISGetSize(is_in[i],&n);  CHKERRQ(ierr);
99     for (j=0; j<n ; ++j){
100       for (k=0; k<bs; k++)
101         nidx[j*bs+k] = idx[j]*bs+k;
102     }
103     ierr = ISRestoreIndices(is_in[i],&idx);  CHKERRQ(ierr);
104     ierr = ISCreateGeneral(PETSC_COMM_SELF, n*bs, nidx, (is_out+i)); CHKERRQ(ierr);
105   }
106   PetscFree(nidx);
107   return 0;
108 }
109 
110 
111 #undef __FUNC__
112 #define __FUNC__ "MatIncreaseOverlap_MPIBAIJ"
113 int MatIncreaseOverlap_MPIBAIJ(Mat C, int imax, IS *is, int ov)
114 {
115   int i, ierr;
116   IS *is_new;
117 
118   if (ov < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,0,"Negative overlap specified");
119   is_new = (IS *)PetscMalloc(imax*sizeof(IS)); CHKPTRQ(is_new);
120   /* Convert the indices into block format */
121   ierr = MatCompressIndicesGeneral_MPIBAIJ(C, imax, is,is_new); CHKERRQ(ierr);
122   for (i=0; i<ov; ++i) {
123     ierr = MatIncreaseOverlap_MPIBAIJ_Once(C, imax, is_new); CHKERRQ(ierr);
124   }
125   for (i=0; i<imax; i++) ISDestroy(is[i]);
126   ierr = MatExpandIndices_MPIBAIJ(C, imax, is_new,is); CHKERRQ(ierr);
127   for (i=0; i<imax; i++) ISDestroy(is_new[i]);
128   PetscFree(is_new);
129   return 0;
130 }
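/*
   Typical calling sequence, as an illustrative sketch only; the matrix A, the
   index count nloc and the index array rows are hypothetical and not part of
   this file.  MatIncreaseOverlap() dispatches to MatIncreaseOverlap_MPIBAIJ()
   when A has the MPIBAIJ format; on return each is[] holds the point indices
   of the enlarged, block-aligned overlap region.

     IS  is[1];
     int ierr;
     ierr = ISCreateGeneral(PETSC_COMM_SELF,nloc,rows,&is[0]); CHKERRQ(ierr);
     ierr = MatIncreaseOverlap(A,1,is,2); CHKERRQ(ierr);   (overlap grown by 2 levels)
     ierr = ISDestroy(is[0]); CHKERRQ(ierr);
*/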
131 
132 /*
133   Sample message format:
134   If processor A wants processor B to process some elements corresponding
135   to index sets is[1] and is[5], the message looks like:
136   mesg [0] = 2   (no of index sets in the mesg)
137   -----------
138   mesg [1] = 1   => is[1]
139   mesg [2] = size of is[1];
140   -----------
141   mesg [3] = 5   => is[5]
142   mesg [4] = size of is[5];
143   -----------
144   mesg [5]
145   mesg [n]       data for is[1]
146   -----------
147   mesg [n+1]
148   mesg [m]       data for is[5]
149   -----------
150 
151   Notes:
152   nrqs - no of requests sent (or to be sent out)
153   nrqr - no of requests received (which have to be, or have been, processed)
154 */
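/*
   A receiver can walk a message with the above layout as sketched below
   (illustrative pseudocode only; 'mesg' stands for one received buffer):

     nis = mesg[0];                          number of index sets in the mesg
     ct  = 2*nis + 1;                        data starts right after the header
     for (j=1; j<=nis; j++) {
       is_no = mesg[2*j-1];                  which IS this chunk belongs to
       sz    = mesg[2*j];                    number of indices sent for that IS
       process mesg[ct] ... mesg[ct+sz-1];
       ct += sz;
     }

   This is how MatIncreaseOverlap_MPIBAIJ_Receive() and the reply-processing
   loop below traverse the buffers.
*/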
155 #undef __FUNC__
156 #define __FUNC__ "MatIncreaseOverlap_MPIBAIJ_Once"
157 static int MatIncreaseOverlap_MPIBAIJ_Once(Mat C, int imax, IS *is)
158 {
159   Mat_MPIBAIJ  *c = (Mat_MPIBAIJ *) C->data;
160   int         **idx, *n, *w1, *w2, *w3, *w4, *rtable,**data,len,*idx_i;
161   int         size,rank,Mbs,i,j,k,ierr,**rbuf,row,proc,nrqs,msz,**outdat,**ptr;
162   int         *ctr,*pa,tag,*tmp,bsz,nrqr,*isz,*isz1,**xdata,bsz1,**rbuf2;
163   BT          *table;
164   MPI_Comm    comm;
165   MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2;
166   MPI_Status  *s_status,*recv_status;
167 
168   comm   = C->comm;
169   tag    = C->tag;
170   size   = c->size;
171   rank   = c->rank;
172   Mbs      = c->Mbs;
173 
174   len    = (imax+1)*sizeof(int *) + (imax + Mbs)*sizeof(int);
175   idx    = (int **) PetscMalloc(len); CHKPTRQ(idx);
176   n      = (int *) (idx + imax);
177   rtable = n + imax;
178 
179   for (i=0; i<imax; i++) {
180     ierr = ISGetIndices(is[i],&idx[i]);  CHKERRQ(ierr);
181     ierr = ISGetSize(is[i],&n[i]);  CHKERRQ(ierr);
182   }
183 
184   /* Create a lookup table for the mapping: row -> proc */
185   for (i=0,j=0; i<size; i++) {
186     len = c->rowners[i+1];
187     for (; j<len; j++) {
188       rtable[j] = i;
189     }
190   }
191 
192   /* evaluate communication - mesg to who, length of mesg, and buffer space
193      required. Based on this, buffers are allocated, and data copied into them*/
194   w1   = (int *)PetscMalloc(size*4*sizeof(int));CHKPTRQ(w1);/*  mesg size */
195   w2   = w1 + size;       /* if w2[i] marked, then a message to proc i*/
196   w3   = w2 + size;       /* no of IS that needs to be sent to proc i */
197   w4   = w3 + size;       /* temp work space used in determining w1, w2, w3 */
198   PetscMemzero(w1,size*3*sizeof(int)); /* initialise work vector*/
199   for (i=0; i<imax; i++) {
200     PetscMemzero(w4,size*sizeof(int)); /* initialise work vector*/
201     idx_i = idx[i];
202     len   = n[i];
203     for (j=0; j<len; j++) {
204       row  = idx_i[j];
205       proc = rtable[row];
206       w4[proc]++;
207     }
208     for (j=0; j<size; j++){
209       if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
210     }
211   }
212 
213   nrqs     = 0;              /* no of outgoing messages */
214   msz      = 0;              /* total mesg length (for all procs) */
215   w1[rank] = 0;              /* no mesg sent to itself */
216   w3[rank] = 0;
217   for ( i=0; i<size; i++) {
218     if (w1[i])  {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
219   }
220   /* pa - is list of processors to communicate with */
221   pa = (int *)PetscMalloc((nrqs+1)*sizeof(int));CHKPTRQ(pa);
222   for (i=0,j=0; i<size; i++) {
223     if (w1[i]) {pa[j] = i; j++;}
224   }
225 
226   /* Each message would have a header = 1 + 2*(no of IS) + data */
227   for (i=0; i<nrqs; i++) {
228     j      = pa[i];
229     w1[j] += w2[j] + 2*w3[j];
230     msz   += w1[j];
231   }
232 
233 
234   /* Do a global reduction to determine how many messages to expect*/
235   {
236     int *rw1, *rw2;
237     rw1   = (int *) PetscMalloc(2*size*sizeof(int)); CHKPTRQ(rw1);
238     rw2   = rw1+size;
239     MPI_Allreduce(w1, rw1, size, MPI_INT, MPI_MAX, comm);
240     bsz   = rw1[rank];
241     MPI_Allreduce(w2, rw2, size, MPI_INT, MPI_SUM, comm);
242     nrqr  = rw2[rank];
243     PetscFree(rw1);
244   }
245 
246   /* Allocate memory for recv buffers (probably none needed if nrqr = 0) */
247   len     = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
248   rbuf    = (int**) PetscMalloc(len);  CHKPTRQ(rbuf);
249   rbuf[0] = (int *) (rbuf + nrqr);
250   for (i=1; i<nrqr; ++i) rbuf[i] = rbuf[i-1] + bsz;
251 
252   /* Post the receives */
253   r_waits1 = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));
254   CHKPTRQ(r_waits1);
255   for (i=0; i<nrqr; ++i) {
256     MPI_Irecv(rbuf[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag,comm,r_waits1+i);
257   }
258 
259   /* Allocate Memory for outgoing messages */
260   len    = 2*size*sizeof(int*) + (size+msz)*sizeof(int);
261   outdat = (int **)PetscMalloc(len); CHKPTRQ(outdat);
262   ptr    = outdat + size;     /* Pointers to the data in outgoing buffers */
263   PetscMemzero(outdat,2*size*sizeof(int*));
264   tmp    = (int *) (outdat + 2*size);
265   ctr    = tmp + msz;
266 
267   {
268     int *iptr = tmp,ict  = 0;
269     for (i=0; i<nrqs; i++) {
270       j         = pa[i];
271       iptr     +=  ict;
272       outdat[j] = iptr;
273       ict       = w1[j];
274     }
275   }
276 
277   /* Form the outgoing messages */
278   /*plug in the headers*/
279   for (i=0; i<nrqs; i++) {
280     j            = pa[i];
281     outdat[j][0] = 0;
282     PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(int));
283     ptr[j]       = outdat[j] + 2*w3[j] + 1;
284   }
285 
286   /* Memory for doing local proc's work*/
287   {
288     int  *d_p;
289     char *t_p;
290 
291     len      = (imax)*(sizeof(BT) + sizeof(int *) + sizeof(int)) +
292                (Mbs)*imax*sizeof(int)  + (Mbs/BITSPERBYTE+1)*imax*sizeof(char) + 1;
293     table    = (BT *)PetscMalloc(len);  CHKPTRQ(table);
294     PetscMemzero(table,len);
295     data     = (int **)(table + imax);
296     isz      = (int  *)(data  + imax);
297     d_p      = (int  *)(isz   + imax);
298     t_p      = (char *)(d_p   + Mbs*imax);
299     for (i=0; i<imax; i++) {
300       table[i] = t_p + (Mbs/BITSPERBYTE+1)*i;
301       data[i]  = d_p + (Mbs)*i;
302     }
303   }
304 
305   /* Parse the IS and update local tables and the outgoing buf with the data*/
306   {
307     int  n_i,*data_i,isz_i,*outdat_j,ctr_j;
308     BT   table_i;
309 
310     for (i=0; i<imax; i++) {
311       PetscMemzero(ctr,size*sizeof(int));
312       n_i     = n[i];
313       table_i = table[i];
314       idx_i   = idx[i];
315       data_i  = data[i];
316       isz_i   = isz[i];
317       for (j=0;  j<n_i; j++) {  /* parse the indices of each IS */
318         row  = idx_i[j];
319         proc = rtable[row];
320         if (proc != rank) { /* copy to the outgoing buffer */
321           ctr[proc]++;
322           *ptr[proc] = row;
323           ptr[proc]++;
324         }
325         else { /* Update the local table */
326           if (!BTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
327         }
328       }
329       /* Update the headers for the current IS */
330       for (j=0; j<size; j++) { /* Can Optimise this loop by using pa[] */
331         if ((ctr_j = ctr[j])) {
332           outdat_j        = outdat[j];
333           k               = ++outdat_j[0];
334           outdat_j[2*k]   = ctr_j;
335           outdat_j[2*k-1] = i;
336         }
337       }
338       isz[i] = isz_i;
339     }
340   }
341 
342 
343 
344   /*  Now  post the sends */
345   s_waits1 = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));
346   CHKPTRQ(s_waits1);
347   for (i=0; i<nrqs; ++i) {
348     j = pa[i];
349     MPI_Isend(outdat[j], w1[j], MPI_INT, j, tag, comm, s_waits1+i);
350   }
351 
352   /* No longer need the original indices*/
353   for (i=0; i<imax; ++i) {
354     ierr = ISRestoreIndices(is[i], idx+i); CHKERRQ(ierr);
355   }
356   PetscFree(idx);
357 
358   for (i=0; i<imax; ++i) {
359     ierr = ISDestroy(is[i]); CHKERRQ(ierr);
360   }
361 
362   /* Do Local work*/
363   ierr = MatIncreaseOverlap_MPIBAIJ_Local(C,imax,table,isz,data);CHKERRQ(ierr);
364 
365   /* Receive messages*/
366   {
367     recv_status = (MPI_Status *) PetscMalloc( (nrqr+1)*sizeof(MPI_Status) );
368     CHKPTRQ(recv_status);
369     /* wait on all receives so that recv_status[i] corresponds to rbuf[i];
370        a Waitany loop would store the statuses in completion order and later
371        mismatch them with isz1[] and xdata[] */
372     MPI_Waitall(nrqr,r_waits1,recv_status);
374 
375     s_status = (MPI_Status *) PetscMalloc( (nrqs+1)*sizeof(MPI_Status) );
376     CHKPTRQ(s_status);
377     MPI_Waitall(nrqs,s_waits1,s_status);
378   }
379 
380   /* Phase 1 sends are complete - deallocate buffers */
381   PetscFree(outdat);
382   PetscFree(w1);
383 
384   xdata = (int **)PetscMalloc((nrqr+1)*sizeof(int *)); CHKPTRQ(xdata);
385   isz1  = (int *)PetscMalloc((nrqr+1)*sizeof(int)); CHKPTRQ(isz1);
386   ierr  = MatIncreaseOverlap_MPIBAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
387   PetscFree(rbuf);
388 
389   /* Send the data back*/
390   /* Do a global reduction to know the buffer space req for incoming messages*/
391   {
392     int *rw1, *rw2;
393 
394     rw1 = (int *)PetscMalloc(2*size*sizeof(int)); CHKPTRQ(rw1);
395     PetscMemzero(rw1,2*size*sizeof(int));
396     rw2 = rw1+size;
397     for (i=0; i<nrqr; ++i) {
398       proc      = recv_status[i].MPI_SOURCE;
399       rw1[proc] = isz1[i];
400     }
401 
402     MPI_Allreduce(rw1, rw2, size, MPI_INT, MPI_MAX, comm);
403     bsz1   = rw2[rank];
404     PetscFree(rw1);
405   }
406 
407   /* Allocate buffers*/
408 
409   /* Allocate memory for recv buffers (probably none needed if nrqs = 0) */
410   len      = (nrqs+1)*sizeof(int*) + nrqs*bsz1*sizeof(int);
411   rbuf2    = (int**) PetscMalloc(len);  CHKPTRQ(rbuf2);
412   rbuf2[0] = (int *) (rbuf2 + nrqs);
413   for (i=1; i<nrqs; ++i) rbuf2[i] = rbuf2[i-1] + bsz1;
414 
415   /* Post the receives */
416   r_waits2 = (MPI_Request *)PetscMalloc((nrqs+1)*sizeof(MPI_Request));
417   CHKPTRQ(r_waits2);
418   for (i=0; i<nrqs; ++i) {
419     MPI_Irecv(rbuf2[i], bsz1, MPI_INT, MPI_ANY_SOURCE, tag, comm, r_waits2+i);
420   }
421 
422   /*  Now  post the sends */
423   s_waits2 = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));
424   CHKPTRQ(s_waits2);
425   for (i=0; i<nrqr; ++i) {
426     j = recv_status[i].MPI_SOURCE;
427     MPI_Isend( xdata[i], isz1[i], MPI_INT, j, tag, comm, s_waits2+i);
428   }
429 
430   /* receive work done on other processors*/
431   {
432     int         index, is_no, ct1, max,*rbuf2_i,isz_i,*data_i,jmax;
433     BT          table_i;
434     MPI_Status  *status2;
435 
436     status2 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(status2);
437 
438     for (i=0; i<nrqs; ++i) {
439       MPI_Waitany(nrqs, r_waits2, &index, status2+i);
440       /* Process the message*/
441       rbuf2_i = rbuf2[index];
442       ct1     = 2*rbuf2_i[0]+1;
443       jmax    = rbuf2[index][0];
444       for (j=1; j<=jmax; j++) {
445         max     = rbuf2_i[2*j];
446         is_no   = rbuf2_i[2*j-1];
447         isz_i   = isz[is_no];
448         data_i  = data[is_no];
449         table_i = table[is_no];
450         for (k=0; k<max; k++,ct1++) {
451           row = rbuf2_i[ct1];
452           if (!BTLookupSet(table_i,row)) { data_i[isz_i++] = row;}
453         }
454         isz[is_no] = isz_i;
455       }
456     }
457     MPI_Waitall(nrqr,s_waits2,status2);
458     PetscFree(status2);
459   }
460 
461   for (i=0; i<imax; ++i) {
462     ierr = ISCreateGeneral(PETSC_COMM_SELF, isz[i], data[i], is+i); CHKERRQ(ierr);
463   }
464 
465   PetscFree(pa);
466   PetscFree(rbuf2);
467   PetscFree(s_waits1);
468   PetscFree(r_waits1);
469   PetscFree(s_waits2);
470   PetscFree(r_waits2);
471   PetscFree(table);
472   PetscFree(s_status);
473   PetscFree(recv_status);
474   PetscFree(xdata[0]);
475   PetscFree(xdata);
476   PetscFree(isz1);
477   return 0;
478 }
479 
480 #undef __FUNC__
481 #define __FUNC__ "MatIncreaseOverlap_MPIBAIJ_Local"
482 /*
483    MatIncreaseOverlap_MPIBAIJ_Local - Called by MatIncreaseOverlap_MPIBAIJ_Once,
484        to do the work on the local processor.
485 
486      Inputs:
487       C      - MAT_MPIBAIJ;
488       imax   - total no of index sets processed at a time;
489       table  - an array of bit tables (BT), each of size Mbs bits.
490 
491      Output:
492       isz    - array containing the count of the solution elements corresponding
493                to each index set;
494       data   - pointer to the solutions
495 */
496 static int MatIncreaseOverlap_MPIBAIJ_Local(Mat C,int imax,BT *table,int *isz,
497                                            int **data)
498 {
499   Mat_MPIBAIJ *c = (Mat_MPIBAIJ *) C->data;
500   Mat        A = c->A, B = c->B;
501   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
502   int        start, end, val, max, rstart,cstart,*ai, *aj;
503   int        *bi, *bj, *garray, i, j, k, row,*data_i,isz_i;
504   BT         table_i;
505 
506   rstart = c->rstart;
507   cstart = c->cstart;
508   ai     = a->i;
509   aj     = a->j;
510   bi     = b->i;
511   bj     = b->j;
512   garray = c->garray;
513 
514 
515   for (i=0; i<imax; i++) {
516     data_i  = data[i];
517     table_i = table[i];
518     isz_i   = isz[i];
519     for (j=0, max=isz[i]; j<max; j++) {
520       row   = data_i[j] - rstart;
521       start = ai[row];
522       end   = ai[row+1];
523       for (k=start; k<end; k++) { /* Amat */
524         val = aj[k] + cstart;
525         if (!BTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
526       }
527       start = bi[row];
528       end   = bi[row+1];
529       for (k=start; k<end; k++) { /* Bmat */
530         val = garray[bj[k]];
531         if (!BTLookupSet(table_i,val)) { data_i[isz_i++] = val;}
532       }
533     }
534     isz[i] = isz_i;
535   }
536   return 0;
537 }
538 #undef __FUNC__
539 #define __FUNC__ "MatIncreaseOverlap_MPIBAIJ_Receive"
540 /*
541       MatIncreaseOverlap_MPIBAIJ_Receive - Process the received messages,
542          and return the output
543 
544          Input:
545            C    - the matrix
546            nrqr - no of messages being processed.
547            rbuf - an array of pointers to the received requests
548 
549          Output:
550            xdata - array of messages to be sent back
551            isz1  - size of each message
552 
553   For better efficiency perhaps we should malloc each xdata[i] separately;
554 then if a realloc is required we need only copy the data for that one xdata[i]
555 rather than all the previous data, as is done now where a single large chunk of
556 memory is used.
557 
558 */
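/*
   The reply built for each request reuses the request's layout (a summary of
   the code below, stated here for readability):
     xdata[i][0]                        = number of index sets, copied from the request
     xdata[i][2*j-1], xdata[i][2*j]     = IS number and count of rows found for set j
     followed by the row indices themselves; isz1[i] is the total message length.
*/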
559 static int MatIncreaseOverlap_MPIBAIJ_Receive(Mat C,int nrqr,int **rbuf,
560                                             int **xdata, int * isz1)
561 {
562   Mat_MPIBAIJ  *c = (Mat_MPIBAIJ *) C->data;
563   Mat         A = c->A, B = c->B;
564   Mat_SeqBAIJ  *a = (Mat_SeqBAIJ*)A->data,*b = (Mat_SeqBAIJ*)B->data;
565   int         rstart,cstart,*ai, *aj, *bi, *bj, *garray, i, j, k;
566   int         row,total_sz,ct, ct1, ct2, ct3,mem_estimate, oct2, l, start, end;
567   int         val, max1, max2, rank, Mbs, no_malloc =0, *tmp, new_estimate, ctr;
568   int         *rbuf_i,kmax,rbuf_0,ierr;
569   BT          xtable;
570 
571   rank   = c->rank;
572   Mbs      = c->Mbs;
573   rstart = c->rstart;
574   cstart = c->cstart;
575   ai     = a->i;
576   aj     = a->j;
577   bi     = b->i;
578   bj     = b->j;
579   garray = c->garray;
580 
581 
582   for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
583     rbuf_i =  rbuf[i];
584     rbuf_0 =  rbuf_i[0];
585     ct     += rbuf_0;
586     for (j=1; j<=rbuf_0; j++) { total_sz += rbuf_i[2*j]; }
587   }
588 
589   max1         = ct*(a->nz +b->nz)/c->Mbs;
590   mem_estimate =  3*((total_sz > max1 ? total_sz : max1)+1);
591   xdata[0]     = (int *)PetscMalloc(mem_estimate*sizeof(int)); CHKPTRQ(xdata[0]);
592   ++no_malloc;
593   ierr         = BTCreate(Mbs,xtable); CHKERRQ(ierr);
594   PetscMemzero(isz1,nrqr*sizeof(int));
595 
596   ct3 = 0;
597   for (i=0; i<nrqr; i++) { /* for each incoming mesg */
598     rbuf_i =  rbuf[i];
599     rbuf_0 =  rbuf_i[0];
600     ct1    =  2*rbuf_0+1;
601     ct2    =  ct1;
602     ct3    += ct1;
603     for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i*/
604       BTMemzero(Mbs,xtable);
605       oct2 = ct2;
606       kmax = rbuf_i[2*j];
607       for (k=0; k<kmax; k++, ct1++) {
608         row = rbuf_i[ct1];
609         if (!BTLookupSet(xtable,row)) {
610           if (!(ct3 < mem_estimate)) {
611             new_estimate = (int)(1.5*mem_estimate)+1;
612             tmp          = (int*) PetscMalloc(new_estimate * sizeof(int));CHKPTRQ(tmp);
613             PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));
614             PetscFree(xdata[0]);
615             xdata[0]     = tmp;
616             mem_estimate = new_estimate; ++no_malloc;
617             for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
618           }
619           xdata[i][ct2++] = row;
620           ct3++;
621         }
622       }
623       for (k=oct2,max2=ct2; k<max2; k++)  {
624         row   = xdata[i][k] - rstart;
625         start = ai[row];
626         end   = ai[row+1];
627         for (l=start; l<end; l++) {
628           val = aj[l] + cstart;
629           if (!BTLookupSet(xtable,val)) {
630             if (!(ct3 < mem_estimate)) {
631               new_estimate = (int)(1.5*mem_estimate)+1;
632               tmp          = (int*) PetscMalloc(new_estimate * sizeof(int));CHKPTRQ(tmp);
633               PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));
634               PetscFree(xdata[0]);
635               xdata[0]     = tmp;
636               mem_estimate = new_estimate; ++no_malloc;
637               for (ctr=1; ctr<=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
638             }
639             xdata[i][ct2++] = val;
640             ct3++;
641           }
642         }
643         start = bi[row];
644         end   = bi[row+1];
645         for (l=start; l<end; l++) {
646           val = garray[bj[l]];
647           if (!BTLookupSet(xtable,val)) {
648             if (!(ct3 < mem_estimate)) {
649               new_estimate = (int)(1.5*mem_estimate)+1;
650               tmp          = (int*) PetscMalloc(new_estimate * sizeof(int));CHKPTRQ(tmp);
651               PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(int));
652               PetscFree(xdata[0]);
653               xdata[0]     = tmp;
654               mem_estimate = new_estimate; ++no_malloc;
655               for (ctr =1; ctr <=i; ctr++) { xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];}
656             }
657             xdata[i][ct2++] = val;
658             ct3++;
659           }
660         }
661       }
662       /* Update the header*/
663       xdata[i][2*j]   = ct2 - oct2; /* Undo the vector isz1 and use only a var*/
664       xdata[i][2*j-1] = rbuf_i[2*j-1];
665     }
666     xdata[i][0] = rbuf_0;
667     xdata[i+1]  = xdata[i] + ct2;
668     isz1[i]     = ct2; /* size of each message */
669   }
670   BTDestroy(xtable);
671   PLogInfo(0,"MatIncreaseOverlap_MPIBAIJ:[%d] Allocated %d ints, used %d ints, no of mallocs = %d\n",rank,mem_estimate,ct3,no_malloc);
672   return 0;
673 }
674 
675 static int MatGetSubMatrices_MPIBAIJ_local(Mat,int,IS *,IS *,MatGetSubMatrixCall,Mat *);
676 
677 #undef __FUNC__
678 #define __FUNC__ "MatGetSubMatrices_MPIBAIJ"
679 int MatGetSubMatrices_MPIBAIJ(Mat C,int ismax,IS *isrow,IS *iscol,
680                              MatGetSubMatrixCall scall,Mat **submat)
681 {
682   IS          *isrow_new,*iscol_new;
683   Mat_MPIBAIJ *c = (Mat_MPIBAIJ *) C->data;
684   int         nmax,nstages_local,nstages,i,pos,max_no,ierr;
685 
686   /* The compression and expansion should be avoided. It does not detect
687      errors and might change the indices, hence it is potentially buggy. */
688 
689   isrow_new = (IS *)PetscMalloc(2*ismax*sizeof(IS)); CHKPTRQ(isrow_new);
690   iscol_new = isrow_new + ismax;
691   ierr = MatCompressIndicesSorted_MPIBAIJ(C, ismax, isrow,isrow_new); CHKERRQ(ierr);
692   ierr = MatCompressIndicesSorted_MPIBAIJ(C, ismax, iscol,iscol_new); CHKERRQ(ierr);
693 
694   /* Allocate memory to hold all the submatrices */
695   if (scall != MAT_REUSE_MATRIX) {
696     *submat = (Mat *)PetscMalloc((ismax+1)*sizeof(Mat));CHKPTRQ(*submat);
697   }
698   /* Determine the number of stages through which submatrices are done */
699   nmax          = 20*1000000 / (c->Nbs * sizeof(int));
700   if (nmax == 0) nmax = 1;
701   nstages_local = ismax/nmax + ((ismax % nmax)?1:0);
702 
703   /* Make sure every processor loops through all the stages */
704   MPI_Allreduce(&nstages_local,&nstages,1,MPI_INT,MPI_MAX,C->comm);
705 
706 
707   for ( i=0,pos=0; i<nstages; i++ ) {
708     if (pos+nmax <= ismax) max_no = nmax;
709     else if (pos == ismax) max_no = 0;
710     else                   max_no = ismax-pos;
711     ierr = MatGetSubMatrices_MPIBAIJ_local(C,max_no,isrow_new+pos,iscol_new+pos,scall,*submat+pos); CHKERRQ(ierr);
712     pos += max_no;
713   }
714 
715   for (i=0; i<ismax; i++) {
716     ISDestroy(isrow_new[i]);
717     ISDestroy(iscol_new[i]);
718   }
719   PetscFree(isrow_new);
720   return 0;
721 }
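/*
   Typical calling sequence, as an illustrative sketch only; the matrix A and
   the index sets isrow,iscol are hypothetical.  Both index sets must be sorted
   and cover complete blocks, since they are compressed to block indices above.

     Mat *submat;
     ierr = MatGetSubMatrices(A,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat); CHKERRQ(ierr);
     (use submat[0], a sequential BAIJ matrix owned by this processor)
     ierr = MatDestroy(submat[0]); CHKERRQ(ierr);
     PetscFree(submat);
*/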
722 
723 /* -------------------------------------------------------------------------*/
724 #undef __FUNC__
725 #define __FUNC__ "MatGetSubMatrices_MPIBAIJ_local"
726 static int MatGetSubMatrices_MPIBAIJ_local(Mat C,int ismax,IS *isrow,IS *iscol,
727                              MatGetSubMatrixCall scall,Mat *submats)
728 {
729   Mat_MPIBAIJ  *c = (Mat_MPIBAIJ *) C->data;
730   Mat         A = c->A;
731   Mat_SeqBAIJ  *a = (Mat_SeqBAIJ*)A->data, *b = (Mat_SeqBAIJ*)c->B->data, *mat;
732   int         **irow,**icol,*nrow,*ncol,*w1,*w2,*w3,*w4,*rtable,start,end,size;
733   int         **sbuf1,**sbuf2, rank, Mbs,i,j,k,l,ct1,ct2,ierr, **rbuf1,row,proc;
734   int         nrqs, msz, **ptr,index,*req_size,*ctr,*pa,*tmp,tcol,bsz,nrqr;
735   int         **rbuf3,*req_source,**sbuf_aj, **rbuf2, max1,max2,**rmap;
736   int         **cmap,**lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax,*irow_i;
737   int         len,ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*cmap_i,*lens_i;
738   int         *rmap_i,bs=c->bs,bs2=c->bs2,*a_j=a->j,*b_j=b->j,*cworkA, *cworkB;
739   int         cstart = c->cstart,nzA,nzB,*a_i=a->i,*b_i=b->i,imark;
740   int         *bmap = c->garray,ctmp,rstart=c->rstart,tag0,tag1,tag2,tag3;
741   MPI_Request *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
742   MPI_Request *r_waits4,*s_waits3,*s_waits4;
743   MPI_Status  *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
744   MPI_Status  *r_status3,*r_status4,*s_status4;
745   MPI_Comm    comm;
746   Scalar      **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i,*vworkA,*vworkB;
747   Scalar      *a_a=a->a,*b_a=b->a;
748 
749   comm   = C->comm;
750   tag0    = C->tag;
751   size   = c->size;
752   rank   = c->rank;
753   Mbs      = c->Mbs;
754 
755   /* Get some new tags to keep the communication clean */
756   ierr = PetscObjectGetNewTag((PetscObject)C,&tag1); CHKERRQ(ierr);
757   ierr = PetscObjectGetNewTag((PetscObject)C,&tag2); CHKERRQ(ierr);
758   ierr = PetscObjectGetNewTag((PetscObject)C,&tag3); CHKERRQ(ierr);
759 
760   /* Check if the col indices are sorted */
761   for (i=0; i<ismax; i++) {
762     ierr = ISSorted(iscol[i],(PetscTruth*)&j); CHKERRQ(ierr);
763     if (!j) SETERRQ(1,0,"IS is not sorted");
764   }
765 
766   len    = (2*ismax+1)*(sizeof(int *) + sizeof(int)) + (Mbs+1)*sizeof(int);
767   irow   = (int **)PetscMalloc(len); CHKPTRQ(irow);
768   icol   = irow + ismax;
769   nrow   = (int *) (icol + ismax);
770   ncol   = nrow + ismax;
771   rtable = ncol + ismax;
772 
773   for (i=0; i<ismax; i++) {
774     ierr = ISGetIndices(isrow[i],&irow[i]);  CHKERRQ(ierr);
775     ierr = ISGetIndices(iscol[i],&icol[i]);  CHKERRQ(ierr);
776     ierr = ISGetSize(isrow[i],&nrow[i]);  CHKERRQ(ierr);
777     ierr = ISGetSize(iscol[i],&ncol[i]);  CHKERRQ(ierr);
778   }
779 
780   /* Create a lookup table for the mapping: row -> proc */
781   for (i=0,j=0; i<size; i++) {
782     jmax = c->rowners[i+1];
783     for (; j<jmax; j++) {
784       rtable[j] = i;
785     }
786   }
787 
788   /* evaluate communication - mesg to who, length of mesg, and buffer space
789      required. Based on this, buffers are allocated, and data copied into them*/
790   w1     = (int *)PetscMalloc(size*4*sizeof(int));CHKPTRQ(w1); /* mesg size */
791   w2     = w1 + size;      /* if w2[i] marked, then a message to proc i*/
792   w3     = w2 + size;      /* no of IS that needs to be sent to proc i */
793   w4     = w3 + size;      /* temp work space used in determining w1, w2, w3 */
794   PetscMemzero(w1,size*3*sizeof(int)); /* initialise work vector*/
795   for (i=0; i<ismax; i++) {
796     PetscMemzero(w4,size*sizeof(int)); /* initialise work vector*/
797     jmax   = nrow[i];
798     irow_i = irow[i];
799     for (j=0; j<jmax; j++) {
800       row  = irow_i[j];
801       proc = rtable[row];
802       w4[proc]++;
803     }
804     for (j=0; j<size; j++) {
805       if (w4[j]) { w1[j] += w4[j];  w3[j]++;}
806     }
807   }
808 
809   nrqs     = 0;              /* no of outgoing messages */
810   msz      = 0;              /* total mesg length (for all procs) */
811   w1[rank] = 0;              /* no mesg sent to itself */
812   w3[rank] = 0;
813   for (i=0; i<size; i++) {
814     if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
815   }
816   pa = (int *)PetscMalloc((nrqs+1)*sizeof(int));CHKPTRQ(pa); /*(proc -array)*/
817   for (i=0, j=0; i<size; i++) {
818     if (w1[i]) { pa[j] = i; j++; }
819   }
820 
821   /* Each message would have a header = 1 + 2*(no of IS) + data */
822   for (i=0; i<nrqs; i++) {
823     j     = pa[i];
824     w1[j] += w2[j] + 2* w3[j];
825     msz   += w1[j];
826   }
827   /* Do a global reduction to determine how many messages to expect*/
828   {
829     int *rw1, *rw2;
830     rw1 = (int *)PetscMalloc(2*size*sizeof(int)); CHKPTRQ(rw1);
831     rw2 = rw1+size;
832     MPI_Allreduce(w1, rw1, size, MPI_INT, MPI_MAX, comm);
833     bsz   = rw1[rank];
834     MPI_Allreduce(w2, rw2, size, MPI_INT, MPI_SUM, comm);
835     nrqr  = rw2[rank];
836     PetscFree(rw1);
837   }
838 
839   /* Allocate memory for recv buffers (probably none needed if nrqr = 0) */
840   len      = (nrqr+1)*sizeof(int*) + nrqr*bsz*sizeof(int);
841   rbuf1    = (int**) PetscMalloc(len);  CHKPTRQ(rbuf1);
842   rbuf1[0] = (int *) (rbuf1 + nrqr);
843   for (i=1; i<nrqr; ++i) rbuf1[i] = rbuf1[i-1] + bsz;
844 
845   /* Post the receives */
846   r_waits1 = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(r_waits1);
847   for (i=0; i<nrqr; ++i) {
848     MPI_Irecv(rbuf1[i],bsz,MPI_INT,MPI_ANY_SOURCE,tag0,comm,r_waits1+i);
849   }
850 
851   /* Allocate Memory for outgoing messages */
852   len      = 2*size*sizeof(int*) + 2*msz*sizeof(int) + size*sizeof(int);
853   sbuf1    = (int **)PetscMalloc(len); CHKPTRQ(sbuf1);
854   ptr      = sbuf1 + size;   /* Pointers to the data in outgoing buffers */
855   PetscMemzero(sbuf1,2*size*sizeof(int*));
856   /* allocate memory for outgoing data + buf to receive the first reply */
857   tmp      = (int *) (ptr + size);
858   ctr      = tmp + 2*msz;
859 
860   {
861 
862     int *iptr = tmp,ict = 0;
863     for (i=0; i<nrqs; i++) {
864       j         = pa[i];
865       iptr     += ict;
866       sbuf1[j]  = iptr;
867       ict       = w1[j];
868     }
869   }
870 
871   /* Form the outgoing messages */
872   /* Initialise the header space */
873   for (i=0; i<nrqs; i++) {
874     j           = pa[i];
875     sbuf1[j][0] = 0;
876     PetscMemzero(sbuf1[j]+1, 2*w3[j]*sizeof(int));
877     ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
878   }
879 
880   /* Parse the isrow and copy data into outbuf */
881   for (i=0; i<ismax; i++) {
882     PetscMemzero(ctr,size*sizeof(int));
883     irow_i = irow[i];
884     jmax   = nrow[i];
885     for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
886       row  = irow_i[j];
887       proc = rtable[row];
888       if (proc != rank) { /* copy to the outgoing buf*/
889         ctr[proc]++;
890         *ptr[proc] = row;
891         ptr[proc]++;
892       }
893     }
894     /* Update the headers for the current IS */
895     for (j=0; j<size; j++) { /* Can Optimise this loop too */
896       if ((ctr_j = ctr[j])) {
897         sbuf1_j        = sbuf1[j];
898         k              = ++sbuf1_j[0];
899         sbuf1_j[2*k]   = ctr_j;
900         sbuf1_j[2*k-1] = i;
901       }
902     }
903   }
904 
905   /*  Now  post the sends */
906   s_waits1 = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(s_waits1);
907   for (i=0; i<nrqs; ++i) {
908     j = pa[i];
910     MPI_Isend( sbuf1[j], w1[j], MPI_INT, j, tag0, comm, s_waits1+i);
911   }
912 
913   /* Post receives to capture the buffer sizes */
914   r_waits2 = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits2);
915   rbuf2    = (int**)PetscMalloc((nrqs+1)*sizeof(int *));CHKPTRQ(rbuf2);
916   rbuf2[0] = tmp + msz;
917   for (i=1; i<nrqs; ++i) {
918     j        = pa[i];
919     rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
920   }
921   for (i=0; i<nrqs; ++i) {
922     j = pa[i];
923     MPI_Irecv( rbuf2[i], w1[j], MPI_INT, j, tag1, comm, r_waits2+i);
924   }
925 
926   /* Send to other procs the buf size they should allocate */
927 
928 
929   /* Receive messages*/
930   s_waits2  = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits2);
931   r_status1 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(r_status1);
932   len         = 2*nrqr*sizeof(int) + (nrqr+1)*sizeof(int*);
933   sbuf2       = (int**) PetscMalloc(len);CHKPTRQ(sbuf2);
934   req_size    = (int *) (sbuf2 + nrqr);
935   req_source  = req_size + nrqr;
936 
937   {
938     Mat_SeqBAIJ *sA = (Mat_SeqBAIJ*) c->A->data, *sB = (Mat_SeqBAIJ*) c->B->data;
939     int        *sAi = sA->i, *sBi = sB->i, id;
940     int        *sbuf2_i;
941 
942     for (i=0; i<nrqr; ++i) {
943       MPI_Waitany(nrqr, r_waits1, &index, r_status1+i);
944       req_size[index] = 0;
945       rbuf1_i         = rbuf1[index];
946       start           = 2*rbuf1_i[0] + 1;
947       MPI_Get_count(r_status1+i,MPI_INT, &end);
948       sbuf2[index] = (int *)PetscMalloc(end*sizeof(int));CHKPTRQ(sbuf2[index]);
949       sbuf2_i      = sbuf2[index];
950       for (j=start; j<end; j++) {
951         id               = rbuf1_i[j] - rstart;
952         ncols            = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
953         sbuf2_i[j]       = ncols;
954         req_size[index] += ncols;
955       }
956       req_source[index] = r_status1[i].MPI_SOURCE;
957       /* form the header */
958       sbuf2_i[0]   = req_size[index];
959       for (j=1; j<start; j++) { sbuf2_i[j] = rbuf1_i[j]; }
960       MPI_Isend(sbuf2_i,end,MPI_INT,req_source[index],tag1,comm,s_waits2+i);
961     }
962   }
963   PetscFree(r_status1); PetscFree(r_waits1);
964 
965   /* Receive the buffer sizes, then post receives for the
966      column indices and the values */
967 
968   rbuf3     = (int**)PetscMalloc((nrqs+1)*sizeof(int*)); CHKPTRQ(rbuf3);
969   rbuf4     = (Scalar**)PetscMalloc((nrqs+1)*sizeof(Scalar*));CHKPTRQ(rbuf4);
970   r_waits3  = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits3);
971   r_waits4  = (MPI_Request *) PetscMalloc((nrqs+1)*sizeof(MPI_Request));CHKPTRQ(r_waits4);
972   r_status2 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status2);
973 
974   for (i=0; i<nrqs; ++i) {
975     MPI_Waitany(nrqs, r_waits2, &index, r_status2+i);
976     rbuf3[index] = (int *)PetscMalloc(rbuf2[index][0]*sizeof(int));CHKPTRQ(rbuf3[index]);
977     rbuf4[index] = (Scalar *)PetscMalloc(rbuf2[index][0]*bs2*sizeof(Scalar));CHKPTRQ(rbuf4[index]);
978     MPI_Irecv(rbuf3[index],rbuf2[index][0], MPI_INT,
979               r_status2[i].MPI_SOURCE, tag2, comm, r_waits3+index);
980     MPI_Irecv(rbuf4[index],rbuf2[index][0]*bs2, MPIU_SCALAR,
981               r_status2[i].MPI_SOURCE, tag3, comm, r_waits4+index);
982   }
983   PetscFree(r_status2); PetscFree(r_waits2);
984 
985   /* Wait on sends1 and sends2 */
986   s_status1 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(s_status1);
987   s_status2 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status2);
988 
989   MPI_Waitall(nrqs,s_waits1,s_status1);
990   MPI_Waitall(nrqr,s_waits2,s_status2);
991   PetscFree(s_status1); PetscFree(s_status2);
992   PetscFree(s_waits1); PetscFree(s_waits2);
993 
994   /* Now allocate buffers for a->j, and send them off */
995   sbuf_aj = (int **)PetscMalloc((nrqr+1)*sizeof(int *));CHKPTRQ(sbuf_aj);
996   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
997   sbuf_aj[0] = (int*) PetscMalloc((j+1)*sizeof(int)); CHKPTRQ(sbuf_aj[0]);
998   for (i=1; i<nrqr; i++)  sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
999 
1000   s_waits3 = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits3);
1001   {
1002      for (i=0; i<nrqr; i++) {
1003       rbuf1_i   = rbuf1[i];
1004       sbuf_aj_i = sbuf_aj[i];
1005       ct1       = 2*rbuf1_i[0] + 1;
1006       ct2       = 0;
1007       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
1008         kmax = rbuf1[i][2*j];
1009         for (k=0; k<kmax; k++,ct1++) {
1010           row    = rbuf1_i[ct1] - rstart;
1011           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1012           ncols  = nzA + nzB;
1013           cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
1014 
1015           /* load the column indices for this row into cols*/
1016           cols  = sbuf_aj_i + ct2;
1017           for (l=0; l<nzB; l++) {
1018             if ((ctmp = bmap[cworkB[l]]) < cstart)  cols[l] = ctmp;
1019             else break;
1020           }
1021           imark = l;
1022           for (l=0; l<nzA; l++)   cols[imark+l] = cstart + cworkA[l];
1023           for (l=imark; l<nzB; l++) cols[nzA+l] = bmap[cworkB[l]];
1024           ct2 += ncols;
1025         }
1026       }
1027       MPI_Isend(sbuf_aj_i,req_size[i],MPI_INT,req_source[i],tag2,comm,s_waits3+i);
1028     }
1029   }
1030   r_status3 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status3);
1031   s_status3 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status3);
1032 
1033   /* Allocate buffers for a->a, and send them off */
1034   sbuf_aa = (Scalar **)PetscMalloc((nrqr+1)*sizeof(Scalar *));CHKPTRQ(sbuf_aa);
1035   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
1036   sbuf_aa[0] = (Scalar*) PetscMalloc((j+1)*bs2*sizeof(Scalar));CHKPTRQ(sbuf_aa[0]);
1037   for (i=1; i<nrqr; i++)  sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1]*bs2;
1038 
1039   s_waits4 = (MPI_Request *) PetscMalloc((nrqr+1)*sizeof(MPI_Request));CHKPTRQ(s_waits4);
1040   {
1041     for (i=0; i<nrqr; i++) {
1042       rbuf1_i   = rbuf1[i];
1043       sbuf_aa_i = sbuf_aa[i];
1044       ct1       = 2*rbuf1_i[0]+1;
1045       ct2       = 0;
1046       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
1047         kmax = rbuf1_i[2*j];
1048         for (k=0; k<kmax; k++,ct1++) {
1049           row    = rbuf1_i[ct1] - rstart;
1050           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1051           ncols  = nzA + nzB;
1052           cworkA = a_j + a_i[row];     cworkB = b_j + b_i[row];
1053           vworkA = a_a + a_i[row]*bs2; vworkB = b_a + b_i[row]*bs2;
1054 
1055           /* load the column values for this row into vals*/
1056           vals  = sbuf_aa_i+ct2*bs2;
1057           for (l=0; l<nzB; l++) {
1058             if ((bmap[cworkB[l]]) < cstart)
1059               PetscMemcpy(vals+l*bs2,vworkB+l*bs2,bs2*sizeof(Scalar));
1060             else break;
1061           }
1062           imark = l;
1063           for (l=0; l<nzA; l++)
1064             PetscMemcpy(vals+(imark+l)*bs2,vworkA+l*bs2,bs2*sizeof(Scalar));
1065           for (l=imark; l<nzB; l++)
1066             PetscMemcpy(vals+(nzA+l)*bs2,vworkB+l*bs2,bs2*sizeof(Scalar));
1067           ct2 += ncols;
1068         }
1069       }
1070       MPI_Isend(sbuf_aa_i,req_size[i]*bs2,MPIU_SCALAR,req_source[i],tag3,comm,s_waits4+i);
1071     }
1072   }
1073   r_status4 = (MPI_Status *) PetscMalloc((nrqs+1)*sizeof(MPI_Status));CHKPTRQ(r_status4);
1074   s_status4 = (MPI_Status *) PetscMalloc((nrqr+1)*sizeof(MPI_Status));CHKPTRQ(s_status4);
1075   PetscFree(rbuf1);
1076 
1077   /* Form the matrix */
1078   /* create col map */
1079   {
1080     int *icol_i;
1081 
1082     len     = (1+ismax)*sizeof(int *) + ismax*c->Nbs*sizeof(int);
1083     cmap    = (int **)PetscMalloc(len); CHKPTRQ(cmap);
1084     cmap[0] = (int *)(cmap + ismax);
1085     PetscMemzero(cmap[0],(1+ismax*c->Nbs)*sizeof(int));
1086     for (i=1; i<ismax; i++) { cmap[i] = cmap[i-1] + c->Nbs; }
1087     for (i=0; i<ismax; i++) {
1088       jmax   = ncol[i];
1089       icol_i = icol[i];
1090       cmap_i = cmap[i];
1091       for (j=0; j<jmax; j++) {
1092         cmap_i[icol_i[j]] = j+1;
1093       }
1094     }
1095   }
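  /* cmap[i][col] holds 1 + the position of block column 'col' in iscol[i], and 0
     if that column is not in iscol[i]; the +1 lets 0 mean "not present", which is
     why the assembly code below stores "tcol - 1" as the local column index. */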
1096 
1097 
1098   /* Create lens which is required for MatCreate... */
1099   for (i=0,j=0; i<ismax; i++) { j += nrow[i]; }
1100   len     = (1+ismax)*sizeof(int *) + j*sizeof(int);
1101   lens    = (int **)PetscMalloc(len); CHKPTRQ(lens);
1102   lens[0] = (int *)(lens + ismax);
1103   PetscMemzero(lens[0], j*sizeof(int));
1104   for (i=1; i<ismax; i++) { lens[i] = lens[i-1] + nrow[i-1]; }
1105 
1106   /* Update lens from local data */
1107   for (i=0; i<ismax; i++) {
1108     jmax   = nrow[i];
1109     cmap_i = cmap[i];
1110     irow_i = irow[i];
1111     lens_i = lens[i];
1112     for (j=0; j<jmax; j++) {
1113       row  = irow_i[j];
1114       proc = rtable[row];
1115       if (proc == rank) {
1116         /* Get indices from matA and then from matB */
1117         row    = row - rstart;
1118         nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
1119         cworkA =  a_j + a_i[row]; cworkB = b_j + b_i[row];
1120         for (k=0; k<nzA; k++)
1121           if (cmap_i[cstart + cworkA[k]]) { lens_i[j]++;}
1122         for (k=0; k<nzB; k++)
1123           if (cmap_i[bmap[cworkB[k]]]) { lens_i[j]++;}
1124       }
1125     }
1126   }
1127 
1128   /* Create row map*/
1129   len     = (1+ismax)*sizeof(int *) + ismax*c->Mbs*sizeof(int);
1130   rmap    = (int **)PetscMalloc(len); CHKPTRQ(rmap);
1131   rmap[0] = (int *)(rmap + ismax);
1132   PetscMemzero(rmap[0],ismax*c->Mbs*sizeof(int));
1133   for (i=1; i<ismax; i++) { rmap[i] = rmap[i-1] + c->Mbs;}
1134   for (i=0; i<ismax; i++) {
1135     rmap_i = rmap[i];
1136     irow_i = irow[i];
1137     jmax   = nrow[i];
1138     for (j=0; j<jmax; j++) {
1139       rmap_i[irow_i[j]] = j;
1140     }
1141   }
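  /* rmap[i][row] gives the local block-row number in submats[i] of the global
     block row 'row' (only entries listed in isrow[i] are meaningful). */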
1142 
1143   /* Update lens from offproc data */
1144   {
1145     int *rbuf2_i, *rbuf3_i, *sbuf1_i;
1146 
1147     for (tmp2=0; tmp2<nrqs; tmp2++) {
1148       MPI_Waitany(nrqs, r_waits3, &i, r_status3+tmp2);
1149       index   = pa[i];
1150       sbuf1_i = sbuf1[index];
1151       jmax    = sbuf1_i[0];
1152       ct1     = 2*jmax+1;
1153       ct2     = 0;
1154       rbuf2_i = rbuf2[i];
1155       rbuf3_i = rbuf3[i];
1156       for (j=1; j<=jmax; j++) {
1157         is_no   = sbuf1_i[2*j-1];
1158         max1    = sbuf1_i[2*j];
1159         lens_i  = lens[is_no];
1160         cmap_i  = cmap[is_no];
1161         rmap_i  = rmap[is_no];
1162         for (k=0; k<max1; k++,ct1++) {
1163           row  = rmap_i[sbuf1_i[ct1]]; /* the row in the new (sub)matrix */
1164           max2 = rbuf2_i[ct1];
1165           for (l=0; l<max2; l++,ct2++) {
1166             if (cmap_i[rbuf3_i[ct2]]) {
1167               lens_i[row]++;
1168             }
1169           }
1170         }
1171       }
1172     }
1173   }
1174   PetscFree(r_status3); PetscFree(r_waits3);
1175   MPI_Waitall(nrqr,s_waits3,s_status3);
1176   PetscFree(s_status3); PetscFree(s_waits3);
1177 
1178   /* Create the submatrices */
1179   if (scall == MAT_REUSE_MATRIX) {
1180     /*
1181         Assumes new rows are same length as the old rows, hence bug!
1182     */
1183     for (i=0; i<ismax; i++) {
1184       mat = (Mat_SeqBAIJ *)(submats[i]->data);
1185       if (mat->mbs != nrow[i] || mat->nbs != ncol[i] || mat->bs != bs) {
1186         SETERRQ(1,0,"Cannot reuse matrix: wrong size");
1187       }
1188       if (PetscMemcmp(mat->ilen,lens[i],mat->mbs*sizeof(int))) {
1189         SETERRQ(1,0,"Cannot reuse matrix: wrong no of nonzeros");
1190       }
1191       /* Initialize the matrix as if it were empty */
1192       PetscMemzero(mat->ilen,mat->mbs*sizeof(int));
1193       submats[i]->factor = C->factor;
1194     }
1195   }
1196   else {
1198     for (i=0; i<ismax; i++) {
1199       ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,a->bs,nrow[i]*bs,ncol[i]*bs,0,lens[i],submats+i);
1200              CHKERRQ(ierr);
1201     }
1202   }
1203 
1204   /* Assemble the matrices */
1205   /* First assemble the local rows */
1206   {
1207     int    ilen_row,*imat_ilen, *imat_j, *imat_i;
1208     Scalar *imat_a;
1209 
1210     for (i=0; i<ismax; i++) {
1211       mat       = (Mat_SeqBAIJ *) submats[i]->data;
1212       imat_ilen = mat->ilen;
1213       imat_j    = mat->j;
1214       imat_i    = mat->i;
1215       imat_a    = mat->a;
1216       cmap_i    = cmap[i];
1217       rmap_i    = rmap[i];
1218       irow_i    = irow[i];
1219       jmax      = nrow[i];
1220       for (j=0; j<jmax; j++) {
1221         row      = irow_i[j];
1222         proc     = rtable[row];
1223         if (proc == rank) {
1224           row      = row - rstart;
1225           nzA      = a_i[row+1] - a_i[row];
1226           nzB      = b_i[row+1] - b_i[row];
1227           cworkA   = a_j + a_i[row];
1228           cworkB   = b_j + b_i[row];
1229           vworkA   = a_a + a_i[row]*bs2;
1230           vworkB   = b_a + b_i[row]*bs2;
1231 
1232           row      = rmap_i[row + rstart];
1233           mat_i    = imat_i[row];
1234           mat_a    = imat_a + mat_i*bs2;
1235           mat_j    = imat_j + mat_i;
1236           ilen_row = imat_ilen[row];
1237 
1238           /* load the column indices for this row into cols*/
1239           for (l=0; l<nzB; l++) {
1240             if ((ctmp = bmap[cworkB[l]]) < cstart) {
1241               if ((tcol = cmap_i[ctmp])) {
1242                 *mat_j++ = tcol - 1;
1243                 PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(Scalar)); mat_a += bs2;
1244                 ilen_row++;
1245               }
1246             }
1247             else break;
1248           }
1249           imark = l;
1250           for (l=0; l<nzA; l++) {
1251             if ((tcol = cmap_i[cstart + cworkA[l]])) {
1252               *mat_j++ = tcol - 1;
1253               PetscMemcpy(mat_a,vworkA+l*bs2,bs2*sizeof(Scalar)); mat_a += bs2;
1254               ilen_row++;
1255             }
1256           }
1257           for (l=imark; l<nzB; l++) {
1258             if ((tcol = cmap_i[bmap[cworkB[l]]])) {
1259               *mat_j++ = tcol - 1;
1260               PetscMemcpy(mat_a,vworkB+l*bs2,bs2*sizeof(Scalar)); mat_a += bs2;
1261               ilen_row++;
1262             }
1263           }
1264           imat_ilen[row] = ilen_row;
1265         }
1266       }
1267 
1268     }
1269   }
1270 
1271   /*   Now assemble the off proc rows*/
1272   {
1273     int    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
1274     int    *imat_j,*imat_i;
1275     Scalar *imat_a,*rbuf4_i;
1276 
1277     for (tmp2=0; tmp2<nrqs; tmp2++) {
1278       MPI_Waitany(nrqs, r_waits4, &i, r_status4+tmp2);
1279       index   = pa[i];
1280       sbuf1_i = sbuf1[index];
1281       jmax    = sbuf1_i[0];
1282       ct1     = 2*jmax + 1;
1283       ct2     = 0;
1284       rbuf2_i = rbuf2[i];
1285       rbuf3_i = rbuf3[i];
1286       rbuf4_i = rbuf4[i];
1287       for (j=1; j<=jmax; j++) {
1288         is_no     = sbuf1_i[2*j-1];
1289         rmap_i    = rmap[is_no];
1290         cmap_i    = cmap[is_no];
1291         mat       = (Mat_SeqBAIJ *) submats[is_no]->data;
1292         imat_ilen = mat->ilen;
1293         imat_j    = mat->j;
1294         imat_i    = mat->i;
1295         imat_a    = mat->a;
1296         max1      = sbuf1_i[2*j];
1297         for (k=0; k<max1; k++, ct1++) {
1298           row   = sbuf1_i[ct1];
1299           row   = rmap_i[row];
1300           ilen  = imat_ilen[row];
1301           mat_i = imat_i[row];
1302           mat_a = imat_a + mat_i*bs2;
1303           mat_j = imat_j + mat_i;
1304           max2 = rbuf2_i[ct1];
1305           for (l=0; l<max2; l++,ct2++) {
1306             if ((tcol = cmap_i[rbuf3_i[ct2]])) {
1307               *mat_j++ = tcol - 1;
1308               /* *mat_a++ = rbuf4_i[ct2]; */
1309               PetscMemcpy(mat_a,rbuf4_i+ct2*bs2,bs2*sizeof(Scalar)); mat_a += bs2;
1310               ilen++;
1311             }
1312           }
1313           imat_ilen[row] = ilen;
1314         }
1315       }
1316     }
1317   }
1318   PetscFree(r_status4); PetscFree(r_waits4);
1319   MPI_Waitall(nrqr,s_waits4,s_status4);
1320   PetscFree(s_waits4); PetscFree(s_status4);
1321 
1322   /* Restore the indices */
1323   for (i=0; i<ismax; i++) {
1324     ierr = ISRestoreIndices(isrow[i], irow+i); CHKERRQ(ierr);
1325     ierr = ISRestoreIndices(iscol[i], icol+i); CHKERRQ(ierr);
1326   }
1327 
1328   /* Destroy allocated memory */
1329   PetscFree(irow);
1330   PetscFree(w1);
1331   PetscFree(pa);
1332 
1333   PetscFree(sbuf1);
1334   PetscFree(rbuf2);
1335   for (i=0; i<nrqr; ++i) {
1336     PetscFree(sbuf2[i]);
1337   }
1338   for (i=0; i<nrqs; ++i) {
1339     PetscFree(rbuf3[i]);
1340     PetscFree(rbuf4[i]);
1341   }
1342 
1343   PetscFree(sbuf2);
1344   PetscFree(rbuf3);
1345   PetscFree(rbuf4);
1346   PetscFree(sbuf_aj[0]);
1347   PetscFree(sbuf_aj);
1348   PetscFree(sbuf_aa[0]);
1349   PetscFree(sbuf_aa);
1350 
1351   PetscFree(cmap);
1352   PetscFree(rmap);
1353   PetscFree(lens);
1354 
1355   for (i=0; i<ismax; i++) {
1356     ierr = MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
1357     ierr = MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY); CHKERRQ(ierr);
1358   }
1359 
1360   ierr = PetscObjectRestoreNewTag((PetscObject)C,&tag3); CHKERRQ(ierr);
1361   ierr = PetscObjectRestoreNewTag((PetscObject)C,&tag2); CHKERRQ(ierr);
1362   ierr = PetscObjectRestoreNewTag((PetscObject)C,&tag1); CHKERRQ(ierr);
1363 
1364   return 0;
1365 }
1366