/*
   Routines to compute overlapping regions of a parallel MPI matrix
   and to find submatrices that are shared across processors.
*/
#include <../src/mat/impls/aij/seq/aij.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petscsf.h>

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat,PetscInt,char**,PetscInt*,PetscInt**,PetscTable*);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat,PetscInt,PetscInt**,PetscInt**,PetscInt*);
extern PetscErrorCode MatGetRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);
extern PetscErrorCode MatRestoreRow_MPIAIJ(Mat,PetscInt,PetscInt*,PetscInt**,PetscScalar**);

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once_Scalable(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local_Scalable(Mat,PetscInt,IS*);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Send_Scalable(Mat,PetscInt,PetscMPIInt,PetscMPIInt*,PetscInt*,PetscInt*,PetscInt**,PetscInt**);
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive_Scalable(Mat,PetscInt,IS*,PetscInt,PetscInt*);

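/*
  Usage sketch (a hedged example, not part of the library source): callers
  normally reach these routines through the public MatIncreaseOverlap()
  interface, which dispatches here for MPIAIJ matrices. Assuming C is an
  assembled MPIAIJ matrix and is[] is an array of imax index sets:

    ierr = MatIncreaseOverlap(C,imax,is,ov);CHKERRQ(ierr);

  Each of the ov passes below then grows every index set by one layer of
  overlap (the rows coupled to the current rows through nonzeros of C).
*/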
PetscErrorCode MatIncreaseOverlap_MPIAIJ(Mat C,PetscInt imax,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i;

  PetscFunctionBegin;
  if (ov < 0) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPIAIJ_Once(C,imax,is);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatIncreaseOverlap_MPIAIJ_Scalable(Mat C,PetscInt imax,IS is[],PetscInt ov)
{
  PetscErrorCode ierr;
  PetscInt       i;

  PetscFunctionBegin;
  if (ov < 0) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_OUTOFRANGE,"Negative overlap specified");
  for (i=0; i<ov; ++i) {
    ierr = MatIncreaseOverlap_MPIAIJ_Once_Scalable(C,imax,is);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
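/*
  Note on the _Scalable path: MatIncreaseOverlap_MPIAIJ_Once() below sizes its
  per-IS work arrays by the global number of matrix rows, while the _Scalable
  variants size their buffers from local nonzero counts and discover their
  communication partners with PetscCommBuildTwoSided()/PetscSF, so their
  memory footprint does not grow with the global problem size.
*/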

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once_Scalable(Mat mat,PetscInt nidx,IS is[])
{
  PetscErrorCode   ierr;
  MPI_Comm         comm;
  PetscInt        *length,length_i,tlength,*remoterows,nrrows,reducednrrows,*rrow_ranks,*rrow_isids,i,j,owner;
  PetscInt        *tosizes,*tosizes_temp,*toffsets,*fromsizes,*todata,*fromdata;
  PetscInt         nrecvrows,*sbsizes = 0,*sbdata = 0;
  const PetscInt  *indices_i,**indices;
  PetscLayout      rmap;
  PetscMPIInt      rank,size,*toranks,*fromranks,nto,nfrom;
  PetscSF          sf;
  PetscSFNode     *remote;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  /* get the row map to determine where rows should go */
  ierr = MatGetLayouts(mat,&rmap,NULL);CHKERRQ(ierr);
  /* retrieve IS data and put it all together so that we can optimize communication */
  ierr = PetscCalloc2(nidx,(PetscInt ***)&indices,nidx,&length);CHKERRQ(ierr);
  for (i=0,tlength=0; i<nidx; i++){
    ierr = ISGetLocalSize(is[i],&length[i]);CHKERRQ(ierr);
    tlength += length[i];
    ierr = ISGetIndices(is[i],&indices[i]);CHKERRQ(ierr);
  }
  /* find these rows on remote processors */
  ierr = PetscCalloc3(tlength,&remoterows,tlength,&rrow_ranks,tlength,&rrow_isids);CHKERRQ(ierr);
  ierr = PetscCalloc3(size,&toranks,2*size,&tosizes,size,&tosizes_temp);CHKERRQ(ierr);
  nrrows = 0;
  for (i=0; i<nidx; i++){
    length_i  = length[i];
    indices_i = indices[i];
    for (j=0; j<length_i; j++){
      owner = -1;
      ierr = PetscLayoutFindOwner(rmap,indices_i[j],&owner);CHKERRQ(ierr);
      /* remote processors */
      if (owner != rank){
        tosizes_temp[owner]++;        /* number of rows to owner */
        rrow_ranks[nrrows]   = owner; /* processor */
        rrow_isids[nrrows]   = i;     /* is id */
        remoterows[nrrows++] = indices_i[j]; /* row */
      }
    }
    ierr = ISRestoreIndices(is[i],&indices[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree2(indices,length);CHKERRQ(ierr);
  /* test whether we need to exchange messages; generally speaking,
   * no data exchange is needed when the overlap is 1
   * */
  ierr = MPIU_Allreduce(&nrrows,&reducednrrows,1,MPIU_INT,MPIU_MAX,comm);CHKERRQ(ierr);
  /* no messages to exchange; this usually corresponds to overlap 1 */
  if (!reducednrrows){
    ierr = PetscFree3(toranks,tosizes,tosizes_temp);CHKERRQ(ierr);
    ierr = PetscFree3(remoterows,rrow_ranks,rrow_isids);CHKERRQ(ierr);
    ierr = MatIncreaseOverlap_MPIAIJ_Local_Scalable(mat,nidx,is);CHKERRQ(ierr);
    PetscFunctionReturn(0);
  }
  nto = 0;
  /* send sizes and ranks for building a two-sided communication */
  for (i=0; i<size; i++){
    if (tosizes_temp[i]){
      tosizes[nto*2]  = tosizes_temp[i]*2; /* size */
      tosizes_temp[i] = nto; /* a map from processor to index */
      toranks[nto++]  = i;   /* processor */
    }
  }
  ierr = PetscCalloc1(nto+1,&toffsets);CHKERRQ(ierr);
  for (i=0; i<nto; i++){
    toffsets[i+1]  = toffsets[i]+tosizes[2*i]; /* offsets */
    tosizes[2*i+1] = toffsets[i];              /* offsets to send */
  }
  /* send information to other processors */
  ierr = PetscCommBuildTwoSided(comm,2,MPIU_INT,nto,toranks,tosizes,&nfrom,&fromranks,&fromsizes);CHKERRQ(ierr);
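  /* tosizes is laid out as {count,offset} pairs: tosizes[2*i] integers go to
     toranks[i], starting at offset tosizes[2*i+1] of todata; fromsizes comes
     back with the matching pairs, i.e. fromsizes[2*i] integers will arrive
     from fromranks[i], living at offset fromsizes[2*i+1] of the sender's
     todata buffer */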
  nrecvrows = 0;
  for (i=0; i<nfrom; i++) nrecvrows += fromsizes[2*i];
  ierr = PetscMalloc1(nrecvrows,&remote);CHKERRQ(ierr);
  nrecvrows = 0;
  for (i=0; i<nfrom; i++){
    for (j=0; j<fromsizes[2*i]; j++){
      remote[nrecvrows].rank    = fromranks[i];
      remote[nrecvrows++].index = fromsizes[2*i+1]+j;
    }
  }
  ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,nrecvrows,nrecvrows,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* use two-sided communication by default, since Open MPI has bugs in its one-sided implementation */
  ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  /* message pairs <is id, row> */
  ierr = PetscCalloc2(2*nrrows,&todata,nrecvrows,&fromdata);CHKERRQ(ierr);
  for (i=0; i<nrrows; i++){
    owner = rrow_ranks[i];       /* processor */
    j     = tosizes_temp[owner]; /* index */
    todata[toffsets[j]++] = rrow_isids[i];
    todata[toffsets[j]++] = remoterows[i];
  }
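  /* e.g., if this rank asks rank 2 for global rows 5 and 9 on behalf of is[0],
     the slice of todata destined for rank 2 reads {0,5, 0,9}: each request is
     the pair <is id, row> described above */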
  ierr = PetscFree3(toranks,tosizes,tosizes_temp);CHKERRQ(ierr);
  ierr = PetscFree3(remoterows,rrow_ranks,rrow_isids);CHKERRQ(ierr);
  ierr = PetscFree(toffsets);CHKERRQ(ierr);
  ierr = PetscSFBcastBegin(sf,MPIU_INT,todata,fromdata);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf,MPIU_INT,todata,fromdata);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  /* send rows belonging to the remote ranks so that we can get the overlapping data back */
  ierr = MatIncreaseOverlap_MPIAIJ_Send_Scalable(mat,nidx,nfrom,fromranks,fromsizes,fromdata,&sbsizes,&sbdata);CHKERRQ(ierr);
  ierr = PetscFree2(todata,fromdata);CHKERRQ(ierr);
  ierr = PetscFree(fromsizes);CHKERRQ(ierr);
  ierr = PetscCommBuildTwoSided(comm,2,MPIU_INT,nfrom,fromranks,sbsizes,&nto,&toranks,&tosizes);CHKERRQ(ierr);
  ierr = PetscFree(fromranks);CHKERRQ(ierr);
  nrecvrows = 0;
  for (i=0; i<nto; i++) nrecvrows += tosizes[2*i];
  ierr = PetscCalloc1(nrecvrows,&todata);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrecvrows,&remote);CHKERRQ(ierr);
  nrecvrows = 0;
  for (i=0; i<nto; i++){
    for (j=0; j<tosizes[2*i]; j++){
      remote[nrecvrows].rank    = toranks[i];
      remote[nrecvrows++].index = tosizes[2*i+1]+j;
    }
  }
  ierr = PetscSFCreate(comm,&sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf,nrecvrows,nrecvrows,NULL,PETSC_OWN_POINTER,remote,PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* use two-sided communication by default, since Open MPI has bugs in its one-sided implementation */
  ierr = PetscSFSetType(sf,PETSCSFBASIC);CHKERRQ(ierr);
  ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
  /* overlap communication and computation */
  ierr = PetscSFBcastBegin(sf,MPIU_INT,sbdata,todata);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIAIJ_Local_Scalable(mat,nidx,is);CHKERRQ(ierr);
  ierr = PetscSFBcastEnd(sf,MPIU_INT,sbdata,todata);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  ierr = PetscFree2(sbdata,sbsizes);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIAIJ_Receive_Scalable(mat,nidx,is,nrecvrows,todata);CHKERRQ(ierr);
  ierr = PetscFree(toranks);CHKERRQ(ierr);
  ierr = PetscFree(tosizes);CHKERRQ(ierr);
  ierr = PetscFree(todata);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive_Scalable(Mat mat,PetscInt nidx,IS is[],PetscInt nrecvs,PetscInt *recvdata)
{
  PetscInt         *isz,isz_i,i,j,is_id,data_size;
  PetscInt          col,lsize,max_lsize,*indices_temp,*indices_i;
  const PetscInt   *indices_i_temp;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  max_lsize = 0;
  ierr = PetscMalloc1(nidx,&isz);CHKERRQ(ierr);
  for (i=0; i<nidx; i++){
    ierr = ISGetLocalSize(is[i],&lsize);CHKERRQ(ierr);
    max_lsize = lsize>max_lsize ? lsize:max_lsize;
    isz[i]    = lsize;
  }
  ierr = PetscMalloc1((max_lsize+nrecvs)*nidx,&indices_temp);CHKERRQ(ierr);
  for (i=0; i<nidx; i++){
    ierr = ISGetIndices(is[i],&indices_i_temp);CHKERRQ(ierr);
    ierr = PetscMemcpy(indices_temp+i*(max_lsize+nrecvs),indices_i_temp,sizeof(PetscInt)*isz[i]);CHKERRQ(ierr);
    ierr = ISRestoreIndices(is[i],&indices_i_temp);CHKERRQ(ierr);
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }
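  /* indices_temp is partitioned into nidx fixed-size slabs: slab i starts at
     indices_temp + i*(max_lsize+nrecvs) and can hold the original indices of
     is[i] plus, in the worst case, every received column index */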
  /* walk the received data to get each row id and its overlap */
  for (i=0; i<nrecvs; ){
    is_id     = recvdata[i++];
    data_size = recvdata[i++];
    indices_i = indices_temp+(max_lsize+nrecvs)*is_id;
    isz_i     = isz[is_id];
    for (j=0; j<data_size; j++){
      col = recvdata[i++];
      indices_i[isz_i++] = col;
    }
    isz[is_id] = isz_i;
  }
  /* remove duplicate entries */
  for (i=0; i<nidx; i++){
    indices_i = indices_temp+(max_lsize+nrecvs)*i;
    isz_i     = isz[i];
    ierr = PetscSortRemoveDupsInt(&isz_i,indices_i);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz_i,indices_i,PETSC_COPY_VALUES,&is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(isz);CHKERRQ(ierr);
  ierr = PetscFree(indices_temp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Send_Scalable(Mat mat,PetscInt nidx,PetscMPIInt nfrom,PetscMPIInt *fromranks,PetscInt *fromsizes,PetscInt *fromrows,PetscInt **sbrowsizes,PetscInt **sbrows)
{
  PetscLayout       rmap,cmap;
  PetscInt          i,j,k,l,*rows_i,*rows_data_ptr,**rows_data,max_fszs,rows_pos,*rows_pos_i;
  PetscInt          is_id,tnz,an,bn,rstart,cstart,row,start,end,col,totalrows,*sbdata;
  PetscInt         *indv_counts,indvc_ij,*sbsizes,*indices_tmp,*offsets;
  const PetscInt   *gcols,*ai,*aj,*bi,*bj;
  Mat               amat,bmat;
  PetscMPIInt       rank;
  PetscBool         done;
  MPI_Comm          comm;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MatMPIAIJGetSeqAIJ(mat,&amat,&bmat,&gcols);CHKERRQ(ierr);
  /* even if the matrix is symmetric, we still treat it as nonsymmetric */
  ierr = MatGetRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr);
  if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ");
  ierr = MatGetRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr);
  if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ");
  /* the total number of nonzero values is used to estimate the memory usage in the next step */
  tnz  = ai[an]+bi[bn];
  ierr = MatGetLayouts(mat,&rmap,&cmap);CHKERRQ(ierr);
  ierr = PetscLayoutGetRange(rmap,&rstart,NULL);CHKERRQ(ierr);
  ierr = PetscLayoutGetRange(cmap,&cstart,NULL);CHKERRQ(ierr);
  /* find the longest message */
  max_fszs = 0;
  for (i=0; i<nfrom; i++) max_fszs = fromsizes[2*i]>max_fszs ? fromsizes[2*i]:max_fszs;
  /* is there a better way to estimate the number of nonzeros in the matrix? */
  ierr = PetscCalloc5(max_fszs*nidx,&rows_data_ptr,nidx,&rows_data,nidx,&rows_pos_i,nfrom*nidx,&indv_counts,tnz,&indices_tmp);CHKERRQ(ierr);
  for (i=0; i<nidx; i++) rows_data[i] = rows_data_ptr+max_fszs*i;
  rows_pos  = 0;
  totalrows = 0;
  for (i=0; i<nfrom; i++){
    ierr = PetscMemzero(rows_pos_i,sizeof(PetscInt)*nidx);CHKERRQ(ierr);
    /* group data together */
    for (j=0; j<fromsizes[2*i]; j+=2){
      is_id                       = fromrows[rows_pos++]; /* is id */
      rows_i                      = rows_data[is_id];
      rows_i[rows_pos_i[is_id]++] = fromrows[rows_pos++]; /* row */
    }
    /* estimate the space needed so as to avoid multiple allocations */
    for (j=0; j<nidx; j++){
      indvc_ij = 0;
      rows_i   = rows_data[j];
      for (l=0; l<rows_pos_i[j]; l++){
        row   = rows_i[l]-rstart;
        start = ai[row];
        end   = ai[row+1];
        for (k=start; k<end; k++){ /* Amat */
          col = aj[k] + cstart;
          indices_tmp[indvc_ij++] = col; /* do not count the rows from the original rank */
        }
        start = bi[row];
        end   = bi[row+1];
        for (k=start; k<end; k++) { /* Bmat */
          col = gcols[bj[k]];
          indices_tmp[indvc_ij++] = col;
        }
      }
      ierr = PetscSortRemoveDupsInt(&indvc_ij,indices_tmp);CHKERRQ(ierr);
      indv_counts[i*nidx+j] = indvc_ij;
      totalrows            += indvc_ij;
    }
  }
  /* message triples <is id, number of rows, rows> */
  ierr = PetscCalloc2(totalrows+nidx*nfrom*2,&sbdata,2*nfrom,&sbsizes);CHKERRQ(ierr);
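  /* e.g., a reply covering columns {10,12,15} for is 0 and columns {4,7} for
     is 1 is packed as sbdata = {0,3,10,12,15, 1,2,4,7}: each triple is
     <is id, count, column indices> */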
  totalrows = 0;
  rows_pos  = 0;
  /* reuse the grouping pass from above */
  for (i=0; i<nfrom; i++){
    ierr = PetscMemzero(rows_pos_i,sizeof(PetscInt)*nidx);CHKERRQ(ierr);
    for (j=0; j<fromsizes[2*i]; j+=2){
      is_id                       = fromrows[rows_pos++];
      rows_i                      = rows_data[is_id];
      rows_i[rows_pos_i[is_id]++] = fromrows[rows_pos++];
    }
    /* add data */
    for (j=0; j<nidx; j++){
      if (!indv_counts[i*nidx+j]) continue;
      indvc_ij = 0;
      sbdata[totalrows++] = j;
      sbdata[totalrows++] = indv_counts[i*nidx+j];
      sbsizes[2*i]       += 2;
      rows_i              = rows_data[j];
      for (l=0; l<rows_pos_i[j]; l++){
        row   = rows_i[l]-rstart;
        start = ai[row];
        end   = ai[row+1];
        for (k=start; k<end; k++){ /* Amat */
          col = aj[k] + cstart;
          indices_tmp[indvc_ij++] = col;
        }
        start = bi[row];
        end   = bi[row+1];
        for (k=start; k<end; k++) { /* Bmat */
          col = gcols[bj[k]];
          indices_tmp[indvc_ij++] = col;
        }
      }
      ierr = PetscSortRemoveDupsInt(&indvc_ij,indices_tmp);CHKERRQ(ierr);
      sbsizes[2*i]  += indvc_ij;
      ierr = PetscMemcpy(sbdata+totalrows,indices_tmp,sizeof(PetscInt)*indvc_ij);CHKERRQ(ierr);
      totalrows += indvc_ij;
    }
  }
  ierr = PetscCalloc1(nfrom+1,&offsets);CHKERRQ(ierr);
  for (i=0; i<nfrom; i++){
    offsets[i+1]   = offsets[i] + sbsizes[2*i];
    sbsizes[2*i+1] = offsets[i];
  }
  ierr = PetscFree(offsets);CHKERRQ(ierr);
  if (sbrowsizes) *sbrowsizes = sbsizes;
  if (sbrows) *sbrows = sbdata;
  ierr = PetscFree5(rows_data_ptr,rows_data,rows_pos_i,indv_counts,indices_tmp);CHKERRQ(ierr);
  ierr = MatRestoreRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr);
  ierr = MatRestoreRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local_Scalable(Mat mat,PetscInt nidx,IS is[])
{
  const PetscInt   *gcols,*ai,*aj,*bi,*bj,*indices;
  PetscInt          tnz,an,bn,i,j,row,start,end,rstart,cstart,col,k,*indices_temp;
  PetscInt          lsize,lsize_tmp,owner;
  PetscMPIInt       rank;
  Mat               amat,bmat;
  PetscBool         done;
  PetscLayout       cmap,rmap;
  MPI_Comm          comm;
  PetscErrorCode    ierr;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MatMPIAIJGetSeqAIJ(mat,&amat,&bmat,&gcols);CHKERRQ(ierr);
  ierr = MatGetRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr);
  if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ");
  ierr = MatGetRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr);
  if (!done) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"cannot get row IJ");
  /* is this a safe way to compute the number of nonzero values? */
  tnz  = ai[an]+bi[bn];
  ierr = MatGetLayouts(mat,&rmap,&cmap);CHKERRQ(ierr);
  ierr = PetscLayoutGetRange(rmap,&rstart,NULL);CHKERRQ(ierr);
  ierr = PetscLayoutGetRange(cmap,&cstart,NULL);CHKERRQ(ierr);
  /* this estimates memory better than the old implementation,
   * which used the global size of the matrix
   * */
  ierr = PetscMalloc1(tnz,&indices_temp);CHKERRQ(ierr);
  for (i=0; i<nidx; i++) {
    ierr = ISGetLocalSize(is[i],&lsize);CHKERRQ(ierr);
    ierr = ISGetIndices(is[i],&indices);CHKERRQ(ierr);
    lsize_tmp = 0;
    for (j=0; j<lsize; j++) {
      owner = -1;
      row   = indices[j];
      ierr = PetscLayoutFindOwner(rmap,row,&owner);CHKERRQ(ierr);
      if (owner != rank) continue;
      /* local number */
      row  -= rstart;
      start = ai[row];
      end   = ai[row+1];
      for (k=start; k<end; k++) { /* Amat */
        col = aj[k] + cstart;
        indices_temp[lsize_tmp++] = col;
      }
      start = bi[row];
      end   = bi[row+1];
      for (k=start; k<end; k++) { /* Bmat */
        col = gcols[bj[k]];
        indices_temp[lsize_tmp++] = col;
      }
    }
    ierr = ISRestoreIndices(is[i],&indices);CHKERRQ(ierr);
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
    ierr = PetscSortRemoveDupsInt(&lsize_tmp,indices_temp);CHKERRQ(ierr);
    ierr = ISCreateGeneral(PETSC_COMM_SELF,lsize_tmp,indices_temp,PETSC_COPY_VALUES,&is[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(indices_temp);CHKERRQ(ierr);
  ierr = MatRestoreRowIJ(amat,0,PETSC_FALSE,PETSC_FALSE,&an,&ai,&aj,&done);CHKERRQ(ierr);
  ierr = MatRestoreRowIJ(bmat,0,PETSC_FALSE,PETSC_FALSE,&bn,&bi,&bj,&done);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  Sample message format:
  If processor A wants processor B to process some elements corresponding
  to index sets is[1] and is[5]:
  mesg [0] = 2    (no of index sets in the mesg)
  -----------
  mesg [1] = 1 => is[1]
  mesg [2] = sizeof(is[1]);
  -----------
  mesg [3] = 5 => is[5]
  mesg [4] = sizeof(is[5]);
  -----------
  mesg [5]
  ...        data(is[1])
  mesg [n]
  -----------
  mesg [n+1]
  ...        data(is[5])
  mesg [m]
  -----------

  Notes:
  nrqs - no of requests sent (or to be sent out)
  nrqr - no of requests received (which have to be, or have been, processed)
*/
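/*
  Concrete instance: if is[1] holds rows {3,7} and is[5] holds row {9}, the
  message sent to B is {2, 1,2, 5,1, 3,7, 9}.
*/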
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Once(Mat C,PetscInt imax,IS is[])
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  PetscMPIInt    *w1,*w2,nrqr,*w3,*w4,*onodes1,*olengths1,*onodes2,*olengths2;
  const PetscInt **idx,*idx_i;
  PetscInt       *n,**data,len;
#if defined(PETSC_USE_CTABLE)
  PetscTable     *table_data,table_data_i;
  PetscInt       *tdata,tcount,tcount_max;
#else
  PetscInt       *data_i,*d_p;
#endif
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,tag1,tag2;
  PetscInt       M,i,j,k,**rbuf,row,proc = 0,nrqs,msz,**outdat,**ptr;
  PetscInt       *ctr,*pa,*tmp,*isz,*isz1,**xdata,**rbuf2;
  PetscBT        *table;
  MPI_Comm       comm;
  MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2;
  MPI_Status     *s_status,*recv_status;
  char           *t_p;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  size = c->size;
  rank = c->rank;
  M    = C->rmap->N;

  ierr = PetscObjectGetNewTag((PetscObject)C,&tag1);CHKERRQ(ierr);
  ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);

  ierr = PetscMalloc2(imax,&idx,imax,&n);CHKERRQ(ierr);

  for (i=0; i<imax; i++) {
    ierr = ISGetIndices(is[i],&idx[i]);CHKERRQ(ierr);
    ierr = ISGetLocalSize(is[i],&n[i]);CHKERRQ(ierr);
  }

  /* evaluate communication - who to message, message lengths, and buffer space
     required; based on this, buffers are allocated and data copied into them */
  ierr = PetscMalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr);
  ierr = PetscMemzero(w1,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialize work vector */
  ierr = PetscMemzero(w2,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialize work vector */
  ierr = PetscMemzero(w3,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialize work vector */
  for (i=0; i<imax; i++) {
    ierr  = PetscMemzero(w4,size*sizeof(PetscMPIInt));CHKERRQ(ierr); /* initialize work vector */
    idx_i = idx[i];
    len   = n[i];
    for (j=0; j<len; j++) {
      row = idx_i[j];
      if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Index set cannot have negative entries");
      ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr);
      w4[proc]++;
    }
    for (j=0; j<size; j++) {
      if (w4[j]) { w1[j] += w4[j]; w3[j]++;}
    }
  }

  nrqs     = 0;              /* no of outgoing messages */
  msz      = 0;              /* total mesg length (for all procs) */
  w1[rank] = 0;              /* no mesg sent to itself */
  w3[rank] = 0;
  for (i=0; i<size; i++) {
    if (w1[i])  {w2[i] = 1; nrqs++;} /* there exists a message to proc i */
  }
  /* pa - the list of processors to communicate with */
  ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr);
  for (i=0,j=0; i<size; i++) {
    if (w1[i]) {pa[j] = i; j++;}
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i=0; i<nrqs; i++) {
    j      = pa[i];
    w1[j] += w2[j] + 2*w3[j];
    msz   += w1[j];
  }

  /* Determine the number of messages to expect, their lengths, and their from-ids */
  ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);

  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf,&r_waits1);CHKERRQ(ierr);

  /* Allocate memory for outgoing messages */
  ierr = PetscMalloc4(size,&outdat,size,&ptr,msz,&tmp,size,&ctr);CHKERRQ(ierr);
  ierr = PetscMemzero(outdat,size*sizeof(PetscInt*));CHKERRQ(ierr);
  ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);

  {
    PetscInt *iptr = tmp,ict  = 0;
    for (i=0; i<nrqs; i++) {
      j         = pa[i];
      iptr     +=  ict;
      outdat[j] = iptr;
      ict       = w1[j];
    }
  }

  /* Form the outgoing messages */
  /* plug in the headers */
  for (i=0; i<nrqs; i++) {
    j            = pa[i];
    outdat[j][0] = 0;
    ierr         = PetscMemzero(outdat[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
    ptr[j]       = outdat[j] + 2*w3[j] + 1;
  }
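  /* after this loop each outgoing buffer outdat[j] has the layout
     {count, (is id, size) pairs ..., row indices ...}; ptr[j] points at the
     first free slot of the data section and advances as rows are copied in */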

  /* Memory for doing the local proc's work */
  {
    PetscInt M_BPB_imax = 0;
#if defined(PETSC_USE_CTABLE)
    ierr = PetscIntMultError((M/PETSC_BITS_PER_BYTE+1),imax, &M_BPB_imax);CHKERRQ(ierr);
    ierr = PetscMalloc1(imax,&table_data);CHKERRQ(ierr);
    for (i=0; i<imax; i++) {
      ierr = PetscTableCreate(n[i]+1,M+1,&table_data[i]);CHKERRQ(ierr);
    }
    ierr = PetscCalloc4(imax,&table, imax,&data, imax,&isz, M_BPB_imax,&t_p);CHKERRQ(ierr);
    for (i=0; i<imax; i++) {
      table[i] = t_p + (M/PETSC_BITS_PER_BYTE+1)*i;
    }
#else
    PetscInt Mimax = 0;
    ierr = PetscIntMultError(M,imax, &Mimax);CHKERRQ(ierr);
    ierr = PetscIntMultError((M/PETSC_BITS_PER_BYTE+1),imax, &M_BPB_imax);CHKERRQ(ierr);
    ierr = PetscCalloc5(imax,&table, imax,&data, imax,&isz, Mimax,&d_p, M_BPB_imax,&t_p);CHKERRQ(ierr);
    for (i=0; i<imax; i++) {
      table[i] = t_p + (M/PETSC_BITS_PER_BYTE+1)*i;
      data[i]  = d_p + M*i;
    }
#endif
  }
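  /* each table[i] is an M-bit PetscBT carved out of the single allocation t_p,
     giving O(1) membership tests per global row; in the non-CTABLE branch each
     data[i] is likewise an M-entry slice of d_p, which is why this variant's
     memory use grows with the global matrix size */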

  /* Parse the IS and update local tables and the outgoing buf with the data */
  {
    PetscInt n_i,isz_i,*outdat_j,ctr_j;
    PetscBT  table_i;

    for (i=0; i<imax; i++) {
      ierr    = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
      n_i     = n[i];
      table_i = table[i];
      idx_i   = idx[i];
#if defined(PETSC_USE_CTABLE)
      table_data_i = table_data[i];
#else
      data_i  = data[i];
#endif
      isz_i   = isz[i];
      for (j=0; j<n_i; j++) {   /* parse the indices of each IS */
        row  = idx_i[j];
        ierr = PetscLayoutFindOwner(C->rmap,row,&proc);CHKERRQ(ierr);
        if (proc != rank) { /* copy to the outgoing buffer */
          ctr[proc]++;
          *ptr[proc] = row;
          ptr[proc]++;
        } else if (!PetscBTLookupSet(table_i,row)) {
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableAdd(table_data_i,row+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr);
#else
          data_i[isz_i] = row; /* update the local table */
#endif
          isz_i++;
        }
      }
      /* Update the headers for the current IS */
      for (j=0; j<size; j++) { /* could optimize this loop by using pa[] */
        if ((ctr_j = ctr[j])) {
          outdat_j        = outdat[j];
          k               = ++outdat_j[0];
          outdat_j[2*k]   = ctr_j;
          outdat_j[2*k-1] = i;
        }
      }
      isz[i] = isz_i;
    }
  }

  /* Now post the sends */
  ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr);
  for (i=0; i<nrqs; ++i) {
    j    = pa[i];
    ierr = MPI_Isend(outdat[j],w1[j],MPIU_INT,j,tag1,comm,s_waits1+i);CHKERRQ(ierr);
  }

  /* No longer need the original indices */
  for (i=0; i<imax; ++i) {
    ierr = ISRestoreIndices(is[i],idx+i);CHKERRQ(ierr);
  }
  ierr = PetscFree2(idx,n);CHKERRQ(ierr);

  for (i=0; i<imax; ++i) {
    ierr = ISDestroy(&is[i]);CHKERRQ(ierr);
  }

  /* Do the local work */
#if defined(PETSC_USE_CTABLE)
  ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,NULL,table_data);CHKERRQ(ierr);
#else
  ierr = MatIncreaseOverlap_MPIAIJ_Local(C,imax,table,isz,data,NULL);CHKERRQ(ierr);
#endif

  /* Receive messages */
  ierr = PetscMalloc1(nrqr+1,&recv_status);CHKERRQ(ierr);
  if (nrqr) {ierr = MPI_Waitall(nrqr,r_waits1,recv_status);CHKERRQ(ierr);}

  ierr = PetscMalloc1(nrqs+1,&s_status);CHKERRQ(ierr);
  if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status);CHKERRQ(ierr);}

  /* Phase 1 sends are complete - deallocate buffers */
  ierr = PetscFree4(outdat,ptr,tmp,ctr);CHKERRQ(ierr);
  ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);

  ierr = PetscMalloc1(nrqr+1,&xdata);CHKERRQ(ierr);
  ierr = PetscMalloc1(nrqr+1,&isz1);CHKERRQ(ierr);
  ierr = MatIncreaseOverlap_MPIAIJ_Receive(C,nrqr,rbuf,xdata,isz1);CHKERRQ(ierr);
  ierr = PetscFree(rbuf[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf);CHKERRQ(ierr);

  /* Send the data back */
  /* Do a global reduction to learn the buffer space required for incoming messages */
  {
    PetscMPIInt *rw1;

    ierr = PetscCalloc1(size,&rw1);CHKERRQ(ierr);

    for (i=0; i<nrqr; ++i) {
      proc = recv_status[i].MPI_SOURCE;

      if (proc != onodes1[i]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPI_SOURCE mismatch");
      rw1[proc] = isz1[i];
    }
    ierr = PetscFree(onodes1);CHKERRQ(ierr);
    ierr = PetscFree(olengths1);CHKERRQ(ierr);

    /* Determine the number of messages to expect, their lengths, and their from-ids */
    ierr = PetscGatherMessageLengths(comm,nrqr,nrqs,rw1,&onodes2,&olengths2);CHKERRQ(ierr);
    ierr = PetscFree(rw1);CHKERRQ(ierr);
  }
  /* Now post the Irecvs corresponding to these messages */
  ierr = PetscPostIrecvInt(comm,tag2,nrqs,onodes2,olengths2,&rbuf2,&r_waits2);CHKERRQ(ierr);

  /* Now post the sends */
  ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr);
  for (i=0; i<nrqr; ++i) {
    j    = recv_status[i].MPI_SOURCE;
    ierr = MPI_Isend(xdata[i],isz1[i],MPIU_INT,j,tag2,comm,s_waits2+i);CHKERRQ(ierr);
  }

  /* receive the work done on other processors */
  {
    PetscInt    is_no,ct1,max,*rbuf2_i,isz_i,jmax;
    PetscMPIInt idex;
    PetscBT     table_i;
    MPI_Status  *status2;

    ierr = PetscMalloc1((PetscMax(nrqr,nrqs)+1),&status2);CHKERRQ(ierr);
    for (i=0; i<nrqs; ++i) {
      ierr = MPI_Waitany(nrqs,r_waits2,&idex,status2+i);CHKERRQ(ierr);
      /* Process the message */
      rbuf2_i = rbuf2[idex];
      ct1     = 2*rbuf2_i[0]+1;
      jmax    = rbuf2[idex][0];
      for (j=1; j<=jmax; j++) {
        max     = rbuf2_i[2*j];
        is_no   = rbuf2_i[2*j-1];
        isz_i   = isz[is_no];
        table_i = table[is_no];
#if defined(PETSC_USE_CTABLE)
        table_data_i = table_data[is_no];
#else
        data_i  = data[is_no];
#endif
        for (k=0; k<max; k++,ct1++) {
          row = rbuf2_i[ct1];
          if (!PetscBTLookupSet(table_i,row)) {
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableAdd(table_data_i,row+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr);
#else
            data_i[isz_i] = row;
#endif
            isz_i++;
          }
        }
        isz[is_no] = isz_i;
      }
    }

    if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,status2);CHKERRQ(ierr);}
    ierr = PetscFree(status2);CHKERRQ(ierr);
  }

#if defined(PETSC_USE_CTABLE)
  tcount_max = 0;
  for (i=0; i<imax; ++i) {
    table_data_i = table_data[i];
    ierr = PetscTableGetCount(table_data_i,&tcount);CHKERRQ(ierr);
    if (tcount_max < tcount) tcount_max = tcount;
  }
  ierr = PetscMalloc1(tcount_max+1,&tdata);CHKERRQ(ierr);
#endif

  for (i=0; i<imax; ++i) {
#if defined(PETSC_USE_CTABLE)
    PetscTablePosition tpos;
    table_data_i = table_data[i];

    ierr = PetscTableGetHeadPosition(table_data_i,&tpos);CHKERRQ(ierr);
    while (tpos) {
      ierr = PetscTableGetNext(table_data_i,&tpos,&k,&j);CHKERRQ(ierr);
      tdata[--j] = --k;
    }
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],tdata,PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
#else
    ierr = ISCreateGeneral(PETSC_COMM_SELF,isz[i],data[i],PETSC_COPY_VALUES,is+i);CHKERRQ(ierr);
#endif
  }

  ierr = PetscFree(onodes2);CHKERRQ(ierr);
  ierr = PetscFree(olengths2);CHKERRQ(ierr);

  ierr = PetscFree(pa);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2[0]);CHKERRQ(ierr);
  ierr = PetscFree(rbuf2);CHKERRQ(ierr);
  ierr = PetscFree(s_waits1);CHKERRQ(ierr);
  ierr = PetscFree(r_waits1);CHKERRQ(ierr);
  ierr = PetscFree(s_waits2);CHKERRQ(ierr);
  ierr = PetscFree(r_waits2);CHKERRQ(ierr);
  ierr = PetscFree(s_status);CHKERRQ(ierr);
  ierr = PetscFree(recv_status);CHKERRQ(ierr);
  ierr = PetscFree(xdata[0]);CHKERRQ(ierr);
  ierr = PetscFree(xdata);CHKERRQ(ierr);
  ierr = PetscFree(isz1);CHKERRQ(ierr);
#if defined(PETSC_USE_CTABLE)
  for (i=0; i<imax; i++) {
    ierr = PetscTableDestroy((PetscTable*)&table_data[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(table_data);CHKERRQ(ierr);
  ierr = PetscFree(tdata);CHKERRQ(ierr);
  ierr = PetscFree4(table,data,isz,t_p);CHKERRQ(ierr);
#else
  ierr = PetscFree5(table,data,isz,d_p,t_p);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}

/*
   MatIncreaseOverlap_MPIAIJ_Local - Called by MatIncreaseOverlap to do
       the work on the local processor.

     Inputs:
      C      - MAT_MPIAIJ;
      imax   - total no of index sets processed at a time;
      table  - an array of char - size = m bits.

     Output:
      isz    - array containing the count of the solution elements corresponding
               to each index set;
      data or table_data - pointer to the solutions
*/
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Local(Mat C,PetscInt imax,PetscBT *table,PetscInt *isz,PetscInt **data,PetscTable *table_data)
{
  Mat_MPIAIJ *c = (Mat_MPIAIJ*)C->data;
  Mat        A  = c->A,B = c->B;
  Mat_SeqAIJ *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data;
  PetscInt   start,end,val,max,rstart,cstart,*ai,*aj;
  PetscInt   *bi,*bj,*garray,i,j,k,row,isz_i;
  PetscBT    table_i;
#if defined(PETSC_USE_CTABLE)
  PetscTable         table_data_i;
  PetscErrorCode     ierr;
  PetscTablePosition tpos;
  PetscInt           tcount,*tdata;
#else
  PetscInt           *data_i;
#endif

  PetscFunctionBegin;
  rstart = C->rmap->rstart;
  cstart = C->cmap->rstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0; i<imax; i++) {
#if defined(PETSC_USE_CTABLE)
    /* copy existing entries of table_data_i into tdata[] */
    table_data_i = table_data[i];
    ierr = PetscTableGetCount(table_data_i,&tcount);CHKERRQ(ierr);
    if (tcount != isz[i]) SETERRQ3(PETSC_COMM_SELF,0," tcount %d != isz[%d] %d",tcount,i,isz[i]);

    ierr = PetscMalloc1(tcount,&tdata);CHKERRQ(ierr);
    ierr = PetscTableGetHeadPosition(table_data_i,&tpos);CHKERRQ(ierr);
    while (tpos) {
      ierr = PetscTableGetNext(table_data_i,&tpos,&row,&j);CHKERRQ(ierr);
      tdata[--j] = --row;
      if (j > tcount - 1) SETERRQ2(PETSC_COMM_SELF,0," j %d >= tcount %d",j,tcount);
    }
#else
    data_i  = data[i];
#endif
    table_i = table[i];
    isz_i   = isz[i];
    max     = isz[i];

    for (j=0; j<max; j++) {
#if defined(PETSC_USE_CTABLE)
      row   = tdata[j] - rstart;
#else
      row   = data_i[j] - rstart;
#endif
      start = ai[row];
      end   = ai[row+1];
      for (k=start; k<end; k++) { /* Amat */
        val = aj[k] + cstart;
        if (!PetscBTLookupSet(table_i,val)) {
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableAdd(table_data_i,val+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr);
#else
          data_i[isz_i] = val;
#endif
          isz_i++;
        }
      }
      start = bi[row];
      end   = bi[row+1];
      for (k=start; k<end; k++) { /* Bmat */
        val = garray[bj[k]];
        if (!PetscBTLookupSet(table_i,val)) {
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableAdd(table_data_i,val+1,isz_i+1,INSERT_VALUES);CHKERRQ(ierr);
#else
          data_i[isz_i] = val;
#endif
          isz_i++;
        }
      }
    }
    isz[i] = isz_i;

#if defined(PETSC_USE_CTABLE)
    ierr = PetscFree(tdata);CHKERRQ(ierr);
#endif
  }
  PetscFunctionReturn(0);
}

/*
      MatIncreaseOverlap_MPIAIJ_Receive - Process the received messages,
         and return the output

         Input:
           C    - the matrix
           nrqr - no of messages being processed.
           rbuf - an array of pointers to the received requests

         Output:
           xdata - array of messages to be sent back
           isz1  - size of each message

  For better efficiency perhaps we should malloc each xdata[i] separately;
then if a realloc is required we need only copy the data for that one row
rather than all previous rows, as it is now where a single large chunk of
memory is used.

*/
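/*
  The reply format mirrors the request: for each message i, xdata[i] is
  {count, (is_no, nrows) pairs ..., rows ...}; e.g. a reply covering only
  is 1 with rows {3,7,8} is {1, 1,3, 3,7,8}, and isz1[i] records its total
  length.
*/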
static PetscErrorCode MatIncreaseOverlap_MPIAIJ_Receive(Mat C,PetscInt nrqr,PetscInt **rbuf,PetscInt **xdata,PetscInt *isz1)
{
  Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
  Mat            A  = c->A,B = c->B;
  Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data;
  PetscErrorCode ierr;
  PetscInt       rstart,cstart,*ai,*aj,*bi,*bj,*garray,i,j,k;
  PetscInt       row,total_sz,ct,ct1,ct2,ct3,mem_estimate,oct2,l,start,end;
  PetscInt       val,max1,max2,m,no_malloc =0,*tmp,new_estimate,ctr;
  PetscInt       *rbuf_i,kmax,rbuf_0;
  PetscBT        xtable;

  PetscFunctionBegin;
  m      = C->rmap->N;
  rstart = C->rmap->rstart;
  cstart = C->cmap->rstart;
  ai     = a->i;
  aj     = a->j;
  bi     = b->i;
  bj     = b->j;
  garray = c->garray;

  for (i=0,ct=0,total_sz=0; i<nrqr; ++i) {
    rbuf_i =  rbuf[i];
    rbuf_0 =  rbuf_i[0];
    ct    += rbuf_0;
    for (j=1; j<=rbuf_0; j++) total_sz += rbuf_i[2*j];
  }

  if (C->rmap->n) max1 = ct*(a->nz + b->nz)/C->rmap->n;
  else max1 = 1;
  mem_estimate = 3*((total_sz > max1 ? total_sz : max1)+1);
  ierr         = PetscMalloc1(mem_estimate,&xdata[0]);CHKERRQ(ierr);
  ++no_malloc;
  ierr = PetscBTCreate(m,&xtable);CHKERRQ(ierr);
  ierr = PetscMemzero(isz1,nrqr*sizeof(PetscInt));CHKERRQ(ierr);

  ct3 = 0;
  for (i=0; i<nrqr; i++) { /* for each message from proc i */
    rbuf_i =  rbuf[i];
    rbuf_0 =  rbuf_i[0];
    ct1    =  2*rbuf_0+1;
    ct2    =  ct1;
    ct3   += ct1;
    for (j=1; j<=rbuf_0; j++) { /* for each IS from proc i */
      ierr = PetscBTMemzero(m,xtable);CHKERRQ(ierr);
      oct2 = ct2;
      kmax = rbuf_i[2*j];
      for (k=0; k<kmax; k++,ct1++) {
        row = rbuf_i[ct1];
        if (!PetscBTLookupSet(xtable,row)) {
          if (!(ct3 < mem_estimate)) {
            new_estimate = (PetscInt)(1.5*mem_estimate)+1;
            ierr         = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr);
            ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
            ierr         = PetscFree(xdata[0]);CHKERRQ(ierr);
            xdata[0]     = tmp;
            mem_estimate = new_estimate; ++no_malloc;
            for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
          }
          xdata[i][ct2++] = row;
          ct3++;
        }
      }
      for (k=oct2,max2=ct2; k<max2; k++) {
        row   = xdata[i][k] - rstart;
        start = ai[row];
        end   = ai[row+1];
        for (l=start; l<end; l++) {
          val = aj[l] + cstart;
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr         = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr);
              ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr         = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
        start = bi[row];
        end   = bi[row+1];
        for (l=start; l<end; l++) {
          val = garray[bj[l]];
          if (!PetscBTLookupSet(xtable,val)) {
            if (!(ct3 < mem_estimate)) {
              new_estimate = (PetscInt)(1.5*mem_estimate)+1;
              ierr         = PetscMalloc1(new_estimate,&tmp);CHKERRQ(ierr);
              ierr         = PetscMemcpy(tmp,xdata[0],mem_estimate*sizeof(PetscInt));CHKERRQ(ierr);
              ierr         = PetscFree(xdata[0]);CHKERRQ(ierr);
              xdata[0]     = tmp;
              mem_estimate = new_estimate; ++no_malloc;
              for (ctr=1; ctr<=i; ctr++) xdata[ctr] = xdata[ctr-1] + isz1[ctr-1];
            }
            xdata[i][ct2++] = val;
            ct3++;
          }
        }
      }
      /* Update the header */
      xdata[i][2*j]   = ct2 - oct2; /* undo the vector isz1 and use only a var */
      xdata[i][2*j-1] = rbuf_i[2*j-1];
    }
    xdata[i][0] = rbuf_0;
    xdata[i+1]  = xdata[i] + ct2;
    isz1[i]     = ct2; /* size of each message */
  }
  ierr = PetscBTDestroy(&xtable);CHKERRQ(ierr);
  ierr = PetscInfo3(C,"Allocated %D bytes, required %D bytes, no of mallocs = %D\n",mem_estimate,ct3,no_malloc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
extern PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool*,Mat*);
extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);
/*
    Every processor gets the entire matrix
*/
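/*
  The gather proceeds in two phases over the same recvcounts/displs pattern:
  first the row lengths and column indices (the structure), then, when flag
  is MAT_GET_VALUES, the numerical values; each phase is an MPI_Allgatherv
  on the concatenated local rows.
*/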
PetscErrorCode MatGetSubMatrix_MPIAIJ_All(Mat A,MatGetSubMatrixOption flag,MatReuse scall,Mat *Bin[])
{
  Mat            B;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *b,*ad = (Mat_SeqAIJ*)a->A->data,*bd = (Mat_SeqAIJ*)a->B->data;
  PetscErrorCode ierr;
  PetscMPIInt    size,rank,*recvcounts = 0,*displs = 0;
  PetscInt       sendcount,i,*rstarts = A->rmap->range,n,cnt,j;
  PetscInt       m,*b_sendj,*garray = a->garray,*lens,*jsendbuf,*a_jsendbuf,*b_jsendbuf;
  MatScalar      *sendbuf,*recvbuf,*a_sendbuf,*b_sendbuf;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)A),&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);CHKERRQ(ierr);

  if (scall == MAT_INITIAL_MATRIX) {
    /* ----------------------------------------------------------------
         Tell every processor the number of nonzeros per row
    */
    ierr = PetscMalloc1(A->rmap->N,&lens);CHKERRQ(ierr);
    for (i=A->rmap->rstart; i<A->rmap->rend; i++) {
      lens[i] = ad->i[i-A->rmap->rstart+1] - ad->i[i-A->rmap->rstart] + bd->i[i-A->rmap->rstart+1] - bd->i[i-A->rmap->rstart];
    }
    ierr = PetscMalloc2(size,&recvcounts,size,&displs);CHKERRQ(ierr);
    for (i=0; i<size; i++) {
      recvcounts[i] = A->rmap->range[i+1] - A->rmap->range[i];
      displs[i]     = A->rmap->range[i];
    }
#if defined(PETSC_HAVE_MPI_IN_PLACE)
    ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
#else
    sendcount = A->rmap->rend - A->rmap->rstart;
    ierr = MPI_Allgatherv(lens+A->rmap->rstart,sendcount,MPIU_INT,lens,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
#endif
    /* ---------------------------------------------------------------
         Create the sequential matrix of the same type as the local block diagonal
    */
    ierr  = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr);
    ierr  = MatSetSizes(B,A->rmap->N,A->cmap->N,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr  = MatSetBlockSizesFromMats(B,A,A);CHKERRQ(ierr);
    ierr  = MatSetType(B,((PetscObject)a->A)->type_name);CHKERRQ(ierr);
    ierr  = MatSeqAIJSetPreallocation(B,0,lens);CHKERRQ(ierr);
    ierr  = PetscMalloc1(1,Bin);CHKERRQ(ierr);
    **Bin = B;
    b     = (Mat_SeqAIJ*)B->data;

    /*--------------------------------------------------------------------
       Copy my part of the matrix column indices over
    */
    sendcount  = ad->nz + bd->nz;
    jsendbuf   = b->j + b->i[rstarts[rank]];
    a_jsendbuf = ad->j;
    b_jsendbuf = bd->j;
    n          = A->rmap->rend - A->rmap->rstart;
    cnt        = 0;
    for (i=0; i<n; i++) {

      /* put in lower diagonal portion */
      m = bd->i[i+1] - bd->i[i];
      while (m > 0) {
        /* is it above the diagonal (in bd (compressed) numbering)? */
        if (garray[*b_jsendbuf] > A->rmap->rstart + i) break;
        jsendbuf[cnt++] = garray[*b_jsendbuf++];
        m--;
      }

      /* put in diagonal portion */
      for (j=ad->i[i]; j<ad->i[i+1]; j++) {
        jsendbuf[cnt++] = A->rmap->rstart + *a_jsendbuf++;
      }

      /* put in upper diagonal portion */
      while (m-- > 0) {
        jsendbuf[cnt++] = garray[*b_jsendbuf++];
      }
    }
    if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt);

    /*--------------------------------------------------------------------
       Gather all column indices to all processors
    */
    for (i=0; i<size; i++) {
      recvcounts[i] = 0;
      for (j=A->rmap->range[i]; j<A->rmap->range[i+1]; j++) {
        recvcounts[i] += lens[j];
      }
    }
    displs[0] = 0;
    for (i=1; i<size; i++) {
      displs[i] = displs[i-1] + recvcounts[i-1];
    }
#if defined(PETSC_HAVE_MPI_IN_PLACE)
    ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
#else
    ierr = MPI_Allgatherv(jsendbuf,sendcount,MPIU_INT,b->j,recvcounts,displs,MPIU_INT,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
#endif
    /*--------------------------------------------------------------------
        Assemble the matrix into usable form (note that the numerical values are not yet set)
    */
    /* set the b->ilen (length of each row) values */
    ierr = PetscMemcpy(b->ilen,lens,A->rmap->N*sizeof(PetscInt));CHKERRQ(ierr);
    /* set the b->i indices */
    b->i[0] = 0;
    for (i=1; i<=A->rmap->N; i++) {
      b->i[i] = b->i[i-1] + lens[i-1];
    }
    ierr = PetscFree(lens);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  } else {
    B = **Bin;
    b = (Mat_SeqAIJ*)B->data;
  }

  /*--------------------------------------------------------------------
       Copy my part of the matrix numerical values into the values location
  */
  if (flag == MAT_GET_VALUES) {
    sendcount = ad->nz + bd->nz;
    sendbuf   = b->a + b->i[rstarts[rank]];
    a_sendbuf = ad->a;
    b_sendbuf = bd->a;
    b_sendj   = bd->j;
    n         = A->rmap->rend - A->rmap->rstart;
    cnt       = 0;
    for (i=0; i<n; i++) {

      /* put in lower diagonal portion */
      m = bd->i[i+1] - bd->i[i];
      while (m > 0) {
        /* is it above the diagonal (in bd (compressed) numbering)? */
        if (garray[*b_sendj] > A->rmap->rstart + i) break;
        sendbuf[cnt++] = *b_sendbuf++;
        m--;
        b_sendj++;
      }

      /* put in diagonal portion */
      for (j=ad->i[i]; j<ad->i[i+1]; j++) {
        sendbuf[cnt++] = *a_sendbuf++;
      }

      /* put in upper diagonal portion */
      while (m-- > 0) {
        sendbuf[cnt++] = *b_sendbuf++;
        b_sendj++;
      }
    }
    if (cnt != sendcount) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Corrupted PETSc matrix: nz given %D actual nz %D",sendcount,cnt);

    /* -----------------------------------------------------------------
       Gather all numerical values to all processors
    */
    if (!recvcounts) {
      ierr = PetscMalloc2(size,&recvcounts,size,&displs);CHKERRQ(ierr);
    }
    for (i=0; i<size; i++) {
      recvcounts[i] = b->i[rstarts[i+1]] - b->i[rstarts[i]];
    }
    displs[0] = 0;
    for (i=1; i<size; i++) {
      displs[i] = displs[i-1] + recvcounts[i-1];
    }
    recvbuf = b->a;
#if defined(PETSC_HAVE_MPI_IN_PLACE)
    ierr = MPI_Allgatherv(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,recvbuf,recvcounts,displs,MPIU_SCALAR,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
#else
    ierr = MPI_Allgatherv(sendbuf,sendcount,MPIU_SCALAR,recvbuf,recvcounts,displs,MPIU_SCALAR,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
#endif
  } /* end of (flag == MAT_GET_VALUES) */
  ierr = PetscFree2(recvcounts,displs);CHKERRQ(ierr);

  if (A->symmetric) {
    ierr = MatSetOption(B,MAT_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  } else if (A->hermitian) {
    ierr = MatSetOption(B,MAT_HERMITIAN,PETSC_TRUE);CHKERRQ(ierr);
  } else if (A->structurally_symmetric) {
    ierr = MatSetOption(B,MAT_STRUCTURALLY_SYMMETRIC,PETSC_TRUE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ_MatGetSubmatrices(Mat C)
{
  PetscErrorCode ierr;
  Mat_SeqAIJ     *c = (Mat_SeqAIJ*)C->data;
  Mat_SubMat     *submatj = c->submatis1;
  PetscInt       i;

  PetscFunctionBegin;
  if (!submatj->id) { /* delete data that are linked only to submats[id=0] */
    ierr = PetscFree4(submatj->sbuf1,submatj->ptr,submatj->tmp,submatj->ctr);CHKERRQ(ierr);

    for (i=0; i<submatj->nrqr; ++i) {
      ierr = PetscFree(submatj->sbuf2[i]);CHKERRQ(ierr);
    }
    ierr = PetscFree3(submatj->sbuf2,submatj->req_size,submatj->req_source1);CHKERRQ(ierr);

    if (submatj->rbuf1) {
      ierr = PetscFree(submatj->rbuf1[0]);CHKERRQ(ierr);
      ierr = PetscFree(submatj->rbuf1);CHKERRQ(ierr);
    }

    for (i=0; i<submatj->nrqs; ++i) {
      ierr = PetscFree(submatj->rbuf3[i]);CHKERRQ(ierr);
    }
    ierr = PetscFree3(submatj->req_source2,submatj->rbuf2,submatj->rbuf3);CHKERRQ(ierr);
    ierr = PetscFree(submatj->pa);CHKERRQ(ierr);
  }

#if defined(PETSC_USE_CTABLE)
  ierr = PetscTableDestroy((PetscTable*)&submatj->rmap);CHKERRQ(ierr);
  if (submatj->cmap_loc) {ierr = PetscFree(submatj->cmap_loc);CHKERRQ(ierr);}
  ierr = PetscFree(submatj->rmap_loc);CHKERRQ(ierr);
#else
  ierr = PetscFree(submatj->rmap);CHKERRQ(ierr);
#endif

  if (!submatj->allcolumns) {
#if defined(PETSC_USE_CTABLE)
    ierr = PetscTableDestroy((PetscTable*)&submatj->cmap);CHKERRQ(ierr);
#else
    ierr = PetscFree(submatj->cmap);CHKERRQ(ierr);
#endif
  }
  ierr = submatj->destroy(C);CHKERRQ(ierr);
  ierr = PetscFree(submatj->row2proc);CHKERRQ(ierr);

  ierr = PetscFree(submatj);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
1273 
1274 PetscErrorCode MatGetSubMatrices_MPIAIJ_SingleIS_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool allcolumns,Mat *submats)
1275 {
1276   Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
1277   Mat            submat,A = c->A,B = c->B;
1278   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)B->data,*subc;
1279   PetscInt       *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j,nzA,nzB;
1280   PetscInt       cstart = C->cmap->rstart,cend = C->cmap->rend,rstart = C->rmap->rstart,*bmap = c->garray;
1281   const PetscInt *icol,*irow;
1282   PetscInt       nrow,ncol,start;
1283   PetscErrorCode ierr;
1284   PetscMPIInt    rank,size,tag1,tag2,tag3,tag4,*w1,*w2,nrqr;
1285   PetscInt       **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,ct3,**rbuf1,row,proc;
1286   PetscInt       nrqs=0,msz,**ptr,*req_size,*ctr,*pa,*tmp,tcol,*iptr;
1287   PetscInt       **rbuf3,*req_source1,*req_source2,**sbuf_aj,**rbuf2,max1,nnz;
1288   PetscInt       *lens,rmax,ncols,*cols,Crow;
1289 #if defined(PETSC_USE_CTABLE)
1290   PetscTable     cmap,rmap;
1291   PetscInt       *cmap_loc,*rmap_loc;
1292 #else
1293   PetscInt       *cmap,*rmap;
1294 #endif
1295   PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*sbuf1_i,*rbuf2_i,*rbuf3_i;
1296   PetscInt       *cworkB,lwrite,*subcols,*row2proc;
1297   PetscScalar    *vworkA,*vworkB,*a_a = a->a,*b_a = b->a,*subvals=NULL;
1298   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
1299   MPI_Request    *r_waits4,*s_waits3 = NULL,*s_waits4;
1300   MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3 = NULL,*s_status2;
1301   MPI_Status     *r_status3 = NULL,*r_status4,*s_status4;
1302   MPI_Comm       comm;
1303   PetscScalar    **rbuf4,**sbuf_aa,*vals,*sbuf_aa_i,*rbuf4_i;
1304   PetscMPIInt    *onodes1,*olengths1,idex,end;
1305   Mat_SubMat     *smatis1;
1306   PetscBool      isrowsorted;
1307 
1308   PetscFunctionBegin;
1309   if (ismax != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"This routine only works when all processes have ismax=1");
1310 
1311   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
1312   size = c->size;
1313   rank = c->rank;
1314 
1315   ierr = ISSorted(isrow[0],&isrowsorted);CHKERRQ(ierr);
1316   if (!isrowsorted) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"isrow[0] must be sorted");
1317 
1318   ierr = ISGetIndices(isrow[0],&irow);CHKERRQ(ierr);
1319   ierr = ISGetLocalSize(isrow[0],&nrow);CHKERRQ(ierr);
1320   if (allcolumns) {
1321     icol = NULL;
1322     ncol = C->cmap->N;
1323   } else {
1324     ierr = ISGetIndices(iscol[0],&icol);CHKERRQ(ierr);
1325     ierr = ISGetLocalSize(iscol[0],&ncol);CHKERRQ(ierr);
1326   }
1327 
1328   if (scall == MAT_INITIAL_MATRIX) {
1329     PetscInt *sbuf2_i,*cworkA,lwrite,ctmp;
1330 
1331     /* Get some new tags to keep the communication clean */
1332     tag1 = ((PetscObject)C)->tag;
1333     ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
1334     ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);
1335 
1336     /* evaluate communication - mesg to who, length of mesg, and buffer space
1337      required. Based on this, buffers are allocated, and data copied into them */
1338     ierr = PetscCalloc2(size,&w1,size,&w2);CHKERRQ(ierr);
1339     ierr = PetscMalloc1(nrow,&row2proc);CHKERRQ(ierr);
1340 
1341     /* w1[proc] = num of rows owned by proc -- to be requested */
1342     proc = 0;
1343     nrqs = 0; /* num of outgoing messages */
1344     for (j=0; j<nrow; j++) {
1345       row  = irow[j]; /* sorted! */
1346       while (row >= C->rmap->range[proc+1]) proc++;
1347       w1[proc]++;
1348       row2proc[j] = proc; /* map row index to proc */
1349 
1350       if (proc != rank && !w2[proc]) {
1351         w2[proc] = 1; nrqs++;
1352       }
1353     }
1354     w1[rank] = 0;  /* rows owned by self will not be requested */
1355 
1356     ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /* pa[] = ids of the procs to which messages are sent */
1357     for (proc=0,j=0; proc<size; proc++) {
1358       if (w1[proc]) { pa[j++] = proc;}
1359     }
1360 
1361     /* Each message would have a header = 1 + 2*(num of IS) + data (here, num of IS = 1) */
1362     msz = 0;              /* total mesg length (for all procs) */
1363     for (i=0; i<nrqs; i++) {
1364       proc      = pa[i];
1365       w1[proc] += 3;
1366       msz      += w1[proc];
1367     }
1368     ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr);
1369 
1370     /* Determine nrqr, the number of messages to expect, their lengths, and the from-ids */
1371     /* if w2[proc]=1, a message of length w1[proc] will be sent to proc; */
1372     ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
1373 
1374     /* Input: nrqs: nsend; nrqr: nrecv; w1: msg length to be sent;
1375        Output: onodes1: recv node-ids; olengths1: corresponding recv message length */
1376     ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);
1377 
1378     /* Now post the Irecvs corresponding to these messages */
1379     ierr = PetscPostIrecvInt(comm,tag1,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);
1380 
1381     ierr = PetscFree(onodes1);CHKERRQ(ierr);
1382     ierr = PetscFree(olengths1);CHKERRQ(ierr);
1383 
1384     /* Allocate Memory for outgoing messages */
1385     ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr);
1386     ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr);
1387     ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
1388 
1389     /* sbuf1[pa[0]] = tmp, sbuf1[pa[i]] = sbuf1[pa[i-1]] + w1[pa[i-1]] */
1390     iptr = tmp;
1391     for (i=0; i<nrqs; i++) {
1392       proc        = pa[i];
1393       sbuf1[proc] = iptr;
1394       iptr       += w1[proc];
1395     }
1396 
1397     /* Form the outgoing messages */
1398     /* Initialize the header space */
1399     for (i=0; i<nrqs; i++) {
1400       proc      = pa[i];
1401       ierr      = PetscMemzero(sbuf1[proc],3*sizeof(PetscInt));CHKERRQ(ierr);
1402       ptr[proc] = sbuf1[proc] + 3;
1403     }
1404 
1405     /* Parse the isrow and copy data into outbuf */
1406     ierr = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
1407     for (j=0; j<nrow; j++) {  /* parse the indices of each IS */
1408       proc = row2proc[j];
1409       if (proc != rank) { /* copy to the outgoing buf*/
1410         *ptr[proc] = irow[j];
1411         ctr[proc]++; ptr[proc]++;
1412       }
1413     }
1414 
1415     /* Update the headers for the current IS */
1416     for (j=0; j<size; j++) { /* could optimize this loop too */
1417       if ((ctr_j = ctr[j])) {
1418         sbuf1_j        = sbuf1[j];
1419         k              = ++sbuf1_j[0];
1420         sbuf1_j[2*k]   = ctr_j;
1421         sbuf1_j[2*k-1] = 0;
1422       }
1423     }
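         /* Each outgoing request sbuf1[proc] now has the layout
              [nis | is_no | count | row_0 ... row_{count-1}]
            with nis = 1 and is_no = 0, since this routine handles a single IS. */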
1424 
1425     /* Now post the sends */
1426     ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr);
1427     for (i=0; i<nrqs; ++i) {
1428       proc = pa[i];
1429       ierr = MPI_Isend(sbuf1[proc],w1[proc],MPIU_INT,proc,tag1,comm,s_waits1+i);CHKERRQ(ierr);
1430     }
1431 
1432     /* Post Receives to capture the buffer size */
1433     ierr = PetscMalloc4(nrqs+1,&r_status2,nrqr+1,&s_waits2,nrqs+1,&r_waits2,nrqr+1,&s_status2);CHKERRQ(ierr);
1434     ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr);
1435 
1436     rbuf2[0] = tmp + msz; /* rbuf2 reuses the second half of the 2*msz workspace tmp */
1437     for (i=1; i<nrqs; ++i) rbuf2[i] = rbuf2[i-1] + w1[pa[i-1]];
1438 
1439     for (i=0; i<nrqs; ++i) {
1440       proc = pa[i];
1441       ierr = MPI_Irecv(rbuf2[i],w1[proc],MPIU_INT,proc,tag2,comm,r_waits2+i);CHKERRQ(ierr);
1442     }
1443 
1444     ierr = PetscFree2(w1,w2);CHKERRQ(ierr);
1445 
1446     /* Receive the row requests, then send each requesting proc the buffer size
1447        it should allocate */
1448     ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr);
1449     ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr);
1450 
1451     ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr);
1452     for (i=0; i<nrqr; ++i) {
1453       req_size[i]    = 0;
1454       rbuf1_i        = rbuf1[i];
1455       start          = 2*rbuf1_i[0] + 1;
1456       ierr           = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
1457       ierr           = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr);
1458       sbuf2_i        = sbuf2[i];
1459       for (j=start; j<end; j++) {
1460         k            = rbuf1_i[j] - rstart;
1461         ncols        = ai[k+1] - ai[k] + bi[k+1] - bi[k];
1462         sbuf2_i[j]   = ncols;
1463         req_size[i] += ncols;
1464       }
1465       req_source1[i] = r_status1[i].MPI_SOURCE;
1466 
1467       /* form the header */
1468       sbuf2_i[0] = req_size[i];
1469       for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j];
1470 
1471       ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr);
1472     }
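         /* Each reply sbuf2[i] mirrors the request it answers: sbuf2_i[0] is the total
            number of nonzeros the requester must allocate for (req_size[i]), entries
            1..start-1 echo the request header, and entries start..end-1 hold the
            nonzero count of each requested row. */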
1473 
1474     ierr = PetscFree(r_status1);CHKERRQ(ierr);
1475     ierr = PetscFree(r_waits1);CHKERRQ(ierr);
1476 
1477     /* rbuf2 is received, Post recv column indices a->j */
1478     ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr);
1479 
1480     ierr = PetscMalloc4(nrqs+1,&r_waits3,nrqr+1,&s_waits3,nrqs+1,&r_status3,nrqr+1,&s_status3);CHKERRQ(ierr);
1481     for (i=0; i<nrqs; ++i) {
1482       ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr);
1483       req_source2[i] = r_status2[i].MPI_SOURCE;
1484       ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr);
1485     }
1486 
1487     /* Wait on sends1 and sends2 */
1488     ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr);
1489     ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);
1490     ierr = PetscFree(s_waits1);CHKERRQ(ierr);
1491     ierr = PetscFree(s_status1);CHKERRQ(ierr);
1492 
1493     ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);
1494     ierr = PetscFree4(r_status2,s_waits2,r_waits2,s_status2);CHKERRQ(ierr);
1495 
1496     /* Now allocate sending buffers for a->j, and send them off */
1497     ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr);
1498     for (i=0,j=0; i<nrqr; i++) j += req_size[i];
1499     ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr);
1500     for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
1501 
1502     for (i=0; i<nrqr; i++) { /* for each requested message */
1503       rbuf1_i   = rbuf1[i];
1504       sbuf_aj_i = sbuf_aj[i];
1505       ct1       = 2*rbuf1_i[0] + 1;
1506       ct2       = 0;
1507       /* max1=rbuf1_i[0]; if (max1 != 1) SETERRQ1(PETSC_COMM_SELF,0,"max1 %d != 1",max1); */
1508 
1509       kmax = rbuf1[i][2];
1510       for (k=0; k<kmax; k++,ct1++) { /* for each row */
1511         row    = rbuf1_i[ct1] - rstart;
1512         nzA    = ai[row+1] - ai[row];
1513         nzB    = bi[row+1] - bi[row];
1514         ncols  = nzA + nzB;
1515         cworkA = aj + ai[row]; cworkB = bj + bi[row];
1516 
1517         /* load the column indices for this row into cols*/
1518         cols = sbuf_aj_i + ct2;
1519 
1520         lwrite = 0;
1521         for (l=0; l<nzB; l++) {
1522           if ((ctmp = bmap[cworkB[l]]) < cstart) cols[lwrite++] = ctmp;
1523         }
1524         for (l=0; l<nzA; l++) cols[lwrite++] = cstart + cworkA[l];
1525         for (l=0; l<nzB; l++) {
1526           if ((ctmp = bmap[cworkB[l]]) >= cend) cols[lwrite++] = ctmp;
1527         }
1528 
1529         ct2 += ncols;
1530       }
1531       ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr);
1532     }
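         /* The three passes above write, in order, the off-diagonal columns below cstart,
            the diagonal-block columns (shifted to global numbering by cstart), and the
            off-diagonal columns at or beyond cend; each group is already sorted, so every
            packed row carries its global column indices in ascending order. */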
1533 
1534     /* create column map (cmap): global col of C -> local col of submat */
1535 #if defined(PETSC_USE_CTABLE)
1536     if (!allcolumns) {
1537       ierr = PetscTableCreate(ncol+1,C->cmap->N+1,&cmap);CHKERRQ(ierr);
1538       ierr = PetscCalloc1(C->cmap->n,&cmap_loc);CHKERRQ(ierr);
1539       for (j=0; j<ncol; j++) { /* use array cmap_loc[] for local col indices */
1540         if (icol[j] >= cstart && icol[j] <cend) {
1541           cmap_loc[icol[j] - cstart] = j+1;
1542         } else { /* use PetscTable for non-local col indices */
1543           ierr = PetscTableAdd(cmap,icol[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
1544         }
1545       }
1546     } else {
1547       cmap     = NULL;
1548       cmap_loc = NULL;
1549     }
1550     ierr = PetscCalloc1(C->rmap->n,&rmap_loc);CHKERRQ(ierr);
1551 #else
1552     if (!allcolumns) {
1553       ierr   = PetscCalloc1(C->cmap->N,&cmap);CHKERRQ(ierr);
1554       for (j=0; j<ncol; j++) cmap[icol[j]] = j+1;
1555     } else {
1556       cmap = NULL;
1557     }
1558 #endif
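         /* Both cmap variants store the local column index shifted by one (j+1) so that a
            lookup result of 0 means "column not in the submatrix"; every consumer below
            tests for zero and subtracts one before using the index. */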
1559 
1560     /* Create lens for MatSeqAIJSetPreallocation() */
1561     ierr = PetscCalloc1(nrow,&lens);CHKERRQ(ierr);
1562 
1563     /* Compute lens from local part of C */
1564     for (j=0; j<nrow; j++) {
1565       row  = irow[j];
1566       proc = row2proc[j];
1567       if (proc == rank) {
1568         /* diagonal part A = c->A */
1569         ncols = ai[row-rstart+1] - ai[row-rstart];
1570         cols  = aj + ai[row-rstart];
1571         if (!allcolumns) {
1572           for (k=0; k<ncols; k++) {
1573 #if defined(PETSC_USE_CTABLE)
1574             tcol = cmap_loc[cols[k]];
1575 #else
1576             tcol = cmap[cols[k]+cstart];
1577 #endif
1578             if (tcol) lens[j]++;
1579           }
1580         } else { /* allcolumns */
1581           lens[j] = ncols;
1582         }
1583 
1584         /* off-diagonal part B = c->B */
1585         ncols = bi[row-rstart+1] - bi[row-rstart];
1586         cols  = bj + bi[row-rstart];
1587         if (!allcolumns) {
1588           for (k=0; k<ncols; k++) {
1589 #if defined(PETSC_USE_CTABLE)
1590             ierr = PetscTableFind(cmap,bmap[cols[k]]+1,&tcol);CHKERRQ(ierr);
1591 #else
1592             tcol = cmap[bmap[cols[k]]];
1593 #endif
1594             if (tcol) lens[j]++;
1595           }
1596         } else { /* allcolumns */
1597           lens[j] += ncols;
1598         }
1599       }
1600     }
1601 
1602     /* Create row map (rmap): global row of C -> local row of submat */
1603 #if defined(PETSC_USE_CTABLE)
1604     ierr = PetscTableCreate(nrow+1,C->rmap->N+1,&rmap);CHKERRQ(ierr);
1605     for (j=0; j<nrow; j++) {
1606       row  = irow[j];
1607       proc = row2proc[j];
1608       if (proc == rank) { /* a local row */
1609         rmap_loc[row - rstart] = j;
1610       } else {
1611         ierr = PetscTableAdd(rmap,irow[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
1612       }
1613     }
1614 #else
1615     ierr = PetscCalloc1(C->rmap->N,&rmap);CHKERRQ(ierr);
1616     for (j=0; j<nrow; j++) {
1617       rmap[irow[j]] = j;
1618     }
1619 #endif
1620 
1621     /* Update lens from offproc data */
1622     /* recv a->j is done */
1623     ierr    = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr);
1624     for (i=0; i<nrqs; i++) {
1625       proc    = pa[i];
1626       sbuf1_i = sbuf1[proc];
1627       /* jmax    = sbuf1_i[0]; if (jmax != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"jmax !=1"); */
1628       ct1     = 2 + 1; /* skip the 3-entry header [nis, is_no, count] */
1629       ct2     = 0;
1630       rbuf2_i = rbuf2[i]; /* received length of C->j */
1631       rbuf3_i = rbuf3[i]; /* received C->j */
1632 
1633       /* is_no  = sbuf1_i[2*j-1]; if (is_no != 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_no !=0"); */
1634       max1   = sbuf1_i[2];
1635       for (k=0; k<max1; k++,ct1++) {
1636 #if defined(PETSC_USE_CTABLE)
1637         ierr = PetscTableFind(rmap,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
1638         row--;
1639         if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
1640 #else
1641         row = rmap[sbuf1_i[ct1]]; /* the row index in submat */
1642 #endif
1643         /* Now, store row index of submat in sbuf1_i[ct1] */
1644         sbuf1_i[ct1] = row;
1645 
1646         nnz = rbuf2_i[ct1];
1647         if (!allcolumns) {
1648           for (l=0; l<nnz; l++,ct2++) {
1649 #if defined(PETSC_USE_CTABLE)
1650             if (rbuf3_i[ct2] >= cstart && rbuf3_i[ct2] <cend) {
1651               tcol = cmap_loc[rbuf3_i[ct2] - cstart];
1652             } else {
1653               ierr = PetscTableFind(cmap,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
1654             }
1655 #else
1656             tcol = cmap[rbuf3_i[ct2]]; /* column index in submat */
1657 #endif
1658             if (tcol) lens[row]++;
1659           }
1660         } else { /* allcolumns */
1661           lens[row] += nnz;
1662         }
1663       }
1664     }
1665     ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);
1666     ierr = PetscFree4(r_waits3,s_waits3,r_status3,s_status3);CHKERRQ(ierr);
1667 
1668     /* Create the submatrices */
1669     ierr = MatCreate(PETSC_COMM_SELF,&submat);CHKERRQ(ierr);
1670     ierr = MatSetSizes(submat,nrow,ncol,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
1671 
1672     ierr = ISGetBlockSize(isrow[0],&i);CHKERRQ(ierr);
1673     ierr = ISGetBlockSize(iscol[0],&j);CHKERRQ(ierr);
1674     ierr = MatSetBlockSizes(submat,i,j);CHKERRQ(ierr);
1675     ierr = MatSetType(submat,((PetscObject)A)->type_name);CHKERRQ(ierr);
1676     ierr = MatSeqAIJSetPreallocation(submat,0,lens);CHKERRQ(ierr);
1677 
1678     /* create struct Mat_SubMat and attach it to submat */
1679     ierr = PetscNew(&smatis1);CHKERRQ(ierr);
1680     subc = (Mat_SeqAIJ*)submat->data;
1681     subc->submatis1 = smatis1;
1682 
1683     smatis1->id          = 0;
1684     smatis1->nrqs        = nrqs;
1685     smatis1->nrqr        = nrqr;
1686     smatis1->rbuf1       = rbuf1;
1687     smatis1->rbuf2       = rbuf2;
1688     smatis1->rbuf3       = rbuf3;
1689     smatis1->sbuf2       = sbuf2;
1690     smatis1->req_source2 = req_source2;
1691 
1692     smatis1->sbuf1       = sbuf1;
1693     smatis1->ptr         = ptr;
1694     smatis1->tmp         = tmp;
1695     smatis1->ctr         = ctr;
1696 
1697     smatis1->pa           = pa;
1698     smatis1->req_size     = req_size;
1699     smatis1->req_source1  = req_source1;
1700 
1701     smatis1->allcolumns  = allcolumns;
1702     smatis1->row2proc    = row2proc;
1703     smatis1->rmap        = rmap;
1704     smatis1->cmap        = cmap;
1705 #if defined(PETSC_USE_CTABLE)
1706     smatis1->rmap_loc    = rmap_loc;
1707     smatis1->cmap_loc    = cmap_loc;
1708 #endif
1709 
1710     smatis1->destroy     = submat->ops->destroy;
1711     submat->ops->destroy = MatDestroy_MPIAIJ_MatGetSubmatrices;
1712     submat->factortype   = C->factortype;
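         /* Everything stashed in smatis1 is the communication state (buffers, sizes,
            proc lists, maps) needed to replay this exchange cheaply when the routine
            is re-entered with MAT_REUSE_MATRIX. */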
1713 
1714     /* compute rmax */
1715     rmax = 0;
1716     for (i=0; i<nrow; i++) rmax = PetscMax(rmax,lens[i]);
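         /* rmax bounds the longest submatrix row; it sizes the subcols/subvals scratch
            arrays used during assembly below. */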
1717 
1718   } else { /* scall == MAT_REUSE_MATRIX */
1719     submat = submats[0];
1720     if (submat->rmap->n != nrow || submat->cmap->n != ncol) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
1721 
1722     subc    = (Mat_SeqAIJ*)submat->data;
1723     rmax    = subc->rmax;
1724     smatis1 = subc->submatis1;
1725     nrqs        = smatis1->nrqs;
1726     nrqr        = smatis1->nrqr;
1727     rbuf1       = smatis1->rbuf1;
1728     rbuf2       = smatis1->rbuf2;
1729     rbuf3       = smatis1->rbuf3;
1730     req_source2 = smatis1->req_source2;
1731 
1732     sbuf1     = smatis1->sbuf1;
1733     sbuf2     = smatis1->sbuf2;
1734     ptr       = smatis1->ptr;
1735     tmp       = smatis1->tmp;
1736     ctr       = smatis1->ctr;
1737 
1738     pa         = smatis1->pa;
1739     req_size   = smatis1->req_size;
1740     req_source1 = smatis1->req_source1;
1741 
1742     allcolumns = smatis1->allcolumns;
1743     row2proc   = smatis1->row2proc;
1744     rmap       = smatis1->rmap;
1745     cmap       = smatis1->cmap;
1746 #if defined(PETSC_USE_CTABLE)
1747     rmap_loc   = smatis1->rmap_loc;
1748     cmap_loc   = smatis1->cmap_loc;
1749 #endif
1750   }
1751 
1752   /* Post recv matrix values */
1753   ierr = PetscMalloc3(nrqs+1,&rbuf4, rmax,&subcols, rmax,&subvals);CHKERRQ(ierr);
1754   ierr = PetscMalloc4(nrqs+1,&r_waits4,nrqr+1,&s_waits4,nrqs+1,&r_status4,nrqr+1,&s_status4);CHKERRQ(ierr);
1755   ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr);
1756   for (i=0; i<nrqs; ++i) {
1757     ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf4[i]);CHKERRQ(ierr);
1758     ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0],MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr);
1759   }
1760 
1761   /* Allocate sending buffers for a->a, and send them off */
1762   ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr);
1763   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
1764   ierr = PetscMalloc1(j+1,&sbuf_aa[0]);CHKERRQ(ierr);
1765   for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1];
1766 
1767   for (i=0; i<nrqr; i++) {
1768     rbuf1_i   = rbuf1[i];
1769     sbuf_aa_i = sbuf_aa[i];
1770     ct1       = 2*rbuf1_i[0]+1;
1771     ct2       = 0;
1772     /* max1=rbuf1_i[0]; if (max1 != 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"max1 !=1"); */
1773 
1774     kmax = rbuf1_i[2];
1775     for (k=0; k<kmax; k++,ct1++) {
1776       row = rbuf1_i[ct1] - rstart;
1777       nzA = ai[row+1] - ai[row];
1778       nzB = bi[row+1] - bi[row];
1779       ncols  = nzA + nzB;
1780       cworkB = bj + bi[row];
1781       vworkA = a_a + ai[row];
1782       vworkB = b_a + bi[row];
1783 
1784       /* load the column values for this row into vals*/
1785       vals = sbuf_aa_i + ct2;
1786 
1787       lwrite = 0;
1788       for (l=0; l<nzB; l++) {
1789         if ((bmap[cworkB[l]]) < cstart) vals[lwrite++] = vworkB[l];
1790       }
1791       for (l=0; l<nzA; l++) vals[lwrite++] = vworkA[l];
1792       for (l=0; l<nzB; l++) {
1793         if ((bmap[cworkB[l]]) >= cend) vals[lwrite++] = vworkB[l];
1794       }
1795 
1796       ct2 += ncols;
1797     }
1798     ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr);
1799   }
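       /* The values were packed in exactly the same three-pass order as the column
          indices in sbuf_aj, so each incoming rbuf4 entry lines up one-to-one with the
          column index already received in rbuf3. */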
1800 
1801   /* Assemble submat */
1802   /* First assemble the local rows */
1803   for (j=0; j<nrow; j++) {
1804     row  = irow[j];
1805     proc = row2proc[j];
1806     if (proc == rank) {
1807       Crow = row - rstart;  /* local row index of C */
1808 #if defined(PETSC_USE_CTABLE)
1809       row = rmap_loc[Crow]; /* row index of submat */
1810 #else
1811       row = rmap[row];
1812 #endif
1813 
1814       if (allcolumns) {
1815         /* diagonal part A = c->A */
1816         ncols = ai[Crow+1] - ai[Crow];
1817         cols  = aj + ai[Crow];
1818         vals  = a->a + ai[Crow];
1819         i     = 0;
1820         for (k=0; k<ncols; k++) {
1821           subcols[i]   = cols[k] + cstart;
1822           subvals[i++] = vals[k];
1823         }
1824 
1825         /* off-diagonal part B = c->B */
1826         ncols = bi[Crow+1] - bi[Crow];
1827         cols  = bj + bi[Crow];
1828         vals  = b->a + bi[Crow];
1829         for (k=0; k<ncols; k++) {
1830           subcols[i]   = bmap[cols[k]];
1831           subvals[i++] = vals[k];
1832         }
1833 
1834         ierr = MatSetValues_SeqAIJ(submat,1,&row,i,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr);
1835 
1836       } else { /* !allcolumns */
1837 #if defined(PETSC_USE_CTABLE)
1838         /* diagonal part A = c->A */
1839         ncols = ai[Crow+1] - ai[Crow];
1840         cols  = aj + ai[Crow];
1841         vals  = a->a + ai[Crow];
1842         i     = 0;
1843         for (k=0; k<ncols; k++) {
1844           tcol = cmap_loc[cols[k]];
1845           if (tcol) {
1846             subcols[i]   = --tcol;
1847             subvals[i++] = vals[k];
1848           }
1849         }
1850 
1851         /* off-diagonal part B = c->B */
1852         ncols = bi[Crow+1] - bi[Crow];
1853         cols  = bj + bi[Crow];
1854         vals  = b->a + bi[Crow];
1855         for (k=0; k<ncols; k++) {
1856           ierr = PetscTableFind(cmap,bmap[cols[k]]+1,&tcol);CHKERRQ(ierr);
1857           if (tcol) {
1858             subcols[i]   = --tcol;
1859             subvals[i++] = vals[k];
1860           }
1861         }
1862 #else
1863         /* diagonal part A = c->A */
1864         ncols = ai[Crow+1] - ai[Crow];
1865         cols  = aj + ai[Crow];
1866         vals  = a->a + ai[Crow];
1867         i     = 0;
1868         for (k=0; k<ncols; k++) {
1869           tcol = cmap[cols[k]+cstart];
1870           if (tcol) {
1871             subcols[i]   = --tcol;
1872             subvals[i++] = vals[k];
1873           }
1874         }
1875 
1876         /* off-diagonal part B = c->B */
1877         ncols = bi[Crow+1] - bi[Crow];
1878         cols  = bj + bi[Crow];
1879         vals  = b->a + bi[Crow];
1880         for (k=0; k<ncols; k++) {
1881           tcol = cmap[bmap[cols[k]]];
1882           if (tcol) {
1883             subcols[i]   = --tcol;
1884             subvals[i++] = vals[k];
1885           }
1886         }
1887 #endif
1888         ierr = MatSetValues_SeqAIJ(submat,1,&row,i,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr);
1889       }
1890     }
1891   }
1892 
1893   /* Now assemble the off-proc rows */
1894   for (i=0; i<nrqs; i++) { /* for each requested message */
1895     /* recv values from other processes */
1896     ierr    = MPI_Waitany(nrqs,r_waits4,&idex,r_status4+i);CHKERRQ(ierr);
1897     proc    = pa[idex];
1898     sbuf1_i = sbuf1[proc];
1899     /* jmax    = sbuf1_i[0]; if (jmax != 1)SETERRQ1(PETSC_COMM_SELF,0,"jmax %d != 1",jmax); */
1900     ct1     = 2 + 1;
1901     ct2     = 0; /* count of received C->j */
1902     ct3     = 0; /* count of received C->j that will be inserted into submat */
1903     rbuf2_i = rbuf2[idex]; /* received lengths of C->j rows from other processes */
1904     rbuf3_i = rbuf3[idex]; /* received C->j column indices from other processes */
1905     rbuf4_i = rbuf4[idex]; /* received C->a values from other processes */
1906 
1907     /* is_no = sbuf1_i[2*j-1]; if (is_no != 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"is_no !=0"); */
1908     max1 = sbuf1_i[2];             /* num of rows */
1909     for (k=0; k<max1; k++,ct1++) { /* for each recved row */
1910       row = sbuf1_i[ct1]; /* row index of submat */
1911       if (!allcolumns) {
1912         idex = 0;
1913         if (scall == MAT_INITIAL_MATRIX) {
1914           nnz  = rbuf2_i[ct1]; /* num of C entries in this row */
1915           for (l=0; l<nnz; l++,ct2++) { /* for each recved column */
1916 #if defined(PETSC_USE_CTABLE)
1917             if (rbuf3_i[ct2] >= cstart && rbuf3_i[ct2] <cend) {
1918               tcol = cmap_loc[rbuf3_i[ct2] - cstart];
1919             } else {
1920               ierr = PetscTableFind(cmap,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
1921             }
1922 #else
1923             tcol = cmap[rbuf3_i[ct2]];
1924 #endif
1925             if (tcol) {
1926               subcols[idex]   = --tcol;
1927               subvals[idex++] = rbuf4_i[ct2];
1928 
1929               /* We receive an entire row of C, but only a subset of it is inserted into submat.
1930                  For reuse, we overwrite the received C->j with the indices that go into submat */
1931               rbuf3_i[ct3++] = ct2;
1932             }
1933           }
1934           ierr = MatSetValues_SeqAIJ(submat,1,&row,idex,subcols,subvals,INSERT_VALUES);CHKERRQ(ierr);
1935 
1936         } else { /* scall == MAT_REUSE_MATRIX */
1937           submat = submats[0];
1938           subc   = (Mat_SeqAIJ*)submat->data;
1939 
1940           nnz = subc->i[row+1] - subc->i[row]; /* num of submat entries in this row */
1941           for (l=0; l<nnz; l++) {
1942             ct2 = rbuf3_i[ct3++]; /* index of rbuf4_i[] which needs to be inserted into submat */
1943             subvals[idex++] = rbuf4_i[ct2];
1944           }
1945 
1946           bj = subc->j + subc->i[row];
1947           ierr = MatSetValues_SeqAIJ(submat,1,&row,nnz,bj,subvals,INSERT_VALUES);CHKERRQ(ierr);
1948         }
1949       } else { /* allcolumns */
1950         nnz  = rbuf2_i[ct1]; /* num of C entries in this row */
1951         ierr = MatSetValues_SeqAIJ(submat,1,&row,nnz,rbuf3_i+ct2,rbuf4_i+ct2,INSERT_VALUES);CHKERRQ(ierr);
1952         ct2 += nnz;
1953       }
1954     }
1955   }
1956 
1957   /* sends of a->a are done */
1958   ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);
1959   ierr = PetscFree4(r_waits4,s_waits4,r_status4,s_status4);CHKERRQ(ierr);
1960 
1961   ierr = MatAssemblyBegin(submat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1962   ierr = MatAssemblyEnd(submat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1963   submats[0] = submat;
1964 
1965   /* Restore the indices */
1966   ierr = ISRestoreIndices(isrow[0],&irow);CHKERRQ(ierr);
1967   if (!allcolumns) {
1968     ierr = ISRestoreIndices(iscol[0],&icol);CHKERRQ(ierr);
1969   }
1970 
1971   /* Destroy allocated memory */
1972   for (i=0; i<nrqs; ++i) {
1973     ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr); /* subcols/subvals are freed once, together with rbuf4, below */
1974   }
1975   ierr = PetscFree3(rbuf4,subcols,subvals);CHKERRQ(ierr);
1976   ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
1977   ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);
1978 
1979   if (scall == MAT_INITIAL_MATRIX) {
1980     ierr = PetscFree(lens);CHKERRQ(ierr);
1981     ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
1982     ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
1983   }
1984   PetscFunctionReturn(0);
1985 }
1986 
1987 PetscErrorCode MatGetSubMatrices_MPIAIJ_SingleIS(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
1988 {
1989   PetscErrorCode ierr;
1990   PetscInt       ncol;
1991   PetscBool      colflag,allcolumns=PETSC_FALSE;
1992 
1993   PetscFunctionBegin;
1994   /* Allocate memory to hold all the submatrices */
1995   if (scall == MAT_INITIAL_MATRIX) {
1996     ierr = PetscMalloc1(1,submat);CHKERRQ(ierr);
1997   }
1998 
1999   /* Check for special case: each processor gets entire matrix columns */
2000   ierr = ISIdentity(iscol[0],&colflag);CHKERRQ(ierr);
2001   ierr = ISGetLocalSize(iscol[0],&ncol);CHKERRQ(ierr);
2002   if (colflag && ncol == C->cmap->N) allcolumns = PETSC_TRUE;
2003 
2004   ierr = MatGetSubMatrices_MPIAIJ_SingleIS_Local(C,ismax,isrow,iscol,scall,allcolumns,*submat);CHKERRQ(ierr);
2005   PetscFunctionReturn(0);
2006 }
2007 
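     /*
        Illustrative usage of the single-IS path (a sketch, not part of this file).
        Callers normally reach it through the generic interface; the variable names
        here are hypothetical:

          IS  isrow,iscol;
          Mat *submats;
          ...build one sorted row IS and one column IS per process, then:
          ierr = MatGetSubMatrices(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submats);CHKERRQ(ierr);
          ...use submats[0]; after changing values of C, refresh cheaply:
          ierr = MatGetSubMatrices(C,1,&isrow,&iscol,MAT_REUSE_MATRIX,&submats);CHKERRQ(ierr);

        The MAT_REUSE_MATRIX call is cheap because the communication pattern cached in
        Mat_SubMat by the initial call is replayed instead of recomputed.
     */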
2008 PetscErrorCode MatGetSubMatrices_MPIAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
2009 {
2010   PetscErrorCode ierr;
2011   PetscInt       nmax,nstages,i,pos,max_no,nrow,ncol,in[2],out[2];
2012   PetscBool      rowflag,colflag,wantallmatrix=PETSC_FALSE,*allcolumns;
2013 
2014   PetscFunctionBegin;
2015 #if 0
2016   /* Check for special case: each processor gets entire matrix */
2017   if (C->submat_singleis) { /* flag is set in PCSetUp_ASM() to skip several MPIU_Allreduce() */
2018     ierr = MatGetSubMatrices_MPIAIJ_SingleIS(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr);
2019     PetscFunctionReturn(0);
2020   }
2021 #endif
2022 
2023   if (ismax == 1 && C->rmap->N == C->cmap->N) {
2024     ierr = ISIdentity(*isrow,&rowflag);CHKERRQ(ierr);
2025     ierr = ISIdentity(*iscol,&colflag);CHKERRQ(ierr);
2026     ierr = ISGetLocalSize(*isrow,&nrow);CHKERRQ(ierr);
2027     ierr = ISGetLocalSize(*iscol,&ncol);CHKERRQ(ierr);
2028     if (rowflag && colflag && nrow == C->rmap->N && ncol == C->cmap->N) {
2029       wantallmatrix = PETSC_TRUE;
2030 
2031       ierr = PetscOptionsGetBool(((PetscObject)C)->options,((PetscObject)C)->prefix,"-use_fast_submatrix",&wantallmatrix,NULL);CHKERRQ(ierr);
2032     }
2033   }
2034 
2035   /* Determine the number of stages through which the submatrices are extracted.
2036      Each stage extracts at most nmax submatrices; nmax is determined by the matrix
2037      column dimension, so if the original matrix has 20M columns, only one submatrix
2038      per stage is allowed, etc.
2039   */
2040   nmax = 20*1000000 / (C->cmap->N * sizeof(PetscInt));
2041   if (!nmax) nmax = 1;
2042   nstages = ismax/nmax + ((ismax % nmax) ? 1 : 0); /* local nstages */
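       /* Worked example (assuming sizeof(PetscInt) == 8): for C->cmap->N = 1,000,000,
          nmax = 20000000/(1000000*8) = 2 submatrices per stage, so ismax = 5 gives
          nstages = 5/2 + 1 = 3. */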
2043 
2044   /* Collect global wantallmatrix and nstages */
2045   in[0] = -1*wantallmatrix;
2046   in[1] = nstages;
2047   ierr = MPIU_Allreduce(in,out,2,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr);
2048   wantallmatrix = (PetscBool)(-out[0]);
2049   nstages       = out[1]; /* Make sure every processor loops through the global nstages */
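       /* Negating the flag and reducing with MPI_MAX implements a logical AND across
          ranks: out[0] remains -1 (so wantallmatrix stays true) only if every process
          contributed -1; a single rank with in[0] = 0 forces out[0] = 0. */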
2050 
2051   if (wantallmatrix) {
2052     ierr = MatGetSubMatrix_MPIAIJ_All(C,MAT_GET_VALUES,scall,submat);CHKERRQ(ierr);
2053     PetscFunctionReturn(0);
2054   }
2055 
2056   /* Allocate memory to hold all the submatrices */
2057   if (scall == MAT_INITIAL_MATRIX) {
2058     ierr = PetscMalloc1(ismax+1,submat);CHKERRQ(ierr);
2059   }
2060 
2061   if (scall == MAT_REUSE_MATRIX && !ismax) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"n=0 is not supported for MatGetSubMatrices(mat,n,isrow,iscol,MAT_REUSE_MATRIX,...). Set n=1 with zero-length isrow and iscolumn instead");
2062 
2063   /* Check for special case: each processor gets entire matrix columns */
2064   ierr = PetscMalloc1(ismax+1,&allcolumns);CHKERRQ(ierr);
2065   for (i=0; i<ismax; i++) {
2066     ierr = ISIdentity(iscol[i],&colflag);CHKERRQ(ierr);
2067     ierr = ISGetLocalSize(iscol[i],&ncol);CHKERRQ(ierr);
2068     if (colflag && ncol == C->cmap->N) {
2069       allcolumns[i] = PETSC_TRUE;
2070     } else {
2071       allcolumns[i] = PETSC_FALSE;
2072     }
2073   }
2074 
2075   for (i=0,pos=0; i<nstages; i++) {
2076     if (pos+nmax <= ismax) max_no = nmax;
2077     else if (pos == ismax) max_no = 0;
2078     else                   max_no = ismax-pos;
2079     ierr = MatGetSubMatrices_MPIAIJ_Local(C,max_no,isrow+pos,iscol+pos,scall,allcolumns+pos,*submat+pos);CHKERRQ(ierr);
2080     pos += max_no;
2081   }
2082 
2083   ierr = PetscFree(allcolumns);CHKERRQ(ierr);
2084   PetscFunctionReturn(0);
2085 }
2086 
2087 /* -------------------------------------------------------------------------*/
2088 PetscErrorCode MatGetSubMatrices_MPIAIJ_Local(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,PetscBool *allcolumns,Mat *submats)
2089 {
2090   Mat_MPIAIJ     *c = (Mat_MPIAIJ*)C->data;
2091   Mat            A  = c->A;
2092   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data,*b = (Mat_SeqAIJ*)c->B->data,*subc;
2093   const PetscInt **icol,**irow;
2094   PetscInt       *nrow,*ncol,start;
2095   PetscErrorCode ierr;
2096   PetscMPIInt    rank,size,tag0,tag2,tag3,tag4,*w1,*w2,*w3,*w4,nrqr;
2097   PetscInt       **sbuf1,**sbuf2,i,j,k,l,ct1,ct2,**rbuf1,row,proc=-1;
2098   PetscInt       nrqs=0,msz,**ptr=NULL,*req_size=NULL,*ctr=NULL,*pa,*tmp=NULL,tcol;
2099   PetscInt       **rbuf3=NULL,*req_source1=NULL,*req_source2,**sbuf_aj,**rbuf2=NULL,max1,max2;
2100   PetscInt       **lens,is_no,ncols,*cols,mat_i,*mat_j,tmp2,jmax;
2101 #if defined(PETSC_USE_CTABLE)
2102   PetscTable     *cmap,cmap_i=NULL,*rmap,rmap_i;
2103 #else
2104   PetscInt       **cmap,*cmap_i=NULL,**rmap,*rmap_i;
2105 #endif
2106   const PetscInt *irow_i;
2107   PetscInt       ctr_j,*sbuf1_j,*sbuf_aj_i,*rbuf1_i,kmax,*lens_i;
2108   MPI_Request    *s_waits1,*r_waits1,*s_waits2,*r_waits2,*r_waits3;
2109   MPI_Request    *r_waits4,*s_waits3,*s_waits4;
2110   MPI_Status     *r_status1,*r_status2,*s_status1,*s_status3,*s_status2;
2111   MPI_Status     *r_status3,*r_status4,*s_status4;
2112   MPI_Comm       comm;
2113   PetscScalar    **rbuf4,**sbuf_aa,*vals,*mat_a,*sbuf_aa_i;
2114   PetscMPIInt    *onodes1,*olengths1,end;
2115   PetscInt       **row2proc,*row2proc_i;
2116   Mat_SubMat     **smats,*smat_i;
2117   PetscBool      *issorted;
2118 
2119   PetscFunctionBegin;
2120   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
2121   size = c->size;
2122   rank = c->rank;
2123 
2124   ierr = PetscMalloc5(ismax,&irow,ismax,&icol,ismax,&nrow,ismax,&ncol,ismax,&issorted);CHKERRQ(ierr);
2125   for (i=0; i<ismax; i++) {
2126     ierr = ISSorted(isrow[i],&issorted[i]);CHKERRQ(ierr);
2127 
2128     ierr = ISGetIndices(isrow[i],&irow[i]);CHKERRQ(ierr);
2129     ierr = ISGetLocalSize(isrow[i],&nrow[i]);CHKERRQ(ierr);
2130     if (allcolumns[i]) {
2131       icol[i] = NULL;
2132       ncol[i] = C->cmap->N;
2133     } else {
2134       ierr = ISGetIndices(iscol[i],&icol[i]);CHKERRQ(ierr);
2135       ierr = ISGetLocalSize(iscol[i],&ncol[i]);CHKERRQ(ierr);
2136     }
2137   }
2138 
2139   ierr = PetscMalloc1(ismax,&smats);CHKERRQ(ierr);
2140   ierr = PetscMalloc1(ismax,&row2proc);CHKERRQ(ierr);
2141   ierr = PetscMalloc2(ismax,&cmap,ismax,&rmap);CHKERRQ(ierr);
2142 
2143   if (scall == MAT_REUSE_MATRIX) {
2144     /* Assumes new rows are same length as the old rows */
2145     for (i=0; i<ismax; i++) {
2146       subc = (Mat_SeqAIJ*)(submats[i]->data);
2147       if ((submats[i]->rmap->n != nrow[i]) || (submats[i]->cmap->n != ncol[i])) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Cannot reuse matrix, wrong size");
2148 
2149       /* Initial matrix as if empty */
2150       ierr = PetscMemzero(subc->ilen,submats[i]->rmap->n*sizeof(PetscInt));CHKERRQ(ierr);
2151 
2152       /* Carry over the factor type of C */
2153       submats[i]->factortype = C->factortype;
2154 
2155       smat_i   = subc->submatis1;
2156       smats[i] = smat_i;
2157 
2158       nrqs        = smat_i->nrqs;
2159       nrqr        = smat_i->nrqr;
2160       rbuf1       = smat_i->rbuf1;
2161       rbuf2       = smat_i->rbuf2;
2162       rbuf3       = smat_i->rbuf3;
2163       req_source2 = smat_i->req_source2;
2164 
2165       sbuf1     = smat_i->sbuf1;
2166       sbuf2     = smat_i->sbuf2;
2167       ptr       = smat_i->ptr;
2168       tmp       = smat_i->tmp;
2169       ctr       = smat_i->ctr;
2170 
2171       pa          = smat_i->pa;
2172       req_size    = smat_i->req_size;
2173       req_source1 = smat_i->req_source1;
2174 
2175       allcolumns[i] = smat_i->allcolumns;
2176       row2proc[i]   = smat_i->row2proc;
2177       rmap[i]       = smat_i->rmap;
2178       cmap[i]       = smat_i->cmap;
2179     }
2180   } else { /* scall == MAT_INITIAL_MATRIX */
2181     /* Get some new tags to keep the communication clean */
2182     ierr = PetscObjectGetNewTag((PetscObject)C,&tag2);CHKERRQ(ierr);
2183     ierr = PetscObjectGetNewTag((PetscObject)C,&tag3);CHKERRQ(ierr);
2184 
2185     /* evaluate communication - mesg to who, length of mesg, and buffer space
2186      required. Based on this, buffers are allocated, and data copied into them */
2187     ierr = PetscCalloc4(size,&w1,size,&w2,size,&w3,size,&w4);CHKERRQ(ierr);   /* mesg size, initialize work vectors */
2188 
2189     for (i=0; i<ismax; i++) {
2190       jmax   = nrow[i];
2191       irow_i = irow[i];
2192 
2193       ierr   = PetscMalloc1(jmax,&row2proc_i);CHKERRQ(ierr);
2194       row2proc[i] = row2proc_i;
2195 
2196       if (issorted[i]) proc = 0;
2197       for (j=0; j<jmax; j++) {
2198         if (!issorted[i]) proc = 0;
2199         row = irow_i[j];
2200         while (row >= C->rmap->range[proc+1]) proc++;
2201         w4[proc]++;
2202         row2proc_i[j] = proc; /* map row index to proc */
2203       }
2204       for (j=0; j<size; j++) {
2205         if (w4[j]) { w1[j] += w4[j];  w3[j]++; w4[j] = 0;}
2206       }
2207     }
2208 
2209     nrqs     = 0;              /* no of outgoing messages */
2210     msz      = 0;              /* total mesg length (for all procs) */
2211     w1[rank] = 0;              /* no mesg sent to self */
2212     w3[rank] = 0;
2213     for (i=0; i<size; i++) {
2214       if (w1[i])  { w2[i] = 1; nrqs++;} /* there exists a message to proc i */
2215     }
2216     ierr = PetscMalloc1(nrqs+1,&pa);CHKERRQ(ierr); /*(proc -array)*/
2217     for (i=0,j=0; i<size; i++) {
2218       if (w1[i]) { pa[j] = i; j++; }
2219     }
2220 
2221     /* Each message would have a header = 1 + 2*(no of IS) + data */
2222     for (i=0; i<nrqs; i++) {
2223       j      = pa[i];
2224       w1[j] += w2[j] + 2* w3[j];
2225       msz   += w1[j];
2226     }
2227     ierr = PetscInfo2(0,"Number of outgoing messages %D Total message length %D\n",nrqs,msz);CHKERRQ(ierr);
2228 
2229     /* Determine the number of messages to expect, their lengths, and the from-ids */
2230     ierr = PetscGatherNumberOfMessages(comm,w2,w1,&nrqr);CHKERRQ(ierr);
2231     ierr = PetscGatherMessageLengths(comm,nrqs,nrqr,w1,&onodes1,&olengths1);CHKERRQ(ierr);
2232 
2233     /* Now post the Irecvs corresponding to these messages */
2234     tag0 = ((PetscObject)C)->tag;
2235     ierr = PetscPostIrecvInt(comm,tag0,nrqr,onodes1,olengths1,&rbuf1,&r_waits1);CHKERRQ(ierr);
2236 
2237     ierr = PetscFree(onodes1);CHKERRQ(ierr);
2238     ierr = PetscFree(olengths1);CHKERRQ(ierr);
2239 
2240     /* Allocate Memory for outgoing messages */
2241     ierr = PetscMalloc4(size,&sbuf1,size,&ptr,2*msz,&tmp,size,&ctr);CHKERRQ(ierr);
2242     ierr = PetscMemzero(sbuf1,size*sizeof(PetscInt*));CHKERRQ(ierr);
2243     ierr = PetscMemzero(ptr,size*sizeof(PetscInt*));CHKERRQ(ierr);
2244 
2245     {
2246       PetscInt *iptr = tmp;
2247       k    = 0;
2248       for (i=0; i<nrqs; i++) {
2249         j        = pa[i];
2250         iptr    += k;
2251         sbuf1[j] = iptr;
2252         k        = w1[j];
2253       }
2254     }
2255 
2256     /* Form the outgoing messages. Initialize the header space */
2257     for (i=0; i<nrqs; i++) {
2258       j           = pa[i];
2259       sbuf1[j][0] = 0;
2260       ierr        = PetscMemzero(sbuf1[j]+1,2*w3[j]*sizeof(PetscInt));CHKERRQ(ierr);
2261       ptr[j]      = sbuf1[j] + 2*w3[j] + 1;
2262     }
2263 
2264     /* Parse the isrow and copy data into outbuf */
2265     for (i=0; i<ismax; i++) {
2266       row2proc_i = row2proc[i];
2267       ierr   = PetscMemzero(ctr,size*sizeof(PetscInt));CHKERRQ(ierr);
2268       irow_i = irow[i];
2269       jmax   = nrow[i];
2270       for (j=0; j<jmax; j++) {  /* parse the indices of each IS */
2271         proc = row2proc_i[j];
2272         if (proc != rank) { /* copy to the outgoing buf*/
2273           ctr[proc]++;
2274           *ptr[proc] = irow_i[j];
2275           ptr[proc]++;
2276         }
2277       }
2278       /* Update the headers for the current IS */
2279       for (j=0; j<size; j++) { /* could optimize this loop too */
2280         if ((ctr_j = ctr[j])) {
2281           sbuf1_j        = sbuf1[j];
2282           k              = ++sbuf1_j[0];
2283           sbuf1_j[2*k]   = ctr_j;
2284           sbuf1_j[2*k-1] = i;
2285         }
2286       }
2287     }
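         /* Each outgoing request sbuf1[j] now has the layout
              [nis | is_no_1 n_1 | ... | is_no_nis n_nis | rows of IS is_no_1 | ...]
            where nis is the number of index sets that request rows from proc j and
            n_k is the number of rows requested for index set is_no_k. */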
2288 
2289     /* Now post the sends */
2290     ierr = PetscMalloc1(nrqs+1,&s_waits1);CHKERRQ(ierr);
2291     for (i=0; i<nrqs; ++i) {
2292       j    = pa[i];
2293       ierr = MPI_Isend(sbuf1[j],w1[j],MPIU_INT,j,tag0,comm,s_waits1+i);CHKERRQ(ierr);
2294     }
2295 
2296     /* Post Receives to capture the buffer size */
2297     ierr = PetscMalloc1(nrqs+1,&r_waits2);CHKERRQ(ierr);
2298     ierr = PetscMalloc3(nrqs+1,&req_source2,nrqs+1,&rbuf2,nrqs+1,&rbuf3);CHKERRQ(ierr);
2299     rbuf2[0] = tmp + msz;
2300     for (i=1; i<nrqs; ++i) {
2301       rbuf2[i] = rbuf2[i-1]+w1[pa[i-1]];
2302     }
2303     for (i=0; i<nrqs; ++i) {
2304       j    = pa[i];
2305       ierr = MPI_Irecv(rbuf2[i],w1[j],MPIU_INT,j,tag2,comm,r_waits2+i);CHKERRQ(ierr);
2306     }
2307 
2308     /* Send to other procs the buf size they should allocate */
2309     /* Receive messages*/
2310     ierr = PetscMalloc1(nrqr+1,&s_waits2);CHKERRQ(ierr);
2311     ierr = PetscMalloc1(nrqr+1,&r_status1);CHKERRQ(ierr);
2312     ierr = PetscMalloc3(nrqr,&sbuf2,nrqr,&req_size,nrqr,&req_source1);CHKERRQ(ierr);
2313     {
2314       PetscInt   *sAi = a->i,*sBi = b->i,id,rstart = C->rmap->rstart;
2315       PetscInt   *sbuf2_i;
2316 
2317       ierr = MPI_Waitall(nrqr,r_waits1,r_status1);CHKERRQ(ierr);
2318       for (i=0; i<nrqr; ++i) {
2319         req_size[i]    = 0;
2320         rbuf1_i        = rbuf1[i];
2321         start          = 2*rbuf1_i[0] + 1;
2322         ierr           = MPI_Get_count(r_status1+i,MPIU_INT,&end);CHKERRQ(ierr);
2323         ierr           = PetscMalloc1(end+1,&sbuf2[i]);CHKERRQ(ierr);
2324         sbuf2_i        = sbuf2[i];
2325         for (j=start; j<end; j++) {
2326           id              = rbuf1_i[j] - rstart;
2327           ncols           = sAi[id+1] - sAi[id] + sBi[id+1] - sBi[id];
2328           sbuf2_i[j]      = ncols;
2329           req_size[i] += ncols;
2330         }
2331         req_source1[i] = r_status1[i].MPI_SOURCE;
2332         /* form the header */
2333         sbuf2_i[0] = req_size[i];
2334         for (j=1; j<start; j++) sbuf2_i[j] = rbuf1_i[j];
2335 
2336         ierr = MPI_Isend(sbuf2_i,end,MPIU_INT,req_source1[i],tag2,comm,s_waits2+i);CHKERRQ(ierr);
2337       }
2338     }
2339     ierr = PetscFree(r_status1);CHKERRQ(ierr);
2340     ierr = PetscFree(r_waits1);CHKERRQ(ierr);
2341     ierr = PetscFree4(w1,w2,w3,w4);CHKERRQ(ierr);
2342 
2343     /* Receive messages*/
2344     ierr = PetscMalloc1(nrqs+1,&r_waits3);CHKERRQ(ierr);
2345     ierr = PetscMalloc1(nrqs+1,&r_status2);CHKERRQ(ierr);
2346 
2347     ierr = MPI_Waitall(nrqs,r_waits2,r_status2);CHKERRQ(ierr);
2348     for (i=0; i<nrqs; ++i) {
2349       ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf3[i]);CHKERRQ(ierr);
2350       req_source2[i] = r_status2[i].MPI_SOURCE;
2351       ierr = MPI_Irecv(rbuf3[i],rbuf2[i][0],MPIU_INT,req_source2[i],tag3,comm,r_waits3+i);CHKERRQ(ierr);
2352     }
2353     ierr = PetscFree(r_status2);CHKERRQ(ierr);
2354     ierr = PetscFree(r_waits2);CHKERRQ(ierr);
2355 
2356     /* Wait on sends1 and sends2 */
2357     ierr = PetscMalloc1(nrqs+1,&s_status1);CHKERRQ(ierr);
2358     ierr = PetscMalloc1(nrqr+1,&s_status2);CHKERRQ(ierr);
2359 
2360     if (nrqs) {ierr = MPI_Waitall(nrqs,s_waits1,s_status1);CHKERRQ(ierr);}
2361     if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits2,s_status2);CHKERRQ(ierr);}
2362     ierr = PetscFree(s_status1);CHKERRQ(ierr);
2363     ierr = PetscFree(s_status2);CHKERRQ(ierr);
2364     ierr = PetscFree(s_waits1);CHKERRQ(ierr);
2365     ierr = PetscFree(s_waits2);CHKERRQ(ierr);
2366 
2367     /* Now allocate sending buffers for a->j, and send them off */
2368     ierr = PetscMalloc1(nrqr+1,&sbuf_aj);CHKERRQ(ierr);
2369     for (i=0,j=0; i<nrqr; i++) j += req_size[i];
2370     ierr = PetscMalloc1(j+1,&sbuf_aj[0]);CHKERRQ(ierr);
2371     for (i=1; i<nrqr; i++) sbuf_aj[i] = sbuf_aj[i-1] + req_size[i-1];
2372 
2373     ierr = PetscMalloc1(nrqr+1,&s_waits3);CHKERRQ(ierr);
2374     {
2375       PetscInt nzA,nzB,*a_i = a->i,*b_i = b->i,lwrite;
2376       PetscInt *cworkA,*cworkB,cstart = C->cmap->rstart,rstart = C->rmap->rstart,*bmap = c->garray;
2377       PetscInt cend = C->cmap->rend;
2378       PetscInt *a_j = a->j,*b_j = b->j,ctmp;
2379 
2380       for (i=0; i<nrqr; i++) {
2381         rbuf1_i   = rbuf1[i];
2382         sbuf_aj_i = sbuf_aj[i];
2383         ct1       = 2*rbuf1_i[0] + 1;
2384         ct2       = 0;
2385         for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
2386           kmax = rbuf1[i][2*j];
2387           for (k=0; k<kmax; k++,ct1++) {
2388             row    = rbuf1_i[ct1] - rstart;
2389             nzA    = a_i[row+1] - a_i[row]; nzB = b_i[row+1] - b_i[row];
2390             ncols  = nzA + nzB;
2391             cworkA = a_j + a_i[row]; cworkB = b_j + b_i[row];
2392 
2393             /* load the column indices for this row into sorted cols */
2394             cols = sbuf_aj_i + ct2;
2395 
2396             lwrite = 0;
2397             for (l=0; l<nzB; l++) {
2398               if ((ctmp = bmap[cworkB[l]]) < cstart) cols[lwrite++] = ctmp;
2399             }
2400             for (l=0; l<nzA; l++) cols[lwrite++] = cstart + cworkA[l];
2401             for (l=0; l<nzB; l++) {
2402               if ((ctmp = bmap[cworkB[l]]) >= cend) cols[lwrite++] = ctmp;
2403             }
2404 
2405             ct2 += ncols;
2406           }
2407         }
2408         ierr = MPI_Isend(sbuf_aj_i,req_size[i],MPIU_INT,req_source1[i],tag3,comm,s_waits3+i);CHKERRQ(ierr);
2409       }
2410     }
2411     ierr = PetscMalloc1(nrqs+1,&r_status3);CHKERRQ(ierr);
2412     ierr = PetscMalloc1(nrqr+1,&s_status3);CHKERRQ(ierr);
2413 
2414     /* create col map: global col of C -> local col of submatrices */
2415     {
2416       const PetscInt *icol_i;
2417 #if defined(PETSC_USE_CTABLE)
2418       for (i=0; i<ismax; i++) {
2419         if (!allcolumns[i]) {
2420           ierr = PetscTableCreate(ncol[i]+1,C->cmap->N+1,&cmap[i]);CHKERRQ(ierr);
2421 
2422           jmax   = ncol[i];
2423           icol_i = icol[i];
2424           cmap_i = cmap[i];
2425           for (j=0; j<jmax; j++) {
2426             ierr = PetscTableAdd(cmap[i],icol_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
2427           }
2428         } else cmap[i] = NULL;
2429       }
2430 #else
2431       for (i=0; i<ismax; i++) {
2432         if (!allcolumns[i]) {
2433           ierr   = PetscCalloc1(C->cmap->N,&cmap[i]);CHKERRQ(ierr);
2434           jmax   = ncol[i];
2435           icol_i = icol[i];
2436           cmap_i = cmap[i];
2437           for (j=0; j<jmax; j++) {
2438             cmap_i[icol_i[j]] = j+1;
2439           }
2440         } else cmap[i] = NULL;
2441       }
2442 #endif
2443     }
2444 
2445     /* Create lens, which is required for MatSeqAIJSetPreallocation() */
2446     for (i=0,j=0; i<ismax; i++) j += nrow[i];
2447     ierr = PetscMalloc1(ismax,&lens);CHKERRQ(ierr);
2448 
2449     if (ismax) {
2450       ierr = PetscCalloc1(j,&lens[0]);CHKERRQ(ierr);
2451     }
2452     for (i=1; i<ismax; i++) lens[i] = lens[i-1] + nrow[i-1];
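         /* lens[0] is a single zeroed allocation of sum_i nrow[i] counters; each lens[i]
            points into it at the segment belonging to index set i. */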
2453 
2454     /* Update lens from local data */
2455     for (i=0; i<ismax; i++) {
2456       row2proc_i = row2proc[i];
2457       jmax = nrow[i];
2458       if (!allcolumns[i]) cmap_i = cmap[i];
2459       irow_i = irow[i];
2460       lens_i = lens[i];
2461       for (j=0; j<jmax; j++) {
2462         row = irow_i[j];
2463         proc = row2proc_i[j];
2464         if (proc == rank) {
2465           ierr = MatGetRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr);
2466           if (!allcolumns[i]) {
2467             for (k=0; k<ncols; k++) {
2468 #if defined(PETSC_USE_CTABLE)
2469               ierr = PetscTableFind(cmap_i,cols[k]+1,&tcol);CHKERRQ(ierr);
2470 #else
2471               tcol = cmap_i[cols[k]];
2472 #endif
2473               if (tcol) lens_i[j]++;
2474             }
2475           } else { /* allcolumns */
2476             lens_i[j] = ncols;
2477           }
2478           ierr = MatRestoreRow_MPIAIJ(C,row,&ncols,&cols,0);CHKERRQ(ierr);
2479         }
2480       }
2481     }
2482 
2483     /* Create row map: global row of C -> local row of submatrices */
2484 #if defined(PETSC_USE_CTABLE)
2485     for (i=0; i<ismax; i++) {
2486       ierr   = PetscTableCreate(nrow[i]+1,C->rmap->N+1,&rmap[i]);CHKERRQ(ierr);
2487       irow_i = irow[i];
2488       jmax   = nrow[i];
2489       for (j=0; j<jmax; j++) {
2490         ierr = PetscTableAdd(rmap[i],irow_i[j]+1,j+1,INSERT_VALUES);CHKERRQ(ierr);
2491       }
2492     }
2493 #else
2494     for (i=0; i<ismax; i++) {
2495       ierr   = PetscCalloc1(C->rmap->N,&rmap[i]);CHKERRQ(ierr);
2496       rmap_i = rmap[i];
2497       irow_i = irow[i];
2498       jmax   = nrow[i];
2499       for (j=0; j<jmax; j++) {
2500         rmap_i[irow_i[j]] = j;
2501       }
2502     }
2503 #endif
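         /* As with cmap, the tables store indices shifted by one so that a
            PetscTableFind() result of 0 can signal "not found"; lookups below subtract
            one to recover the local row. */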
2504 
2505     /* Update lens from offproc data */
2506     {
2507     PetscInt *rbuf2_i,*rbuf3_i,*sbuf1_i;
2508 
2509     ierr    = MPI_Waitall(nrqs,r_waits3,r_status3);CHKERRQ(ierr);
2510     for (tmp2=0; tmp2<nrqs; tmp2++) {
2511       sbuf1_i = sbuf1[pa[tmp2]];
2512       jmax    = sbuf1_i[0];
2513       ct1     = 2*jmax+1;
2514       ct2     = 0;
2515       rbuf2_i = rbuf2[tmp2];
2516       rbuf3_i = rbuf3[tmp2];
2517       for (j=1; j<=jmax; j++) {
2518         is_no  = sbuf1_i[2*j-1];
2519         max1   = sbuf1_i[2*j];
2520         lens_i = lens[is_no];
2521         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
2522         rmap_i = rmap[is_no];
2523         for (k=0; k<max1; k++,ct1++) {
2524 #if defined(PETSC_USE_CTABLE)
2525           ierr = PetscTableFind(rmap_i,sbuf1_i[ct1]+1,&row);CHKERRQ(ierr);
2526           row--;
2527           if (row < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"row not found in table");
2528 #else
2529           row = rmap_i[sbuf1_i[ct1]]; /* the row index in the new (sub)matrix */
2530 #endif
2531           max2 = rbuf2_i[ct1];
2532           for (l=0; l<max2; l++,ct2++) {
2533             if (!allcolumns[is_no]) {
2534 #if defined(PETSC_USE_CTABLE)
2535               ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
2536 #else
2537               tcol = cmap_i[rbuf3_i[ct2]];
2538 #endif
2539               if (tcol) lens_i[row]++;
2540             } else { /* allcolumns */
2541               lens_i[row]++; /* lens_i[row] += max2 ? */
2542             }
2543           }
2544         }
2545       }
2546     }
2547     }
2548     ierr = PetscFree(r_status3);CHKERRQ(ierr);
2549     ierr = PetscFree(r_waits3);CHKERRQ(ierr);
2550     if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits3,s_status3);CHKERRQ(ierr);}
2551     ierr = PetscFree(s_status3);CHKERRQ(ierr);
2552     ierr = PetscFree(s_waits3);CHKERRQ(ierr);
2553 
2554     /* Create the submatrices */
2555     for (i=0; i<ismax; i++) {
2556       PetscInt    rbs,cbs;
2557 
2558       ierr = ISGetBlockSize(isrow[i],&rbs);CHKERRQ(ierr);
2559       ierr = ISGetBlockSize(iscol[i],&cbs);CHKERRQ(ierr);
2560 
2561       ierr = MatCreate(PETSC_COMM_SELF,submats+i);CHKERRQ(ierr);
2562       ierr = MatSetSizes(submats[i],nrow[i],ncol[i],PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
2563 
2564       ierr = MatSetBlockSizes(submats[i],rbs,cbs);CHKERRQ(ierr);
2565       ierr = MatSetType(submats[i],((PetscObject)A)->type_name);CHKERRQ(ierr);
2566       ierr = MatSeqAIJSetPreallocation(submats[i],0,lens[i]);CHKERRQ(ierr);
2567 
2568       /* create struct Mat_SubMat and attach it to submat */
2569       ierr = PetscNew(&smat_i);CHKERRQ(ierr);
2570       subc = (Mat_SeqAIJ*)submats[i]->data;
2571       subc->submatis1 = smat_i;
2572       smats[i]        = smat_i;
2573 
2574       smat_i->destroy          = submats[i]->ops->destroy;
2575       submats[i]->ops->destroy = MatDestroy_MPIAIJ_MatGetSubmatrices;
2576       submats[i]->factortype   = C->factortype;
2577 
2578       smat_i->id          = i;
2579       smat_i->nrqs        = nrqs;
2580       smat_i->nrqr        = nrqr;
2581       smat_i->rbuf1       = rbuf1;
2582       smat_i->rbuf2       = rbuf2;
2583       smat_i->rbuf3       = rbuf3;
2584       smat_i->sbuf2       = sbuf2;
2585       smat_i->req_source2 = req_source2;
2586 
2587       smat_i->sbuf1       = sbuf1;
2588       smat_i->ptr         = ptr;
2589       smat_i->tmp         = tmp;
2590       smat_i->ctr         = ctr;
2591 
2592       smat_i->pa           = pa;
2593       smat_i->req_size     = req_size;
2594       smat_i->req_source1  = req_source1;
2595 
2596       smat_i->allcolumns  = allcolumns[i];
2597       smat_i->row2proc    = row2proc[i];
2598       smat_i->rmap        = rmap[i];
2599       smat_i->cmap        = cmap[i];
2600     }
2601 
2602     if (ismax) {ierr = PetscFree(lens[0]);CHKERRQ(ierr);}
2603     ierr = PetscFree(lens);CHKERRQ(ierr);
2604     ierr = PetscFree(sbuf_aj[0]);CHKERRQ(ierr);
2605     ierr = PetscFree(sbuf_aj);CHKERRQ(ierr);
2606 
2607   } /* endof scall == MAT_INITIAL_MATRIX */
2608 
2609   /* Post recv matrix values */
2610   ierr = PetscObjectGetNewTag((PetscObject)C,&tag4);CHKERRQ(ierr);
2611   ierr = PetscMalloc1(nrqs+1,&rbuf4);CHKERRQ(ierr);
2612   ierr = PetscMalloc1(nrqs+1,&r_waits4);CHKERRQ(ierr);
2613   ierr = PetscMalloc1(nrqs+1,&r_status4);CHKERRQ(ierr);
2614   ierr = PetscMalloc1(nrqr+1,&s_status4);CHKERRQ(ierr);
2615   for (i=0; i<nrqs; ++i) {
2616     ierr = PetscMalloc1(rbuf2[i][0]+1,&rbuf4[i]);CHKERRQ(ierr);
2617     ierr = MPI_Irecv(rbuf4[i],rbuf2[i][0],MPIU_SCALAR,req_source2[i],tag4,comm,r_waits4+i);CHKERRQ(ierr);
2618   }
2619 
2620   /* Allocate sending buffers for a->a, and send them off */
2621   ierr = PetscMalloc1(nrqr+1,&sbuf_aa);CHKERRQ(ierr);
2622   for (i=0,j=0; i<nrqr; i++) j += req_size[i];
2623   ierr = PetscMalloc1(j+1,&sbuf_aa[0]);CHKERRQ(ierr);
2624   for (i=1; i<nrqr; i++) sbuf_aa[i] = sbuf_aa[i-1] + req_size[i-1];
2625 
2626   ierr = PetscMalloc1(nrqr+1,&s_waits4);CHKERRQ(ierr);
2627   {
2628     PetscInt    nzA,nzB,*a_i = a->i,*b_i = b->i, *cworkB,lwrite;
2629     PetscInt    cstart = C->cmap->rstart,rstart = C->rmap->rstart,*bmap = c->garray;
2630     PetscInt    cend   = C->cmap->rend;
2631     PetscInt    *b_j   = b->j;
2632     PetscScalar *vworkA,*vworkB,*a_a = a->a,*b_a = b->a;
2633 
2634     for (i=0; i<nrqr; i++) {
2635       rbuf1_i   = rbuf1[i];
2636       sbuf_aa_i = sbuf_aa[i];
2637       ct1       = 2*rbuf1_i[0]+1;
2638       ct2       = 0;
2639       for (j=1,max1=rbuf1_i[0]; j<=max1; j++) {
2640         kmax = rbuf1_i[2*j];
2641         for (k=0; k<kmax; k++,ct1++) {
2642           row    = rbuf1_i[ct1] - rstart;
2643           nzA    = a_i[row+1] - a_i[row];     nzB = b_i[row+1] - b_i[row];
2644           ncols  = nzA + nzB;
2645           cworkB = b_j + b_i[row];
2646           vworkA = a_a + a_i[row];
2647           vworkB = b_a + b_i[row];
2648 
2649           /* load the column values for this row into vals*/
2650           vals = sbuf_aa_i+ct2;
2651 
2652           lwrite = 0;
2653           for (l=0; l<nzB; l++) {
2654             if ((bmap[cworkB[l]]) < cstart) vals[lwrite++] = vworkB[l];
2655           }
2656           for (l=0; l<nzA; l++) vals[lwrite++] = vworkA[l];
2657           for (l=0; l<nzB; l++) {
2658             if ((bmap[cworkB[l]]) >= cend) vals[lwrite++] = vworkB[l];
2659           }
2660 
2661           ct2 += ncols;
2662         }
2663       }
2664       ierr = MPI_Isend(sbuf_aa_i,req_size[i],MPIU_SCALAR,req_source1[i],tag4,comm,s_waits4+i);CHKERRQ(ierr);
2665     }
2666   }
2667 
2668   if (!ismax) {
2669     ierr = PetscFree(rbuf1[0]);CHKERRQ(ierr);
2670     ierr = PetscFree(rbuf1);CHKERRQ(ierr);
2671   }
2672 
2673   /* Assemble the matrices */
2674   /* First assemble the local rows */
2675   {
2676     PetscInt    ilen_row,*imat_ilen,*imat_j,*imat_i,old_row;
2677     PetscScalar *imat_a;
2678 
2679     for (i=0; i<ismax; i++) {
2680       row2proc_i = row2proc[i];
2681       subc      = (Mat_SeqAIJ*)submats[i]->data;
2682       imat_ilen = subc->ilen;
2683       imat_j    = subc->j;
2684       imat_i    = subc->i;
2685       imat_a    = subc->a;
2686 
2687       if (!allcolumns[i]) cmap_i = cmap[i];
2688       rmap_i = rmap[i];
2689       irow_i = irow[i];
2690       jmax   = nrow[i];
2691       for (j=0; j<jmax; j++) {
2692         row  = irow_i[j];
2693         proc = row2proc_i[j];
2694         if (proc == rank) {
2695           old_row = row;
2696 #if defined(PETSC_USE_CTABLE)
2697           ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr);
2698           row--;
2699 #else
2700           row = rmap_i[row];
2701 #endif
2702           ilen_row = imat_ilen[row];
2703           ierr     = MatGetRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr);
2704           mat_i    = imat_i[row];
2705           mat_a    = imat_a + mat_i;
2706           mat_j    = imat_j + mat_i;
2707           if (!allcolumns[i]) {
2708             for (k=0; k<ncols; k++) {
2709 #if defined(PETSC_USE_CTABLE)
2710               ierr = PetscTableFind(cmap_i,cols[k]+1,&tcol);CHKERRQ(ierr);
2711 #else
2712               tcol = cmap_i[cols[k]];
2713 #endif
2714               if (tcol) {
2715                 *mat_j++ = tcol - 1;
2716                 *mat_a++ = vals[k];
2717                 ilen_row++;
2718               }
2719             }
2720           } else { /* allcolumns */
2721             for (k=0; k<ncols; k++) {
2722               *mat_j++ = cols[k];  /* global col index! */
2723               *mat_a++ = vals[k];
2724               ilen_row++;
2725             }
2726           }
2727           ierr = MatRestoreRow_MPIAIJ(C,old_row,&ncols,&cols,&vals);CHKERRQ(ierr);
2728 
2729           imat_ilen[row] = ilen_row;
2730         }
2731       }
2732     }
2733   }
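       /* Local rows are written straight into each submatrix's CSR arrays (subc->i/j/a)
          instead of going through MatSetValues(); imat_ilen records how many entries
          each row actually received so assembly can finalize the row lengths. */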
2734 
2735   /* Now assemble the off-proc rows */
2736   {
2737     PetscInt    *sbuf1_i,*rbuf2_i,*rbuf3_i,*imat_ilen,ilen;
2738     PetscInt    *imat_j,*imat_i;
2739     PetscScalar *imat_a,*rbuf4_i;
2740 
2741     ierr    = MPI_Waitall(nrqs,r_waits4,r_status4);CHKERRQ(ierr);
2742     for (tmp2=0; tmp2<nrqs; tmp2++) {
2743       sbuf1_i = sbuf1[pa[tmp2]];
2744       jmax    = sbuf1_i[0];
2745       ct1     = 2*jmax + 1;
2746       ct2     = 0;
2747       rbuf2_i = rbuf2[tmp2];
2748       rbuf3_i = rbuf3[tmp2];
2749       rbuf4_i = rbuf4[tmp2];
2750       for (j=1; j<=jmax; j++) {
2751         is_no     = sbuf1_i[2*j-1];
2752         rmap_i    = rmap[is_no];
2753         if (!allcolumns[is_no]) cmap_i = cmap[is_no];
2754         subc      = (Mat_SeqAIJ*)submats[is_no]->data;
2755         imat_ilen = subc->ilen;
2756         imat_j    = subc->j;
2757         imat_i    = subc->i;
2758         imat_a    = subc->a;
2759         max1      = sbuf1_i[2*j];
2760         for (k=0; k<max1; k++,ct1++) {
2761           row = sbuf1_i[ct1];
2762 #if defined(PETSC_USE_CTABLE)
2763           ierr = PetscTableFind(rmap_i,row+1,&row);CHKERRQ(ierr);
2764           row--;
2765 #else
2766           row = rmap_i[row];
2767 #endif
2768           ilen  = imat_ilen[row];
2769           mat_i = imat_i[row];
2770           mat_a = imat_a + mat_i;
2771           mat_j = imat_j + mat_i;
2772           max2  = rbuf2_i[ct1];
2773           if (!allcolumns[is_no]) {
2774             for (l=0; l<max2; l++,ct2++) {
2775 
2776 #if defined(PETSC_USE_CTABLE)
2777               ierr = PetscTableFind(cmap_i,rbuf3_i[ct2]+1,&tcol);CHKERRQ(ierr);
2778 #else
2779               tcol = cmap_i[rbuf3_i[ct2]];
2780 #endif
2781               if (tcol) {
2782                 *mat_j++ = tcol - 1;
2783                 *mat_a++ = rbuf4_i[ct2];
2784                 ilen++;
2785               }
2786             }
2787           } else { /* allcolumns */
2788             for (l=0; l<max2; l++,ct2++) {
2789               *mat_j++ = rbuf3_i[ct2]; /* same global column index as in C */
2790               *mat_a++ = rbuf4_i[ct2];
2791               ilen++;
2792             }
2793           }
2794           imat_ilen[row] = ilen;
2795         }
2796       }
2797     }
2798   }
2799 
2800   ierr = PetscFree(r_status4);CHKERRQ(ierr);
2801   ierr = PetscFree(r_waits4);CHKERRQ(ierr);
2802   if (nrqr) {ierr = MPI_Waitall(nrqr,s_waits4,s_status4);CHKERRQ(ierr);}
2803   ierr = PetscFree(s_waits4);CHKERRQ(ierr);
2804   ierr = PetscFree(s_status4);CHKERRQ(ierr);
2805 
2806   /* Restore the indices */
2807   for (i=0; i<ismax; i++) {
2808     ierr = ISRestoreIndices(isrow[i],irow+i);CHKERRQ(ierr);
2809     if (!allcolumns[i]) {
2810       ierr = ISRestoreIndices(iscol[i],icol+i);CHKERRQ(ierr);
2811     }
2812   }
2813 
2814   for (i=0; i<ismax; i++) {
2815     ierr = MatAssemblyBegin(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2816     ierr = MatAssemblyEnd(submats[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2817   }
2818 
2819   /* Destroy allocated memory */
2820   if (!ismax) {
2821     ierr = PetscFree(pa);CHKERRQ(ierr);
2822 
2823     ierr = PetscFree4(sbuf1,ptr,tmp,ctr);CHKERRQ(ierr);
2824     for (i=0; i<nrqr; ++i) {
2825       ierr = PetscFree(sbuf2[i]);CHKERRQ(ierr);
2826     }
2827     for (i=0; i<nrqs; ++i) {
2828       ierr = PetscFree(rbuf3[i]);CHKERRQ(ierr);
2829     }
2830 
2831     ierr = PetscFree3(sbuf2,req_size,req_source1);CHKERRQ(ierr);
2832     ierr = PetscFree3(req_source2,rbuf2,rbuf3);CHKERRQ(ierr);
2833   }
2834 
2835   ierr = PetscFree(sbuf_aa[0]);CHKERRQ(ierr);
2836   ierr = PetscFree(sbuf_aa);CHKERRQ(ierr);
2837   ierr = PetscFree5(irow,icol,nrow,ncol,issorted);CHKERRQ(ierr);
2838 
2839   for (i=0; i<nrqs; ++i) {
2840     ierr = PetscFree(rbuf4[i]);CHKERRQ(ierr);
2841   }
2842   ierr = PetscFree(rbuf4);CHKERRQ(ierr);
2843 
2844   ierr = PetscFree2(cmap,rmap);CHKERRQ(ierr);
2845   ierr = PetscFree(row2proc);CHKERRQ(ierr);
2846   ierr = PetscFree(smats);CHKERRQ(ierr);
2847   PetscFunctionReturn(0);
2848 }
2849 
2850 /*
2851  Permute A & B into C's *local* index space using rowemb,dcolemb for A and rowemb,ocolemb for B.
2852  The embeddings are assumed to be injections, which implies that the range of rowemb is a subset
2853  of [0,m), the range of dcolemb lies in [0,n), and the range of ocolemb lies in [0,N-n).
2854  If pattern == DIFFERENT_NONZERO_PATTERN, C is preallocated according to A&B.
2855  After that B's columns are mapped into C's global column space, so that C is in the "disassembled"
2856  state, and needs to be "assembled" later by compressing B's column space.
2857 
2858  This function may be called in lieu of preallocation, so C should not be expected to be preallocated.
2859  Following this call, C->A & C->B have been created, even if empty.
2860  */
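     /*
      A minimal usage sketch (hypothetical, for illustration only: the communicator comm, the local
      sizes m and n, and the sequential pieces Ad and Ao are assumptions; NULL embeddings are taken
      to mean identity maps):

        Mat C;
        ierr = MatCreate(comm,&C);CHKERRQ(ierr);
        ierr = MatSetSizes(C,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
        ierr = MatSetType(C,MATMPIAIJ);CHKERRQ(ierr);
        ierr = PetscLayoutSetUp(C->rmap);CHKERRQ(ierr);
        ierr = PetscLayoutSetUp(C->cmap);CHKERRQ(ierr);
        ierr = MatSetSeqMats_MPIAIJ(C,NULL,NULL,NULL,DIFFERENT_NONZERO_PATTERN,Ad,Ao);CHKERRQ(ierr);
        ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
        ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     */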
2861 PetscErrorCode MatSetSeqMats_MPIAIJ(Mat C,IS rowemb,IS dcolemb,IS ocolemb,MatStructure pattern,Mat A,Mat B)
2862 {
2863   /* If making this function public, change the error returned in this function away from _PLIB. */
2864   PetscErrorCode ierr;
2865   Mat_MPIAIJ     *aij;
2866   Mat_SeqAIJ     *Baij;
2867   PetscBool      seqaij,Bdisassembled;
2868   PetscInt       m,n,*nz,i,j,ngcol,col,rstart,rend,shift,count;
2869   PetscScalar    v;
2870   const PetscInt *rowindices,*colindices;
2871 
2872   PetscFunctionBegin;
2873   /* Check to make sure the component matrices (and embeddings) are compatible with C. */
2874   if (A) {
2875     ierr = PetscObjectTypeCompare((PetscObject)A,MATSEQAIJ,&seqaij);CHKERRQ(ierr);
2876     if (!seqaij) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diagonal matrix is of wrong type");
2877     if (rowemb) {
2878       ierr = ISGetLocalSize(rowemb,&m);CHKERRQ(ierr);
2879       if (m != A->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Row IS of size %D is incompatible with diag matrix row size %D",m,A->rmap->n);
2880     } else {
2881       if (C->rmap->n != A->rmap->n) {
2882 	SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag seq matrix is row-incompatible with the MPIAIJ matrix");
2883       }
2884     }
2885     if (dcolemb) {
2886       ierr = ISGetLocalSize(dcolemb,&n);CHKERRQ(ierr);
2887       if (n != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag col IS of size %D is incompatible with diag matrix col size %D",n,A->cmap->n);
2888     } else {
2889       if (C->cmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Diag seq matrix is col-incompatible with the MPIAIJ matrix");
2890     }
2891   }
2892   if (B) {
2893     ierr = PetscObjectTypeCompare((PetscObject)B,MATSEQAIJ,&seqaij);CHKERRQ(ierr);
2894     if (!seqaij) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diagonal matrix is of wrong type");
2895     if (rowemb) {
2896       ierr = ISGetLocalSize(rowemb,&m);CHKERRQ(ierr);
2897       if (m != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Row IS of size %D is incompatible with off-diag matrix row size %D",m,B->rmap->n);
2898     } else {
2899       if (C->rmap->n != B->rmap->n) {
2900 	SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag seq matrix is row-incompatible with the MPIAIJ matrix");
2901       }
2902     }
2903     if (ocolemb) {
2904       ierr = ISGetLocalSize(ocolemb,&n);CHKERRQ(ierr);
2905       if (n != B->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag col IS of size %D is incompatible with off-diag matrix col size %D",n,B->cmap->n);
2906     } else {
2907       if (C->cmap->N - C->cmap->n != B->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Off-diag seq matrix is col-incompatible with the MPIAIJ matrix");
2908     }
2909   }
2910 
2911   aij    = (Mat_MPIAIJ*)(C->data);
2912   if (!aij->A) {
2913     /* Mimic parts of MatMPIAIJSetPreallocation() */
2914     ierr   = MatCreate(PETSC_COMM_SELF,&aij->A);CHKERRQ(ierr);
2915     ierr   = MatSetSizes(aij->A,C->rmap->n,C->cmap->n,C->rmap->n,C->cmap->n);CHKERRQ(ierr);
2916     ierr   = MatSetBlockSizesFromMats(aij->A,C,C);CHKERRQ(ierr);
2917     ierr   = MatSetType(aij->A,MATSEQAIJ);CHKERRQ(ierr);
2918     ierr   = PetscLogObjectParent((PetscObject)C,(PetscObject)aij->A);CHKERRQ(ierr);
2919   }
2920   if (A) {
2921     ierr   = MatSetSeqMat_SeqAIJ(aij->A,rowemb,dcolemb,pattern,A);CHKERRQ(ierr);
2922   } else {
2923     ierr = MatSetUp(aij->A);CHKERRQ(ierr);
2924   }
2925   if (B) { /* Destroy the old matrix or the column map, depending on the sparsity pattern. */
2926     /*
2927       If pattern == DIFFERENT_NONZERO_PATTERN, we reallocate B and
2928       need to "disassemble" B -- convert it to using C's global indices.
2929       To insert the values we take the safer, albeit more expensive, route of MatSetValues().
2930 
2931       If pattern == SUBSET_NONZERO_PATTERN, we do not "disassemble" B and do not reallocate;
2932       we MatZeroEntries(aij->B) first, so there may be a bunch of zeros that, perhaps, could be compacted out.
2933 
2934       TODO: Put B's values into aij->B's aij structure in place using the embedding ISs?
2935       At least avoid calling MatSetValues() and the implied searches?
2936     */
2937 
2938     if (pattern == DIFFERENT_NONZERO_PATTERN) { /* B != NULL is guaranteed by the enclosing check */
2939 #if defined(PETSC_USE_CTABLE)
2940       ierr = PetscTableDestroy(&aij->colmap);CHKERRQ(ierr);
2941 #else
2942       ierr = PetscFree(aij->colmap);CHKERRQ(ierr);
2943       /* A bit of a HACK: ideally we should handle the aij->B case all in one code block below. */
2944       if (aij->B) {
2945         ierr = PetscLogObjectMemory((PetscObject)C,-aij->B->cmap->n*sizeof(PetscInt));CHKERRQ(ierr);
2946       }
2947 #endif
2948       ngcol = 0;
2949       if (aij->lvec) {
2950 	ierr = VecGetSize(aij->lvec,&ngcol);CHKERRQ(ierr);
2951       }
2952       if (aij->garray) {
2953 	ierr = PetscFree(aij->garray);CHKERRQ(ierr);
2954 	ierr = PetscLogObjectMemory((PetscObject)C,-ngcol*sizeof(PetscInt));CHKERRQ(ierr);
2955       }
2956       ierr = VecDestroy(&aij->lvec);CHKERRQ(ierr);
2957       ierr = VecScatterDestroy(&aij->Mvctx);CHKERRQ(ierr);
2958     }
2959     if (aij->B && pattern == DIFFERENT_NONZERO_PATTERN) {
2960       ierr = MatDestroy(&aij->B);CHKERRQ(ierr);
2961     }
2962     if (aij->B && pattern == SUBSET_NONZERO_PATTERN) {
2963       ierr = MatZeroEntries(aij->B);CHKERRQ(ierr);
2964     }
2965   }
2966   Bdisassembled = PETSC_FALSE;
2967   if (!aij->B) {
2968     ierr = MatCreate(PETSC_COMM_SELF,&aij->B);CHKERRQ(ierr);
2969     ierr = PetscLogObjectParent((PetscObject)C,(PetscObject)aij->B);CHKERRQ(ierr);
2970     ierr = MatSetSizes(aij->B,C->rmap->n,C->cmap->N,C->rmap->n,C->cmap->N);CHKERRQ(ierr);
2971     ierr = MatSetBlockSizesFromMats(aij->B,B,B);CHKERRQ(ierr);
2972     ierr = MatSetType(aij->B,MATSEQAIJ);CHKERRQ(ierr);
2973     Bdisassembled = PETSC_TRUE;
2974   }
2975   if (B) {
2976     Baij = (Mat_SeqAIJ*)(B->data);
2977     if (pattern == DIFFERENT_NONZERO_PATTERN) {
2978       ierr = PetscMalloc1(B->rmap->n,&nz);CHKERRQ(ierr);
2979       for (i=0; i<B->rmap->n; i++) {
2980 	nz[i] = Baij->i[i+1] - Baij->i[i];
2981       }
2982       ierr = MatSeqAIJSetPreallocation(aij->B,0,nz);CHKERRQ(ierr);
2983       ierr = PetscFree(nz);CHKERRQ(ierr);
2984     }
2985 
2986     ierr  = PetscLayoutGetRange(C->rmap,&rstart,&rend);CHKERRQ(ierr);
2987     shift = rend-rstart;
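         /* Illustration of the disassembly shift: with rstart=10, rend=20, N=100, a compacted
            column index k of B maps to global column k when k < rstart, and to k+shift otherwise,
            so the locally owned column block [rstart,rend) is skipped. */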
2988     count = 0;
2989     rowindices = NULL;
2990     colindices = NULL;
2991     if (rowemb) {
2992       ierr = ISGetIndices(rowemb,&rowindices);CHKERRQ(ierr);
2993     }
2994     if (ocolemb) {
2995       ierr = ISGetIndices(ocolemb,&colindices);CHKERRQ(ierr);
2996     }
2997     for (i=0; i<B->rmap->n; i++) {
2998       PetscInt row;
2999       row = i;
3000       if (rowindices) row = rowindices[i];
3001       for (j=Baij->i[i]; j<Baij->i[i+1]; j++) {
3002 	col  = Baij->j[count];
3003 	if (colindices) col = colindices[col];
3004 	if (Bdisassembled && col>=rstart) col += shift;
3005 	v    = Baij->a[count];
3006 	ierr = MatSetValues(aij->B,1,&row,1,&col,&v,INSERT_VALUES);CHKERRQ(ierr);
3007 	++count;
3008       }
3009     }
3010     /* No assembly for aij->B is necessary. */
3011     /* FIXME: set aij->B's nonzerostate correctly. */
3012   } else {
3013     ierr = MatSetUp(aij->B);CHKERRQ(ierr);
3014   }
3015   C->preallocated  = PETSC_TRUE;
3016   C->was_assembled = PETSC_FALSE;
3017   C->assembled     = PETSC_FALSE;
3018   /*
3019      C will need to be assembled so that aij->B can be compressed into local form in MatSetUpMultiply_MPIAIJ().
3020      Furthermore, its nonzerostate will need to be based on those of aij->A and aij->B.
3021   */
3022   PetscFunctionReturn(0);
3023 }
3024 
3025 /*
3026   B uses local column indices ranging between 0 and N-n; they must be interpreted using garray.
3027  */
3028 PetscErrorCode MatGetSeqMats_MPIAIJ(Mat C,Mat *A,Mat *B)
3029 {
3030   Mat_MPIAIJ *aij = (Mat_MPIAIJ*) (C->data);
3031 
3032   PetscFunctionBegin;
3033   PetscValidPointer(A,2);
3034   PetscValidPointer(B,3);
3035   /* FIXME: make sure C is assembled */
3036   *A = aij->A;
3037   *B = aij->B;
3038   /* Note that we don't incref *A and *B, so be careful! */
3039   PetscFunctionReturn(0);
3040 }
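     /*
      A minimal sketch (an illustrative assumption, not part of the interface): garray[k] gives the
      global column index in C of B's local column k, so B's entries can be traversed as follows:

        Mat        A,B;
        Mat_SeqAIJ *Bseq;
        Mat_MPIAIJ *aij = (Mat_MPIAIJ*)C->data;
        PetscInt   i,j,gcol;

        ierr = MatGetSeqMats_MPIAIJ(C,&A,&B);CHKERRQ(ierr);
        Bseq = (Mat_SeqAIJ*)B->data;
        for (i=0; i<B->rmap->n; i++) {
          for (j=Bseq->i[i]; j<Bseq->i[i+1]; j++) {
            gcol = aij->garray[Bseq->j[j]];
          }
        }
     */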
3041 
3042 /*
3043   Extract MPI submatrices encoded by pairs of IS that may live on subcomms of C.
3044   NOT SCALABLE due to the use of ISGetNonlocalIS() (see below).
3045 */
3046 PetscErrorCode MatGetSubMatricesMPI_MPIXAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[],
3047                                                  PetscErrorCode(*getsubmats_seq)(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat**),
3048 					         PetscErrorCode(*getlocalmats)(Mat,Mat*,Mat*),
3049 					         PetscErrorCode(*setseqmat)(Mat,IS,IS,MatStructure,Mat),
3050 					         PetscErrorCode(*setseqmats)(Mat,IS,IS,IS,MatStructure,Mat,Mat))
3051 {
3052   PetscErrorCode ierr;
3053   PetscMPIInt    isize,flag;
3054   PetscInt       i,ii,cismax,ispar;
3055   Mat            *A,*B;
3056   IS             *isrow_p,*iscol_p,*cisrow,*ciscol,*ciscol_p;
3057 
3058   PetscFunctionBegin;
3059   if (!ismax) PetscFunctionReturn(0);
3060 
3061   for (i = 0, cismax = 0; i < ismax; ++i) {
3063     ierr = MPI_Comm_compare(((PetscObject)isrow[i])->comm,((PetscObject)iscol[i])->comm,&flag);CHKERRQ(ierr);
3064     if (flag != MPI_IDENT) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Row and column index sets must have the same communicator");
3065     ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm, &isize);CHKERRQ(ierr);
3066     if (isize > 1) ++cismax;
3067   }
3068 
3069   /*
3070      If cismax is zero on all C's ranks, then and only then can we use purely sequential matrix extraction.
3071      ispar is the maximum of cismax over C's comm, so it is nonzero iff at least one rank has a parallel IS.
3072   */
3073   ierr = MPIU_Allreduce(&cismax,&ispar,1,MPIU_INT,MPI_MAX,PetscObjectComm((PetscObject)C));CHKERRQ(ierr);
3074   if (!ispar) { /* Sequential ISs only across C's comm, so can call the sequential matrix extraction subroutine. */
3075     ierr = (*getsubmats_seq)(C,ismax,isrow,iscol,scall,submat);CHKERRQ(ierr);
3076     PetscFunctionReturn(0);
3077   }
3078 
3079   /* if (ispar) */
3080   /*
3081     Construct the "complements" -- the off-processor indices -- of the iscol ISs for parallel ISs only.
3082     These are used to extract the off-diag portion of the resulting parallel matrix.
3083     The row IS for the off-diag portion is the same as for the diag portion,
3084     so we merely alias (without increfing) the row IS, while skipping those that are sequential.
3085   */
3086   ierr = PetscMalloc2(cismax,&cisrow,cismax,&ciscol);CHKERRQ(ierr);
3087   ierr = PetscMalloc1(cismax,&ciscol_p);CHKERRQ(ierr);
3088   for (i = 0, ii = 0; i < ismax; ++i) {
3089     ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
3090     if (isize > 1) {
3091       /*
3092 	 TODO: This is the part that's ***NOT SCALABLE***.
3093 	 To fix this we need to extract just the indices of C's nonzero columns
3094 	 that lie on the intersection of isrow[i] and ciscol[ii] -- the nonlocal
3095 	 part of iscol[i] -- without actually computing ciscol[ii]. This also has
3096 	 to be done without serializing on the IS list, so, most likely, it is best
3097 	 done by rewriting MatGetSubMatrices_MPIAIJ() directly.
3098       */
3099       ierr = ISGetNonlocalIS(iscol[i],&(ciscol[ii]));CHKERRQ(ierr);
3100       /* Now we have to
3101 	 (a) make sure ciscol[ii] is sorted, since, even if the off-proc indices
3102 	     were sorted on each rank, concatenated they might no longer be sorted;
3103 	 (b) Use ISSortPermutation() to construct ciscol_p, the mapping from the
3104 	     indices in the nondecreasing order to the original index positions.
3105 	 If ciscol[ii] is strictly increasing, the permutation IS is NULL.
3106       */
3107       ierr = ISSortPermutation(ciscol[ii],PETSC_FALSE,ciscol_p+ii);CHKERRQ(ierr);
3108       ierr = ISSort(ciscol[ii]);CHKERRQ(ierr);
3109       ++ii;
3110     }
3111   }
3112   ierr = PetscMalloc2(ismax,&isrow_p,ismax,&iscol_p);CHKERRQ(ierr);
3113   for (i = 0, ii = 0; i < ismax; ++i) {
3114     PetscInt       j,issize;
3115     const PetscInt *indices;
3116 
3117     /*
3118        Permute the indices into a nondecreasing order. Reject row and col indices with duplicates.
3119      */
3120     ierr = ISSortPermutation(isrow[i],PETSC_FALSE,isrow_p+i);CHKERRQ(ierr);
3121     ierr = ISSort(isrow[i]);CHKERRQ(ierr);
3122     ierr = ISGetLocalSize(isrow[i],&issize);CHKERRQ(ierr);
3123     ierr = ISGetIndices(isrow[i],&indices);CHKERRQ(ierr);
3124     for (j = 1; j < issize; ++j) {
3125       if (indices[j] == indices[j-1]) {
3126 	SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Repeated indices in row IS %D: indices at %D and %D are both %D",i,j-1,j,indices[j]);
3127       }
3128     }
3129     ierr = ISRestoreIndices(isrow[i],&indices);CHKERRQ(ierr);
3130 
3131 
3132     ierr = ISSortPermutation(iscol[i],PETSC_FALSE,iscol_p+i);CHKERRQ(ierr);
3133     ierr = ISSort(iscol[i]);CHKERRQ(ierr);
3134     ierr = ISGetLocalSize(iscol[i],&issize);CHKERRQ(ierr);
3135     ierr = ISGetIndices(iscol[i],&indices);CHKERRQ(ierr);
3136     for (j = 1; j < issize; ++j) {
3137       if (indices[j-1] == indices[j]) {
3138 	SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Repeated indices in col IS %D: indices at %D and %D are both %D",i,j-1,j,indices[j]);
3139       }
3140     }
3141     ierr = ISRestoreIndices(iscol[i],&indices);CHKERRQ(ierr);
3142     ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
3143     if (isize > 1) {
3144       cisrow[ii] = isrow[i];
3145       ++ii;
3146     }
3147   }
3148   /*
3149     Allocate the necessary arrays to hold the resulting parallel matrices as well as the intermediate
3150     array of sequential matrices underlying the resulting parallel matrices.
3151     Which arrays to allocate is based on the value of MatReuse scall and whether ISs are sorted and/or
3152     contain duplicates.
3153 
3154     There are as many diag matrices as there are original index sets. There are only as many parallel
3155     and off-diag matrices, as there are parallel (comm size > 1) index sets.
3156 
3157     ARRAYS that can hold Seq matrices get allocated in any event -- either here or by getsubmats_seq():
3158     - If the array of MPI matrices already exists and is being reused, we need to allocate the array
3159       and extract the underlying seq matrices into it to serve as placeholders, into which getsubmats_seq
3160       will deposit the extracted diag and off-diag parts. Thus, we allocate the A&B arrays and fill them
3161       with A[i] and B[ii] extracted from the corresponding MPI submat.
3162     - However, if the rows, A's column indices or B's column indices are not sorted, the extracted A[i] & B[ii]
3163       will have a different order from what getsubmats_seq expects.  To handle this case -- indicated
3164       by a nonzero isrow_p[i], iscol_p[i], or ciscol_p[ii] -- we duplicate A[i] --> AA[i], B[ii] --> BB[ii]
3165       (retrieve composed AA[i] or BB[ii]) and reuse them here. AA[i] and BB[ii] are then used to permute their
3166       values into A[i] and B[ii] sitting inside the corresponding submat.
3167     - If no reuse is taking place then getsubmats_seq will allocate the A&B arrays and create the corresponding
3168       A[i], B[ii], AA[i] or BB[ii] matrices.
3169   */
3170   /* Parallel matrix array is allocated here only if no reuse is taking place. If reused, it is passed in by the caller. */
3171   if (scall == MAT_INITIAL_MATRIX) {
3172     ierr = PetscMalloc1(ismax,submat);CHKERRQ(ierr);
3173   }
3174 
3175   /* Now obtain the sequential A and B submatrices separately. */
3176   /* scall=MAT_REUSE_MATRIX is not handled yet, because getsubmats_seq() requires reuse of A and B */
3177   ierr = (*getsubmats_seq)(C,ismax,isrow,iscol,MAT_INITIAL_MATRIX,&A);CHKERRQ(ierr);
3178   ierr = (*getsubmats_seq)(C,cismax,cisrow,ciscol,MAT_INITIAL_MATRIX,&B);CHKERRQ(ierr);
3179 
3180   /*
3181     If scall == MAT_REUSE_MATRIX AND the permutations are NULL, we are done, since the sequential
3182     matrices A & B have been extracted directly into the parallel matrices containing them, or
3183     simply into the sequential matrix identical with the corresponding A (if isize == 1).
3184     Note that in that case colmap doesn't need to be rebuilt, since the matrices are expected
3185     to have the same sparsity pattern.
3186     Otherwise, A and/or B have to be properly embedded into C's index spaces and the correct colmap
3187     must be constructed for C. This is done by setseqmat(s).
3188   */
3189   for (i = 0, ii = 0; i < ismax; ++i) {
3190     /*
3191        TODO: cache ciscol, permutation ISs and maybe cisrow? What about isrow & iscol?
3192        That way we can avoid sorting and computing permutations when reusing.
3193        To this end:
3194         - remove the old cache, if it exists, when extracting submatrices with MAT_INITIAL_MATRIX
3195 	- if caching arrays to hold the ISs, make and compose a container for them so that it can
3196 	  be destroyed upon destruction of C (use PetscContainerUserDestroy() to clear out the contents).
3197     */
3198     MatStructure pattern;
3199     pattern = DIFFERENT_NONZERO_PATTERN;
3200 
3201     ierr = MPI_Comm_size(((PetscObject)isrow[i])->comm,&isize);CHKERRQ(ierr);
3202     /* Construct submat[i] from the Seq pieces A (and B, if necessary). */
3203     if (isize > 1) {
3204       if (scall == MAT_INITIAL_MATRIX) {
3205 	ierr = MatCreate(((PetscObject)isrow[i])->comm,(*submat)+i);CHKERRQ(ierr);
3206 	ierr = MatSetSizes((*submat)[i],A[i]->rmap->n,A[i]->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
3207 	ierr = MatSetType((*submat)[i],MATMPIAIJ);CHKERRQ(ierr);
3208 	ierr = PetscLayoutSetUp((*submat)[i]->rmap);CHKERRQ(ierr);
3209 	ierr = PetscLayoutSetUp((*submat)[i]->cmap);CHKERRQ(ierr);
3210       }
3211       /*
3212 	For each parallel isrow[i], insert the extracted sequential matrices into the parallel matrix.
3213       */
3214       {
3215 	Mat AA,BB;
3216         AA = A[i];
3217         BB = B[ii];
3218 	if (AA || BB) {
3219 	  ierr = setseqmats((*submat)[i],isrow_p[i],iscol_p[i],ciscol_p[ii],pattern,AA,BB);CHKERRQ(ierr);
3220 	  ierr = MatAssemblyBegin((*submat)[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3221 	  ierr = MatAssemblyEnd((*submat)[i],MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3222 	}
3223 
3224         ierr = MatDestroy(&AA);CHKERRQ(ierr);
3225         ierr = MatDestroy(&BB);CHKERRQ(ierr);
3226       }
3227       ierr = ISDestroy(ciscol+ii);CHKERRQ(ierr);
3228       ierr = ISDestroy(ciscol_p+ii);CHKERRQ(ierr);
3229       ++ii;
3230     } else { /* if (isize == 1) */
3231       if (scall == MAT_REUSE_MATRIX) {
3232         ierr = MatDestroy(&(*submat)[i]);CHKERRQ(ierr);
3233       }
3234       if (isrow_p[i] || iscol_p[i]) {
3235         ierr = MatDuplicate(A[i],MAT_DO_NOT_COPY_VALUES,(*submat)+i);CHKERRQ(ierr);
3236         ierr = setseqmat((*submat)[i],isrow_p[i],iscol_p[i],pattern,A[i]);CHKERRQ(ierr);
3237 	/* Otherwise A is extracted straight into (*submat)[i]. */
3238 	/* TODO: Compose A[i] on (*submat)[i] for future use, if ((isrow_p[i] || iscol_p[i]) && scall == MAT_INITIAL_MATRIX). */
3239 	ierr = MatDestroy(A+i);CHKERRQ(ierr);
3240       } else (*submat)[i] = A[i];
3241     }
3242     ierr = ISDestroy(&isrow_p[i]);CHKERRQ(ierr);
3243     ierr = ISDestroy(&iscol_p[i]);CHKERRQ(ierr);
3244   }
3245   ierr = PetscFree2(cisrow,ciscol);CHKERRQ(ierr);
3246   ierr = PetscFree2(isrow_p,iscol_p);CHKERRQ(ierr);
3247   ierr = PetscFree(ciscol_p);CHKERRQ(ierr);
3248   ierr = PetscFree(A);CHKERRQ(ierr);
3249   ierr = PetscFree(B);CHKERRQ(ierr);
3250   PetscFunctionReturn(0);
3251 }
3252 
3253 PetscErrorCode MatGetSubMatricesMPI_MPIAIJ(Mat C,PetscInt ismax,const IS isrow[],const IS iscol[],MatReuse scall,Mat *submat[])
3254 {
3255   PetscErrorCode ierr;
3256 
3257   PetscFunctionBegin;
3258   ierr = MatGetSubMatricesMPI_MPIXAIJ(C,ismax,isrow,iscol,scall,submat,MatGetSubMatrices_MPIAIJ,MatGetSeqMats_MPIAIJ,MatSetSeqMat_SeqAIJ,MatSetSeqMats_MPIAIJ);CHKERRQ(ierr);
3259   PetscFunctionReturn(0);
3260 }
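     /*
      A hedged usage sketch of the public entry point (assumed here to be MatGetSubMatricesMPI(),
      which dispatches to the routine above for MATMPIAIJ): isrow/iscol live on a subcommunicator
      of C, and subcomm, nloc, and first are placeholders for the caller's choices:

        IS  isrow,iscol;
        Mat *submat;

        ierr = ISCreateStride(subcomm,nloc,first,1,&isrow);CHKERRQ(ierr);
        ierr = ISCreateStride(subcomm,nloc,first,1,&iscol);CHKERRQ(ierr);
        ierr = MatGetSubMatricesMPI(C,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&submat);CHKERRQ(ierr);
     */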
3261