/* xref: /petsc/src/mat/impls/aij/mpi/mpiaij.c (revision e106eecf280d2eadc5a817a2dc6d6d2b72b33833) */
1 #define PETSCMAT_DLL
2 
3 #include "../src/mat/impls/aij/mpi/mpiaij.h"   /*I "petscmat.h" I*/
4 #include "../src/inline/spops.h"
5 
6 #undef __FUNCT__
7 #define __FUNCT__ "MatDistribute_MPIAIJ"
8 /*
9     Distributes a SeqAIJ matrix across a set of processes. Code duplicated from
10     MatLoad_MPIAIJ(); there is an unfortunate lack of reuse. There should be a routine for each matrix type.
11 
12     Only for square matrices
13 */
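/*
   Usage sketch (illustrative only, not part of the documented API): assuming a fully
   assembled SeqAIJ matrix "seqmat" lives on process 0, every process passes its desired
   local row count m, and "dist" receives the resulting parallel matrix; the names
   seqmat, m, and dist are placeholders.

       Mat dist;
       ierr = MatDistribute_MPIAIJ(PETSC_COMM_WORLD,seqmat,m,MAT_INITIAL_MATRIX,&dist);CHKERRQ(ierr);
          ... change the numerical values of seqmat on process 0, keeping its nonzero pattern ...
       ierr = MatDistribute_MPIAIJ(PETSC_COMM_WORLD,seqmat,m,MAT_REUSE_MATRIX,&dist);CHKERRQ(ierr);
*/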
14 PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
15 {
16   PetscMPIInt    rank,size;
17   PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz,*gmataj,cnt,row,*ld;
18   PetscErrorCode ierr;
19   Mat            mat;
20   Mat_SeqAIJ     *gmata;
21   PetscMPIInt    tag;
22   MPI_Status     status;
23   PetscTruth     aij;
24   MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;
25 
26   PetscFunctionBegin;
27   CHKMEMQ;
28   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
29   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
30   if (!rank) {
31     ierr = PetscTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);CHKERRQ(ierr);
32     if (!aij) SETERRQ1(PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
33   }
34   if (reuse == MAT_INITIAL_MATRIX) {
35     ierr = MatCreate(comm,&mat);CHKERRQ(ierr);
36     ierr = MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
37     ierr = MatSetType(mat,MATAIJ);CHKERRQ(ierr);
38     ierr = PetscMalloc((size+1)*sizeof(PetscInt),&rowners);CHKERRQ(ierr);
39     ierr = PetscMalloc2(m,PetscInt,&dlens,m,PetscInt,&olens);CHKERRQ(ierr);
40     ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
41     rowners[0] = 0;
42     for (i=2; i<=size; i++) {
43       rowners[i] += rowners[i-1];
44     }
45     rstart = rowners[rank];
46     rend   = rowners[rank+1];
47     ierr   = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
48     if (!rank) {
49       gmata = (Mat_SeqAIJ*) gmat->data;
50       /* send row lengths to all processors */
51       for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
52       for (i=1; i<size; i++) {
53 	ierr = MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
54       }
55       /* determine the number of off-diagonal entries in each row, and how many lie left of the diagonal block */
56       ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
57       ierr = PetscMalloc(m*sizeof(PetscInt),&ld);CHKERRQ(ierr);
58       ierr = PetscMemzero(ld,m*sizeof(PetscInt));CHKERRQ(ierr);
59       jj = 0;
60       for (i=0; i<m; i++) {
61 	for (j=0; j<dlens[i]; j++) {
62           if (gmata->j[jj] < rstart) ld[i]++;
63 	  if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
64 	  jj++;
65 	}
66       }
67       /* send column indices to other processes */
68       for (i=1; i<size; i++) {
69 	nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
70 	ierr = MPI_Send(&nz,1,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
71 	ierr = MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
72       }
73 
74       /* send numerical values to other processes */
75       for (i=1; i<size; i++) {
76         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
77         ierr = MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
78       }
79       gmataa = gmata->a;
80       gmataj = gmata->j;
81 
82     } else {
83       /* receive row lengths */
84       ierr = MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
85       /* receive column indices */
86       ierr = MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
87       ierr = PetscMalloc2(nz,PetscScalar,&gmataa,nz,PetscInt,&gmataj);CHKERRQ(ierr);
88       ierr = MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
89       /* determine the number of off-diagonal entries in each row, and how many lie left of the diagonal block */
90       ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
91       ierr = PetscMalloc(m*sizeof(PetscInt),&ld);CHKERRQ(ierr);
92       ierr = PetscMemzero(ld,m*sizeof(PetscInt));CHKERRQ(ierr);
93       jj = 0;
94       for (i=0; i<m; i++) {
95 	for (j=0; j<dlens[i]; j++) {
96           if (gmataj[jj] < rstart) ld[i]++;
97 	  if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
98 	  jj++;
99 	}
100       }
101       /* receive numerical values */
102       ierr = PetscMemzero(gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr);
103       ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
104     }
105     /* set preallocation */
106     for (i=0; i<m; i++) {
107       dlens[i] -= olens[i];
108     }
109     ierr = MatSeqAIJSetPreallocation(mat,0,dlens);CHKERRQ(ierr);
110     ierr = MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);CHKERRQ(ierr);
111 
112     for (i=0; i<m; i++) {
113       dlens[i] += olens[i];
114     }
115     cnt  = 0;
116     for (i=0; i<m; i++) {
117       row  = rstart + i;
118       ierr = MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);CHKERRQ(ierr);
119       cnt += dlens[i];
120     }
121     if (rank) {
122       ierr = PetscFree2(gmataa,gmataj);CHKERRQ(ierr);
123     }
124     ierr = PetscFree2(dlens,olens);CHKERRQ(ierr);
125     ierr = PetscFree(rowners);CHKERRQ(ierr);
126     ((Mat_MPIAIJ*)(mat->data))->ld = ld;
127     *inmat = mat;
128   } else {   /* column indices are already set; only need to move over numerical values from process 0 */
129     Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
130     Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
131     mat   = *inmat;
132     ierr  = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
133     if (!rank) {
134       /* send numerical values to other processes */
135       gmata = (Mat_SeqAIJ*) gmat->data;
136       ierr   = MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);CHKERRQ(ierr);
137       gmataa = gmata->a;
138       for (i=1; i<size; i++) {
139         nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
140         ierr = MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
141       }
142       nz   = gmata->i[rowners[1]]-gmata->i[rowners[0]];
143     } else {
144       /* receive numerical values from process 0 */
145       nz   = Ad->nz + Ao->nz;
146       ierr = PetscMalloc(nz*sizeof(PetscScalar),&gmataa);CHKERRQ(ierr); gmataarestore = gmataa;
147       ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
148     }
149     /* transfer numerical values into the diagonal A and off diagonal B parts of mat */
150     ld = ((Mat_MPIAIJ*)(mat->data))->ld;
151     ad = Ad->a;
152     ao = Ao->a;
153     if (mat->rmap->n) {
154       i  = 0;
155       nz = ld[i];                                   ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
156       nz = Ad->i[i+1] - Ad->i[i];                   ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
157     }
158     for (i=1; i<mat->rmap->n; i++) {
159       nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
160       nz = Ad->i[i+1] - Ad->i[i];                   ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
161     }
162     i--;
163     if (mat->rmap->n) {
164       nz = Ao->i[i+1] - Ao->i[i] - ld[i];           ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
165     }
166     if (rank) {
167       ierr = PetscFree(gmataarestore);CHKERRQ(ierr);
168     }
169   }
170   ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
171   ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
172   CHKMEMQ;
173   PetscFunctionReturn(0);
174 }
175 
176 /*
177   Local utility routine that creates a mapping from the global column
178 number to the local number in the off-diagonal part of the local
179 storage of the matrix.  When PETSC_USE_CTABLE is used this is scalable at
180 a slightly higher hash table cost; without it, it is not scalable (each process
181 stores an order-N integer array), although access is then fast.
182 */
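/*
   Lookup sketch (illustrative): once the colmap exists, a global column index gcol that
   belongs to the off-diagonal block is translated to its local index lcol exactly as in
   MatSetValues_MPIAIJ() below; lcol == -1 means the column is absent.

     #if defined (PETSC_USE_CTABLE)
       ierr = PetscTableFind(aij->colmap,gcol+1,&lcol);CHKERRQ(ierr);
       lcol--;
     #else
       lcol = aij->colmap[gcol] - 1;
     #endif
*/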
183 #undef __FUNCT__
184 #define __FUNCT__ "CreateColmap_MPIAIJ_Private"
185 PetscErrorCode CreateColmap_MPIAIJ_Private(Mat mat)
186 {
187   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
188   PetscErrorCode ierr;
189   PetscInt       n = aij->B->cmap->n,i;
190 
191   PetscFunctionBegin;
192 #if defined (PETSC_USE_CTABLE)
193   ierr = PetscTableCreate(n,&aij->colmap);CHKERRQ(ierr);
194   for (i=0; i<n; i++){
195     ierr = PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1);CHKERRQ(ierr);
196   }
197 #else
198   ierr = PetscMalloc((mat->cmap->N+1)*sizeof(PetscInt),&aij->colmap);CHKERRQ(ierr);
199   ierr = PetscLogObjectMemory(mat,mat->cmap->N*sizeof(PetscInt));CHKERRQ(ierr);
200   ierr = PetscMemzero(aij->colmap,mat->cmap->N*sizeof(PetscInt));CHKERRQ(ierr);
201   for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
202 #endif
203   PetscFunctionReturn(0);
204 }
205 
206 
207 #define CHUNKSIZE   15
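/*
   Descriptive note: the two macros below insert a single (row,col,value) entry into the
   diagonal (A) or off-diagonal (B) sequential block of the local matrix. A short binary
   search narrows the search window, a linear scan locates the column, and if the column
   is not yet present the later entries of the row are shifted up to make room,
   reallocating with MatSeqXAIJReallocateAIJ() when the row is full.
*/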
208 #define MatSetValues_SeqAIJ_A_Private(row,col,value,addv) \
209 { \
210     if (col <= lastcol1) low1 = 0; else high1 = nrow1; \
211     lastcol1 = col;\
212     while (high1-low1 > 5) { \
213       t = (low1+high1)/2; \
214       if (rp1[t] > col) high1 = t; \
215       else             low1  = t; \
216     } \
217       for (_i=low1; _i<high1; _i++) { \
218         if (rp1[_i] > col) break; \
219         if (rp1[_i] == col) { \
220           if (addv == ADD_VALUES) ap1[_i] += value;   \
221           else                    ap1[_i] = value; \
222           goto a_noinsert; \
223         } \
224       }  \
225       if (value == 0.0 && ignorezeroentries) {low1 = 0; high1 = nrow1;goto a_noinsert;} \
226       if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}		\
227       if (nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
228       MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
229       N = nrow1++ - 1; a->nz++; high1++; \
230       /* shift up all the later entries in this row */ \
231       for (ii=N; ii>=_i; ii--) { \
232         rp1[ii+1] = rp1[ii]; \
233         ap1[ii+1] = ap1[ii]; \
234       } \
235       rp1[_i] = col;  \
236       ap1[_i] = value;  \
237       a_noinsert: ; \
238       ailen[row] = nrow1; \
239 }
240 
241 
242 #define MatSetValues_SeqAIJ_B_Private(row,col,value,addv) \
243 { \
244     if (col <= lastcol2) low2 = 0; else high2 = nrow2; \
245     lastcol2 = col;\
246     while (high2-low2 > 5) { \
247       t = (low2+high2)/2; \
248       if (rp2[t] > col) high2 = t; \
249       else             low2  = t; \
250     } \
251     for (_i=low2; _i<high2; _i++) {		\
252       if (rp2[_i] > col) break;			\
253       if (rp2[_i] == col) {			      \
254 	if (addv == ADD_VALUES) ap2[_i] += value;     \
255 	else                    ap2[_i] = value;      \
256 	goto b_noinsert;			      \
257       }						      \
258     }							      \
259     if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
260     if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}		\
261     if (nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%D, %D) into matrix", row, col); \
262     MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
263     N = nrow2++ - 1; b->nz++; high2++;					\
264     /* shift up all the later entries in this row */			\
265     for (ii=N; ii>=_i; ii--) {						\
266       rp2[ii+1] = rp2[ii];						\
267       ap2[ii+1] = ap2[ii];						\
268     }									\
269     rp2[_i] = col;							\
270     ap2[_i] = value;							\
271     b_noinsert: ;								\
272     bilen[row] = nrow2;							\
273 }
274 
275 #undef __FUNCT__
276 #define __FUNCT__ "MatSetValuesRow_MPIAIJ"
277 PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
278 {
279   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
280   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
281   PetscErrorCode ierr;
282   PetscInt       l,*garray = mat->garray,diag;
283 
284   PetscFunctionBegin;
285   /* code only works for square matrices A */
286 
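  /* Layout assumed for v[] (illustrative note): the caller supplies the entire local row
     with its nonzeros ordered by increasing global column, so it splits into
        [ columns left of the diagonal block | diagonal block columns | columns right of it ]
     and the three PetscMemcpy() calls below copy those pieces into B, A, and B respectively. */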
287   /* find size of row to the left of the diagonal part */
288   ierr = MatGetOwnershipRange(A,&diag,0);CHKERRQ(ierr);
289   row  = row - diag;
290   for (l=0; l<b->i[row+1]-b->i[row]; l++) {
291     if (garray[b->j[b->i[row]+l]] > diag) break;
292   }
293   ierr = PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));CHKERRQ(ierr);
294 
295   /* diagonal part */
296   ierr = PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));CHKERRQ(ierr);
297 
298   /* right of diagonal part */
299   ierr = PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));CHKERRQ(ierr);
300   PetscFunctionReturn(0);
301 }
302 
303 #undef __FUNCT__
304 #define __FUNCT__ "MatSetValues_MPIAIJ"
305 PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
306 {
307   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
308   PetscScalar    value;
309   PetscErrorCode ierr;
310   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
311   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
312   PetscTruth     roworiented = aij->roworiented;
313 
314   /* Some variables required by the MatSetValues_SeqAIJ_{A,B}_Private() macros */
315   Mat            A = aij->A;
316   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
317   PetscInt       *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
318   MatScalar      *aa = a->a;
319   PetscTruth     ignorezeroentries = a->ignorezeroentries;
320   Mat            B = aij->B;
321   Mat_SeqAIJ     *b = (Mat_SeqAIJ*)B->data;
322   PetscInt       *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
323   MatScalar      *ba = b->a;
324 
325   PetscInt       *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
326   PetscInt       nonew = a->nonew;
327   MatScalar      *ap1,*ap2;
328 
329   PetscFunctionBegin;
330   for (i=0; i<m; i++) {
331     if (im[i] < 0) continue;
332 #if defined(PETSC_USE_DEBUG)
333     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
334 #endif
335     if (im[i] >= rstart && im[i] < rend) {
336       row      = im[i] - rstart;
337       lastcol1 = -1;
338       rp1      = aj + ai[row];
339       ap1      = aa + ai[row];
340       rmax1    = aimax[row];
341       nrow1    = ailen[row];
342       low1     = 0;
343       high1    = nrow1;
344       lastcol2 = -1;
345       rp2      = bj + bi[row];
346       ap2      = ba + bi[row];
347       rmax2    = bimax[row];
348       nrow2    = bilen[row];
349       low2     = 0;
350       high2    = nrow2;
351 
352       for (j=0; j<n; j++) {
353         if (v) {if (roworiented) value = v[i*n+j]; else value = v[i+j*m];} else value = 0.0;
354         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
355         if (in[j] >= cstart && in[j] < cend){
356           col = in[j] - cstart;
357           MatSetValues_SeqAIJ_A_Private(row,col,value,addv);
358         } else if (in[j] < 0) continue;
359 #if defined(PETSC_USE_DEBUG)
360         else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
361 #endif
362         else {
363           if (mat->was_assembled) {
364             if (!aij->colmap) {
365               ierr = CreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
366             }
367 #if defined (PETSC_USE_CTABLE)
368             ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
369 	    col--;
370 #else
371             col = aij->colmap[in[j]] - 1;
372 #endif
373             if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
374               ierr = DisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
375               col =  in[j];
376               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
377               B = aij->B;
378               b = (Mat_SeqAIJ*)B->data;
379               bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
380               rp2      = bj + bi[row];
381               ap2      = ba + bi[row];
382               rmax2    = bimax[row];
383               nrow2    = bilen[row];
384               low2     = 0;
385               high2    = nrow2;
386               bm       = aij->B->rmap->n;
387               ba = b->a;
388             }
389           } else col = in[j];
390           MatSetValues_SeqAIJ_B_Private(row,col,value,addv);
391         }
392       }
393     } else {
394       if (!aij->donotstash) {
395         if (roworiented) {
396           if (ignorezeroentries && v[i*n] == 0.0) continue;
397           ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);CHKERRQ(ierr);
398         } else {
399           if (ignorezeroentries && v[i] == 0.0) continue;
400           ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);CHKERRQ(ierr);
401         }
402       }
403     }
404   }
405   PetscFunctionReturn(0);
406 }
407 
408 #undef __FUNCT__
409 #define __FUNCT__ "MatGetValues_MPIAIJ"
410 PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
411 {
412   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
413   PetscErrorCode ierr;
414   PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
415   PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
416 
417   PetscFunctionBegin;
418   for (i=0; i<m; i++) {
419     if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]);*/
420     if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
421     if (idxm[i] >= rstart && idxm[i] < rend) {
422       row = idxm[i] - rstart;
423       for (j=0; j<n; j++) {
424         if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
425         if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
426         if (idxn[j] >= cstart && idxn[j] < cend){
427           col = idxn[j] - cstart;
428           ierr = MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
429         } else {
430           if (!aij->colmap) {
431             ierr = CreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
432           }
433 #if defined (PETSC_USE_CTABLE)
434           ierr = PetscTableFind(aij->colmap,idxn[j]+1,&col);CHKERRQ(ierr);
435           col --;
436 #else
437           col = aij->colmap[idxn[j]] - 1;
438 #endif
439           if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
440           else {
441             ierr = MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
442           }
443         }
444       }
445     } else {
446       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
447     }
448   }
449   PetscFunctionReturn(0);
450 }
451 
452 #undef __FUNCT__
453 #define __FUNCT__ "MatAssemblyBegin_MPIAIJ"
454 PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
455 {
456   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
457   PetscErrorCode ierr;
458   PetscInt       nstash,reallocs;
459   InsertMode     addv;
460 
461   PetscFunctionBegin;
462   if (aij->donotstash) {
463     PetscFunctionReturn(0);
464   }
465 
466   /* make sure all processes are using either INSERT_VALUES or ADD_VALUES, not a mixture */
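  /* For example, if one process cached entries with INSERT_VALUES while another used
     ADD_VALUES, the bitwise OR computed below equals (ADD_VALUES|INSERT_VALUES) and the
     error check that follows fires. (Illustrative note.) */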
467   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,((PetscObject)mat)->comm);CHKERRQ(ierr);
468   if (addv == (ADD_VALUES|INSERT_VALUES)) {
469     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
470   }
471   mat->insertmode = addv; /* in case this processor had no cache */
472 
473   ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);CHKERRQ(ierr);
474   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
475   ierr = PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
476   PetscFunctionReturn(0);
477 }
478 
479 #undef __FUNCT__
480 #define __FUNCT__ "MatAssemblyEnd_MPIAIJ"
481 PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
482 {
483   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
484   Mat_SeqAIJ     *a=(Mat_SeqAIJ *)aij->A->data;
485   PetscErrorCode ierr;
486   PetscMPIInt    n;
487   PetscInt       i,j,rstart,ncols,flg;
488   PetscInt       *row,*col;
489   PetscTruth     other_disassembled;
490   PetscScalar    *val;
491   InsertMode     addv = mat->insertmode;
492 
493   /* do not use 'b = (Mat_SeqAIJ *)aij->B->data' as B can be reset in disassembly */
494   PetscFunctionBegin;
495   if (!aij->donotstash) {
496     while (1) {
497       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
498       if (!flg) break;
499 
500       for (i=0; i<n;) {
501         /* Now identify the consecutive vals belonging to the same row */
502         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
503         if (j < n) ncols = j-i;
504         else       ncols = n-i;
505         /* Now assemble all these values with a single function call */
506         ierr = MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
507         i = j;
508       }
509     }
510     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
511   }
512   a->compressedrow.use     = PETSC_FALSE;
513   ierr = MatAssemblyBegin(aij->A,mode);CHKERRQ(ierr);
514   ierr = MatAssemblyEnd(aij->A,mode);CHKERRQ(ierr);
515 
516   /* determine if any process has disassembled; if so, we must
517      also disassemble ourselves, in order that we may reassemble. */
518   /*
519      if the nonzero structure of submatrix B cannot change then we know that
520      no process has disassembled, so we can skip this step
521   */
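  /* Worked illustration: with was_assembled = {1,1,0,1} on four processes, the MPI_PROD
     reduction below yields other_disassembled = 0 on every process, so the three processes
     that were assembled also call DisAssemble_MPIAIJ() and all ranks reassemble together. */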
522   if (!((Mat_SeqAIJ*)aij->B->data)->nonew)  {
523     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,((PetscObject)mat)->comm);CHKERRQ(ierr);
524     if (mat->was_assembled && !other_disassembled) {
525       ierr = DisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
526     }
527   }
528   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
529     ierr = MatSetUpMultiply_MPIAIJ(mat);CHKERRQ(ierr);
530   }
531   ierr = MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);CHKERRQ(ierr);
532   ((Mat_SeqAIJ *)aij->B->data)->compressedrow.use = PETSC_TRUE; /* b->compressedrow.use */
533   ierr = MatAssemblyBegin(aij->B,mode);CHKERRQ(ierr);
534   ierr = MatAssemblyEnd(aij->B,mode);CHKERRQ(ierr);
535 
536   ierr = PetscFree(aij->rowvalues);CHKERRQ(ierr);
537   aij->rowvalues = 0;
538 
539   /* used by MatAXPY() */
540   a->xtoy = 0; ((Mat_SeqAIJ *)aij->B->data)->xtoy = 0;  /* b->xtoy = 0 */
541   a->XtoY = 0; ((Mat_SeqAIJ *)aij->B->data)->XtoY = 0;  /* b->XtoY = 0 */
542 
543   PetscFunctionReturn(0);
544 }
545 
546 #undef __FUNCT__
547 #define __FUNCT__ "MatZeroEntries_MPIAIJ"
548 PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
549 {
550   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;
551   PetscErrorCode ierr;
552 
553   PetscFunctionBegin;
554   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
555   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
556   PetscFunctionReturn(0);
557 }
558 
559 #undef __FUNCT__
560 #define __FUNCT__ "MatZeroRows_MPIAIJ"
561 PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag)
562 {
563   Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;
564   PetscErrorCode ierr;
565   PetscMPIInt    size = l->size,imdex,n,rank = l->rank,tag = ((PetscObject)A)->tag,lastidx = -1;
566   PetscInt       i,*owners = A->rmap->range;
567   PetscInt       *nprocs,j,idx,nsends,row;
568   PetscInt       nmax,*svalues,*starts,*owner,nrecvs;
569   PetscInt       *rvalues,count,base,slen,*source;
570   PetscInt       *lens,*lrows,*values,rstart=A->rmap->rstart;
571   MPI_Comm       comm = ((PetscObject)A)->comm;
572   MPI_Request    *send_waits,*recv_waits;
573   MPI_Status     recv_status,*send_status;
574 #if defined(PETSC_DEBUG)
575   PetscTruth     found = PETSC_FALSE;
576 #endif
577 
578   PetscFunctionBegin;
579   /*  first count number of contributors to each processor */
580   ierr = PetscMalloc(2*size*sizeof(PetscInt),&nprocs);CHKERRQ(ierr);
581   ierr = PetscMemzero(nprocs,2*size*sizeof(PetscInt));CHKERRQ(ierr);
582   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&owner);CHKERRQ(ierr); /* see note*/
583   j = 0;
584   for (i=0; i<N; i++) {
585     if (lastidx > (idx = rows[i])) j = 0;
586     lastidx = idx;
587     for (; j<size; j++) {
588       if (idx >= owners[j] && idx < owners[j+1]) {
589         nprocs[2*j]++;
590         nprocs[2*j+1] = 1;
591         owner[i] = j;
592 #if defined(PETSC_DEBUG)
593         found = PETSC_TRUE;
594 #endif
595         break;
596       }
597     }
598 #if defined(PETSC_DEBUG)
599     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
600     found = PETSC_FALSE;
601 #endif
602   }
603   nsends = 0;  for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
604 
605   /* inform other processors of number of messages and max length*/
606   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
607 
608   /* post receives:   */
609   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(PetscInt),&rvalues);CHKERRQ(ierr);
610   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
611   for (i=0; i<nrecvs; i++) {
612     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPIU_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
613   }
614 
615   /* do sends:
616       1) starts[i] gives the starting index in svalues for the rows going to
617          the ith process
618   */
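  /* Small worked example (illustrative, 3 processes): if this process sends 2 rows to rank 0,
     none to rank 1, and 3 rows to rank 2, i.e. nprocs[0],nprocs[2],nprocs[4] = 2,0,3, then
     starts[] = {0,2,2} and svalues is packed as [ 2 rows for rank 0 | 3 rows for rank 2 ]. */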
619   ierr = PetscMalloc((N+1)*sizeof(PetscInt),&svalues);CHKERRQ(ierr);
620   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
621   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&starts);CHKERRQ(ierr);
622   starts[0] = 0;
623   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
624   for (i=0; i<N; i++) {
625     svalues[starts[owner[i]]++] = rows[i];
626   }
627 
628   starts[0] = 0;
629   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
630   count = 0;
631   for (i=0; i<size; i++) {
632     if (nprocs[2*i+1]) {
633       ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPIU_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
634     }
635   }
636   ierr = PetscFree(starts);CHKERRQ(ierr);
637 
638   base = owners[rank];
639 
640   /*  wait on receives */
641   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(PetscInt),&lens);CHKERRQ(ierr);
642   source = lens + nrecvs;
643   count  = nrecvs; slen = 0;
644   while (count) {
645     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
646     /* unpack receives into our local space */
647     ierr = MPI_Get_count(&recv_status,MPIU_INT,&n);CHKERRQ(ierr);
648     source[imdex]  = recv_status.MPI_SOURCE;
649     lens[imdex]    = n;
650     slen          += n;
651     count--;
652   }
653   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
654 
655   /* move the data into the send scatter */
656   ierr = PetscMalloc((slen+1)*sizeof(PetscInt),&lrows);CHKERRQ(ierr);
657   count = 0;
658   for (i=0; i<nrecvs; i++) {
659     values = rvalues + i*nmax;
660     for (j=0; j<lens[i]; j++) {
661       lrows[count++] = values[j] - base;
662     }
663   }
664   ierr = PetscFree(rvalues);CHKERRQ(ierr);
665   ierr = PetscFree(lens);CHKERRQ(ierr);
666   ierr = PetscFree(owner);CHKERRQ(ierr);
667   ierr = PetscFree(nprocs);CHKERRQ(ierr);
668 
669   /* actually zap the local rows */
670   /*
671         Zero the required rows. If the "diagonal block" of the matrix
672      is square and the user wishes to set the diagonal, we use separate
673      code so that MatSetValues() is not called for each diagonal entry (which
674      would allocate new memory, triggering many mallocs and slowing things down).
675 
676        Contributed by: Matthew Knepley
677   */
678   /* must zero l->B before l->A because the (diag) case below may put values into l->B */
679   ierr = MatZeroRows(l->B,slen,lrows,0.0);CHKERRQ(ierr);
680   if ((diag != 0.0) && (l->A->rmap->N == l->A->cmap->N)) {
681     ierr      = MatZeroRows(l->A,slen,lrows,diag);CHKERRQ(ierr);
682   } else if (diag != 0.0) {
683     ierr = MatZeroRows(l->A,slen,lrows,0.0);CHKERRQ(ierr);
684     if (((Mat_SeqAIJ*)l->A->data)->nonew) {
685       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options\n\
686 MAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
687     }
688     for (i = 0; i < slen; i++) {
689       row  = lrows[i] + rstart;
690       ierr = MatSetValues(A,1,&row,1,&row,&diag,INSERT_VALUES);CHKERRQ(ierr);
691     }
692     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
693     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
694   } else {
695     ierr = MatZeroRows(l->A,slen,lrows,0.0);CHKERRQ(ierr);
696   }
697   ierr = PetscFree(lrows);CHKERRQ(ierr);
698 
699   /* wait on sends */
700   if (nsends) {
701     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
702     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
703     ierr = PetscFree(send_status);CHKERRQ(ierr);
704   }
705   ierr = PetscFree(send_waits);CHKERRQ(ierr);
706   ierr = PetscFree(svalues);CHKERRQ(ierr);
707 
708   PetscFunctionReturn(0);
709 }
710 
711 #undef __FUNCT__
712 #define __FUNCT__ "MatMult_MPIAIJ"
713 PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
714 {
715   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
716   PetscErrorCode ierr;
717   PetscInt       nt;
718 
719   PetscFunctionBegin;
720   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
721   if (nt != A->cmap->n) {
722     SETERRQ2(PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
723   }
724   ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
725   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
726   ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
727   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
728   PetscFunctionReturn(0);
729 }
730 
731 #undef __FUNCT__
732 #define __FUNCT__ "MatMultAdd_MPIAIJ"
733 PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
734 {
735   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
736   PetscErrorCode ierr;
737 
738   PetscFunctionBegin;
739   ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
740   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
741   ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
742   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
743   PetscFunctionReturn(0);
744 }
745 
746 #undef __FUNCT__
747 #define __FUNCT__ "MatMultTranspose_MPIAIJ"
748 PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
749 {
750   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
751   PetscErrorCode ierr;
752   PetscTruth     merged;
753 
754   PetscFunctionBegin;
755   ierr = VecScatterGetMerged(a->Mvctx,&merged);CHKERRQ(ierr);
756   /* do nondiagonal part */
757   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
758   if (!merged) {
759     /* send it on its way */
760     ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
761     /* do local part */
762     ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
763     /* receive remote parts: note this assumes the values are not actually */
764     /* added into yy until the next line */
765     ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
766   } else {
767     /* do local part */
768     ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
769     /* send it on its way */
770     ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
771     /* values actually were received in the Begin() but we need to call this nop */
772     ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
773   }
774   PetscFunctionReturn(0);
775 }
776 
777 EXTERN_C_BEGIN
778 #undef __FUNCT__
779 #define __FUNCT__ "MatIsTranspose_MPIAIJ"
780 PetscErrorCode PETSCMAT_DLLEXPORT MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscTruth *f)
781 {
782   MPI_Comm       comm;
783   Mat_MPIAIJ     *Aij = (Mat_MPIAIJ *) Amat->data, *Bij;
784   Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
785   IS             Me,Notme;
786   PetscErrorCode ierr;
787   PetscInt       M,N,first,last,*notme,i;
788   PetscMPIInt    size;
789 
790   PetscFunctionBegin;
791 
792   /* Easy test: are the diagonal blocks transposes of each other? */
793   Bij = (Mat_MPIAIJ *) Bmat->data; Bdia = Bij->A;
794   ierr = MatIsTranspose(Adia,Bdia,tol,f);CHKERRQ(ierr);
795   if (!*f) PetscFunctionReturn(0);
796   ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr);
797   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
798   if (size == 1) PetscFunctionReturn(0);
799 
800   /* Hard test: off-diagonal block. This takes a MatGetSubMatrix. */
801   ierr = MatGetSize(Amat,&M,&N);CHKERRQ(ierr);
802   ierr = MatGetOwnershipRange(Amat,&first,&last);CHKERRQ(ierr);
803   ierr = PetscMalloc((N-last+first)*sizeof(PetscInt),&notme);CHKERRQ(ierr);
804   for (i=0; i<first; i++) notme[i] = i;
805   for (i=last; i<M; i++) notme[i-last+first] = i;
806   ierr = ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,&Notme);CHKERRQ(ierr);
807   ierr = ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);CHKERRQ(ierr);
808   ierr = MatGetSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);CHKERRQ(ierr);
809   Aoff = Aoffs[0];
810   ierr = MatGetSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);CHKERRQ(ierr);
811   Boff = Boffs[0];
812   ierr = MatIsTranspose(Aoff,Boff,tol,f);CHKERRQ(ierr);
813   ierr = MatDestroyMatrices(1,&Aoffs);CHKERRQ(ierr);
814   ierr = MatDestroyMatrices(1,&Boffs);CHKERRQ(ierr);
815   ierr = ISDestroy(Me);CHKERRQ(ierr);
816   ierr = ISDestroy(Notme);CHKERRQ(ierr);
817 
818   PetscFunctionReturn(0);
819 }
820 EXTERN_C_END
821 
822 #undef __FUNCT__
823 #define __FUNCT__ "MatMultTransposeAdd_MPIAIJ"
824 PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
825 {
826   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
827   PetscErrorCode ierr;
828 
829   PetscFunctionBegin;
830   /* do nondiagonal part */
831   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
832   /* send it on its way */
833   ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
834   /* do local part */
835   ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
836   /* receive remote parts */
837   ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
838   PetscFunctionReturn(0);
839 }
840 
841 /*
842   This only works correctly for square matrices where the subblock A->A is the
843    diagonal block
844 */
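/*
   Usage sketch (illustrative): assuming v is a parallel vector whose layout matches the
   rows of A, the diagonal is obtained with

       ierr = MatGetDiagonal(A,v);CHKERRQ(ierr);

   which, for MPIAIJ, simply extracts the diagonal of the local diagonal block a->A.
*/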
845 #undef __FUNCT__
846 #define __FUNCT__ "MatGetDiagonal_MPIAIJ"
847 PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
848 {
849   PetscErrorCode ierr;
850   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
851 
852   PetscFunctionBegin;
853   if (A->rmap->N != A->cmap->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
854   if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) {
855     SETERRQ(PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
856   }
857   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
858   PetscFunctionReturn(0);
859 }
860 
861 #undef __FUNCT__
862 #define __FUNCT__ "MatScale_MPIAIJ"
863 PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
864 {
865   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
866   PetscErrorCode ierr;
867 
868   PetscFunctionBegin;
869   ierr = MatScale(a->A,aa);CHKERRQ(ierr);
870   ierr = MatScale(a->B,aa);CHKERRQ(ierr);
871   PetscFunctionReturn(0);
872 }
873 
874 #undef __FUNCT__
875 #define __FUNCT__ "MatDestroy_MPIAIJ"
876 PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
877 {
878   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
879   PetscErrorCode ierr;
880 
881   PetscFunctionBegin;
882 #if defined(PETSC_USE_LOG)
883   PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
884 #endif
885   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
886   ierr = MatDestroy(aij->A);CHKERRQ(ierr);
887   ierr = MatDestroy(aij->B);CHKERRQ(ierr);
888 #if defined (PETSC_USE_CTABLE)
889   if (aij->colmap) {ierr = PetscTableDestroy(aij->colmap);CHKERRQ(ierr);}
890 #else
891   ierr = PetscFree(aij->colmap);CHKERRQ(ierr);
892 #endif
893   ierr = PetscFree(aij->garray);CHKERRQ(ierr);
894   if (aij->lvec)   {ierr = VecDestroy(aij->lvec);CHKERRQ(ierr);}
895   if (aij->Mvctx)  {ierr = VecScatterDestroy(aij->Mvctx);CHKERRQ(ierr);}
896   ierr = PetscFree(aij->rowvalues);CHKERRQ(ierr);
897   ierr = PetscFree(aij->ld);CHKERRQ(ierr);
898   ierr = PetscFree(aij);CHKERRQ(ierr);
899 
900   ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
901   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C","",PETSC_NULL);CHKERRQ(ierr);
902   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C","",PETSC_NULL);CHKERRQ(ierr);
903   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatGetDiagonalBlock_C","",PETSC_NULL);CHKERRQ(ierr);
904   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C","",PETSC_NULL);CHKERRQ(ierr);
905   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C","",PETSC_NULL);CHKERRQ(ierr);
906   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C","",PETSC_NULL);CHKERRQ(ierr);
907   ierr = PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C","",PETSC_NULL);CHKERRQ(ierr);
908   PetscFunctionReturn(0);
909 }
910 
911 #undef __FUNCT__
912 #define __FUNCT__ "MatView_MPIAIJ_Binary"
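/*
   Descriptive note: the binary file written below consists of a 4-entry header
   {MAT_FILE_COOKIE, global rows, global columns, global nonzero count}, followed by all
   row lengths, then all (global) column indices, then all numerical values; process 0
   gathers each block from the other processes and writes it with PetscBinaryWrite().
*/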
913 PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
914 {
915   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
916   Mat_SeqAIJ*       A = (Mat_SeqAIJ*)aij->A->data;
917   Mat_SeqAIJ*       B = (Mat_SeqAIJ*)aij->B->data;
918   PetscErrorCode    ierr;
919   PetscMPIInt       rank,size,tag = ((PetscObject)viewer)->tag;
920   int               fd;
921   PetscInt          nz,header[4],*row_lengths,*range=0,rlen,i;
922   PetscInt          nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz;
923   PetscScalar       *column_values;
924 
925   PetscFunctionBegin;
926   ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&rank);CHKERRQ(ierr);
927   ierr = MPI_Comm_size(((PetscObject)mat)->comm,&size);CHKERRQ(ierr);
928   nz   = A->nz + B->nz;
929   if (!rank) {
930     header[0] = MAT_FILE_COOKIE;
931     header[1] = mat->rmap->N;
932     header[2] = mat->cmap->N;
933     ierr = MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);CHKERRQ(ierr);
934     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
935     ierr = PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
936     /* get largest number of rows any processor has */
937     rlen = mat->rmap->n;
938     range = mat->rmap->range;
939     for (i=1; i<size; i++) {
940       rlen = PetscMax(rlen,range[i+1] - range[i]);
941     }
942   } else {
943     ierr = MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,((PetscObject)mat)->comm);CHKERRQ(ierr);
944     rlen = mat->rmap->n;
945   }
946 
947   /* load up the local row counts */
948   ierr = PetscMalloc((rlen+1)*sizeof(PetscInt),&row_lengths);CHKERRQ(ierr);
949   for (i=0; i<mat->rmap->n; i++) {
950     row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];
951   }
952 
953   /* store the row lengths to the file */
954   if (!rank) {
955     MPI_Status status;
956     ierr = PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
957     for (i=1; i<size; i++) {
958       rlen = range[i+1] - range[i];
959       ierr = MPI_Recv(row_lengths,rlen,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
960       ierr = PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
961     }
962   } else {
963     ierr = MPI_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
964   }
965   ierr = PetscFree(row_lengths);CHKERRQ(ierr);
966 
967   /* load up the local column indices */
968   nzmax = nz; /* the 0th process needs as much space as the largest process needs */
969   ierr = MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,((PetscObject)mat)->comm);CHKERRQ(ierr);
970   ierr = PetscMalloc((nzmax+1)*sizeof(PetscInt),&column_indices);CHKERRQ(ierr);
971   cnt  = 0;
972   for (i=0; i<mat->rmap->n; i++) {
973     for (j=B->i[i]; j<B->i[i+1]; j++) {
974       if ( (col = garray[B->j[j]]) > cstart) break;
975       column_indices[cnt++] = col;
976     }
977     for (k=A->i[i]; k<A->i[i+1]; k++) {
978       column_indices[cnt++] = A->j[k] + cstart;
979     }
980     for (; j<B->i[i+1]; j++) {
981       column_indices[cnt++] = garray[B->j[j]];
982     }
983   }
984   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
985 
986   /* store the column indices to the file */
987   if (!rank) {
988     MPI_Status status;
989     ierr = PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
990     for (i=1; i<size; i++) {
991       ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
992       if (rnz > nzmax) SETERRQ2(PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
993       ierr = MPI_Recv(column_indices,rnz,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
994       ierr = PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
995     }
996   } else {
997     ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
998     ierr = MPI_Send(column_indices,nz,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
999   }
1000   ierr = PetscFree(column_indices);CHKERRQ(ierr);
1001 
1002   /* load up the local column values */
1003   ierr = PetscMalloc((nzmax+1)*sizeof(PetscScalar),&column_values);CHKERRQ(ierr);
1004   cnt  = 0;
1005   for (i=0; i<mat->rmap->n; i++) {
1006     for (j=B->i[i]; j<B->i[i+1]; j++) {
1007       if ( garray[B->j[j]] > cstart) break;
1008       column_values[cnt++] = B->a[j];
1009     }
1010     for (k=A->i[i]; k<A->i[i+1]; k++) {
1011       column_values[cnt++] = A->a[k];
1012     }
1013     for (; j<B->i[i+1]; j++) {
1014       column_values[cnt++] = B->a[j];
1015     }
1016   }
1017   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1018 
1019   /* store the column values to the file */
1020   if (!rank) {
1021     MPI_Status status;
1022     ierr = PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
1023     for (i=1; i<size; i++) {
1024       ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
1025       if (rnz > nzmax) SETERRQ2(PETSC_ERR_LIB,"Internal PETSc error: rnz = %D nzmax = %D",rnz,nzmax);
1026       ierr = MPI_Recv(column_values,rnz,MPIU_SCALAR,i,tag,((PetscObject)mat)->comm,&status);CHKERRQ(ierr);
1027       ierr = PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
1028     }
1029   } else {
1030     ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
1031     ierr = MPI_Send(column_values,nz,MPIU_SCALAR,0,tag,((PetscObject)mat)->comm);CHKERRQ(ierr);
1032   }
1033   ierr = PetscFree(column_values);CHKERRQ(ierr);
1034   PetscFunctionReturn(0);
1035 }
1036 
1037 #undef __FUNCT__
1038 #define __FUNCT__ "MatView_MPIAIJ_ASCIIorDraworSocket"
1039 PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1040 {
1041   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1042   PetscErrorCode    ierr;
1043   PetscMPIInt       rank = aij->rank,size = aij->size;
1044   PetscTruth        isdraw,iascii,isbinary;
1045   PetscViewer       sviewer;
1046   PetscViewerFormat format;
1047 
1048   PetscFunctionBegin;
1049   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
1050   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
1051   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
1052   if (iascii) {
1053     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
1054     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1055       MatInfo    info;
1056       PetscTruth inodes;
1057 
1058       ierr = MPI_Comm_rank(((PetscObject)mat)->comm,&rank);CHKERRQ(ierr);
1059       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
1060       ierr = MatInodeGetInodeSizes(aij->A,PETSC_NULL,(PetscInt **)&inodes,PETSC_NULL);CHKERRQ(ierr);
1061       if (!inodes) {
1062         ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n",
1063 					      rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
1064       } else {
1065         ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n",
1066 		    rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
1067       }
1068       ierr = MatGetInfo(aij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
1069       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
1070       ierr = MatGetInfo(aij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
1071       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
1072       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
1073       ierr = PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");CHKERRQ(ierr);
1074       ierr = VecScatterView(aij->Mvctx,viewer);CHKERRQ(ierr);
1075       PetscFunctionReturn(0);
1076     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1077       PetscInt   inodecount,inodelimit,*inodes;
1078       ierr = MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);CHKERRQ(ierr);
1079       if (inodes) {
1080         ierr = PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);CHKERRQ(ierr);
1081       } else {
1082         ierr = PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");CHKERRQ(ierr);
1083       }
1084       PetscFunctionReturn(0);
1085     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1086       PetscFunctionReturn(0);
1087     }
1088   } else if (isbinary) {
1089     if (size == 1) {
1090       ierr = PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1091       ierr = MatView(aij->A,viewer);CHKERRQ(ierr);
1092     } else {
1093       ierr = MatView_MPIAIJ_Binary(mat,viewer);CHKERRQ(ierr);
1094     }
1095     PetscFunctionReturn(0);
1096   } else if (isdraw) {
1097     PetscDraw  draw;
1098     PetscTruth isnull;
1099     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
1100     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
1101   }
1102 
1103   if (size == 1) {
1104     ierr = PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1105     ierr = MatView(aij->A,viewer);CHKERRQ(ierr);
1106   } else {
1107     /* assemble the entire matrix onto first processor. */
1108     Mat         A;
1109     Mat_SeqAIJ  *Aloc;
1110     PetscInt    M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
1111     MatScalar   *a;
1112 
1113     if (mat->rmap->N > 1024) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"ASCII matrix output not allowed for matrices with more than 1024 rows, use binary format instead");
1114 
1115     ierr = MatCreate(((PetscObject)mat)->comm,&A);CHKERRQ(ierr);
1116     if (!rank) {
1117       ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
1118     } else {
1119       ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
1120     }
1121     /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
1122     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
1123     ierr = MatMPIAIJSetPreallocation(A,0,PETSC_NULL,0,PETSC_NULL);CHKERRQ(ierr);
1124     ierr = PetscLogObjectParent(mat,A);CHKERRQ(ierr);
1125 
1126     /* copy over the A part */
1127     Aloc = (Mat_SeqAIJ*)aij->A->data;
1128     m = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1129     row = mat->rmap->rstart;
1130     for (i=0; i<ai[m]; i++) {aj[i] += mat->cmap->rstart ;}
1131     for (i=0; i<m; i++) {
1132       ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);CHKERRQ(ierr);
1133       row++; a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
1134     }
1135     aj = Aloc->j;
1136     for (i=0; i<ai[m]; i++) {aj[i] -= mat->cmap->rstart;}
1137 
1138     /* copy over the B part */
1139     Aloc = (Mat_SeqAIJ*)aij->B->data;
1140     m    = aij->B->rmap->n;  ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1141     row  = mat->rmap->rstart;
1142     ierr = PetscMalloc((ai[m]+1)*sizeof(PetscInt),&cols);CHKERRQ(ierr);
1143     ct   = cols;
1144     for (i=0; i<ai[m]; i++) {cols[i] = aij->garray[aj[i]];}
1145     for (i=0; i<m; i++) {
1146       ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);CHKERRQ(ierr);
1147       row++; a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
1148     }
1149     ierr = PetscFree(ct);CHKERRQ(ierr);
1150     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1151     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1152     /*
1153        Everyone has to call to draw the matrix since the graphics waits are
1154        synchronized across all processors that share the PetscDraw object
1155     */
1156     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
1157     if (!rank) {
1158       ierr = PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1159       ierr = MatView(((Mat_MPIAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
1160     }
1161     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
1162     ierr = MatDestroy(A);CHKERRQ(ierr);
1163   }
1164   PetscFunctionReturn(0);
1165 }
1166 
1167 #undef __FUNCT__
1168 #define __FUNCT__ "MatView_MPIAIJ"
1169 PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1170 {
1171   PetscErrorCode ierr;
1172   PetscTruth     iascii,isdraw,issocket,isbinary;
1173 
1174   PetscFunctionBegin;
1175   ierr  = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&iascii);CHKERRQ(ierr);
1176   ierr  = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
1177   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
1178   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
1179   if (iascii || isdraw || isbinary || issocket) {
1180     ierr = MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
1181   } else {
1182     SETERRQ1(PETSC_ERR_SUP,"Viewer type %s not supported by MPIAIJ matrices",((PetscObject)viewer)->type_name);
1183   }
1184   PetscFunctionReturn(0);
1185 }
1186 
1187 #undef __FUNCT__
1188 #define __FUNCT__ "MatRelax_MPIAIJ"
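/*
   Descriptive note: each outer iteration below scatters the current solution so that the
   off-process entries are available in mat->lvec, forms bb1 = bb - B*x (moving the
   off-diagonal coupling to the right-hand side), and then applies the requested local SOR
   sweep to the on-process diagonal block mat->A.
*/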
1189 PetscErrorCode MatRelax_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1190 {
1191   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1192   PetscErrorCode ierr;
1193   Vec            bb1;
1194 
1195   PetscFunctionBegin;
1196   ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
1197 
1198   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
1199     if (flag & SOR_ZERO_INITIAL_GUESS) {
1200       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);CHKERRQ(ierr);
1201       its--;
1202     }
1203 
1204     while (its--) {
1205       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1206       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1207 
1208       /* update rhs: bb1 = bb - B*x */
1209       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1210       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1211 
1212       /* local sweep */
1213       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);CHKERRQ(ierr);
1214     }
1215   } else if (flag & SOR_LOCAL_FORWARD_SWEEP){
1216     if (flag & SOR_ZERO_INITIAL_GUESS) {
1217       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1218       its--;
1219     }
1220     while (its--) {
1221       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1222       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1223 
1224       /* update rhs: bb1 = bb - B*x */
1225       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1226       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1227 
1228       /* local sweep */
1229       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1230     }
1231   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP){
1232     if (flag & SOR_ZERO_INITIAL_GUESS) {
1233       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1234       its--;
1235     }
1236     while (its--) {
1237       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1238       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1239 
1240       /* update rhs: bb1 = bb - B*x */
1241       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1242       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1243 
1244       /* local sweep */
1245       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
1246     }
1247   } else {
1248     SETERRQ(PETSC_ERR_SUP,"Parallel SOR not supported");
1249   }
1250 
1251   ierr = VecDestroy(bb1);CHKERRQ(ierr);
1252   PetscFunctionReturn(0);
1253 }
1254 
1255 #undef __FUNCT__
1256 #define __FUNCT__ "MatPermute_MPIAIJ"
1257 PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1258 {
1259   MPI_Comm       comm,pcomm;
1260   PetscInt       first,local_size,nrows;
1261   const PetscInt *rows;
1262   int            ntids;
1263   IS             crowp,growp,irowp,lrowp,lcolp,icolp;
1264   PetscErrorCode ierr;
1265 
1266   PetscFunctionBegin;
1267   ierr = PetscObjectGetComm((PetscObject)A,&comm); CHKERRQ(ierr);
1268   /* make a collective version of 'rowp' */
1269   ierr = PetscObjectGetComm((PetscObject)rowp,&pcomm); CHKERRQ(ierr);
1270   if (pcomm==comm) {
1271     crowp = rowp;
1272   } else {
1273     ierr = ISGetSize(rowp,&nrows); CHKERRQ(ierr);
1274     ierr = ISGetIndices(rowp,&rows); CHKERRQ(ierr);
1275     ierr = ISCreateGeneral(comm,nrows,rows,&crowp); CHKERRQ(ierr);
1276     ierr = ISRestoreIndices(rowp,&rows); CHKERRQ(ierr);
1277   }
1278   /* collect the global row permutation and invert it */
1279   ierr = ISAllGather(crowp,&growp); CHKERRQ(ierr);
1280   ierr = ISSetPermutation(growp); CHKERRQ(ierr);
1281   if (pcomm!=comm) {
1282     ierr = ISDestroy(crowp); CHKERRQ(ierr);
1283   }
1284   ierr = ISInvertPermutation(growp,PETSC_DECIDE,&irowp);CHKERRQ(ierr);
1285   /* get the local target indices */
1286   ierr = MatGetOwnershipRange(A,&first,PETSC_NULL); CHKERRQ(ierr);
1287   ierr = MatGetLocalSize(A,&local_size,PETSC_NULL); CHKERRQ(ierr);
1288   ierr = ISGetIndices(irowp,&rows); CHKERRQ(ierr);
1289   ierr = ISCreateGeneral(MPI_COMM_SELF,local_size,rows+first,&lrowp); CHKERRQ(ierr);
1290   ierr = ISRestoreIndices(irowp,&rows); CHKERRQ(ierr);
1291   ierr = ISDestroy(irowp); CHKERRQ(ierr);
1292   /* the column permutation is so much easier;
1293      make a local version of 'colp' and invert it */
1294   ierr = PetscObjectGetComm((PetscObject)colp,&pcomm); CHKERRQ(ierr);
1295   ierr = MPI_Comm_size(pcomm,&ntids); CHKERRQ(ierr);
1296   if (ntids==1) {
1297     lcolp = colp;
1298   } else {
1299     ierr = ISGetSize(colp,&nrows); CHKERRQ(ierr);
1300     ierr = ISGetIndices(colp,&rows); CHKERRQ(ierr);
1301     ierr = ISCreateGeneral(MPI_COMM_SELF,nrows,rows,&lcolp); CHKERRQ(ierr);
1302   }
1303   ierr = ISSetPermutation(lcolp); CHKERRQ(ierr);
1304   ierr = ISInvertPermutation(lcolp,PETSC_DECIDE,&icolp); CHKERRQ(ierr);
1305   if (ntids>1) {
1306     ierr = ISRestoreIndices(colp,&rows); CHKERRQ(ierr);
1307     ierr = ISDestroy(lcolp); CHKERRQ(ierr);
1308   }
1309   /* now we just get the submatrix */
1310   ierr = MatGetSubMatrix(A,lrowp,icolp,local_size,MAT_INITIAL_MATRIX,B); CHKERRQ(ierr);
1311   /* clean up */
1312   ierr = ISDestroy(lrowp); CHKERRQ(ierr);
1313   ierr = ISDestroy(icolp); CHKERRQ(ierr);
1314   PetscFunctionReturn(0);
1315 }
1316 
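/*
   A minimal usage sketch (illustrative only): permute a parallel AIJ matrix with index
   sets that each process builds for its own rows. 'A', 'nlocal' and the index arrays
   'rowperm'/'colperm' holding the target global indices are assumptions.

     IS  rowp,colp;
     Mat Aperm;
     ierr = ISCreateGeneral(((PetscObject)A)->comm,nlocal,rowperm,&rowp);CHKERRQ(ierr);
     ierr = ISCreateGeneral(((PetscObject)A)->comm,nlocal,colperm,&colp);CHKERRQ(ierr);
     ierr = ISSetPermutation(rowp);CHKERRQ(ierr);
     ierr = ISSetPermutation(colp);CHKERRQ(ierr);
     ierr = MatPermute(A,rowp,colp,&Aperm);CHKERRQ(ierr);
     ierr = ISDestroy(rowp);CHKERRQ(ierr);
     ierr = ISDestroy(colp);CHKERRQ(ierr);
*/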
1317 #undef __FUNCT__
1318 #define __FUNCT__ "MatGetInfo_MPIAIJ"
1319 PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1320 {
1321   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1322   Mat            A = mat->A,B = mat->B;
1323   PetscErrorCode ierr;
1324   PetscReal      isend[5],irecv[5];
1325 
1326   PetscFunctionBegin;
1327   info->block_size     = 1.0;
1328   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1329   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1330   isend[3] = info->memory;  isend[4] = info->mallocs;
1331   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1332   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1333   isend[3] += info->memory;  isend[4] += info->mallocs;
1334   if (flag == MAT_LOCAL) {
1335     info->nz_used      = isend[0];
1336     info->nz_allocated = isend[1];
1337     info->nz_unneeded  = isend[2];
1338     info->memory       = isend[3];
1339     info->mallocs      = isend[4];
1340   } else if (flag == MAT_GLOBAL_MAX) {
1341     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,((PetscObject)matin)->comm);CHKERRQ(ierr);
1342     info->nz_used      = irecv[0];
1343     info->nz_allocated = irecv[1];
1344     info->nz_unneeded  = irecv[2];
1345     info->memory       = irecv[3];
1346     info->mallocs      = irecv[4];
1347   } else if (flag == MAT_GLOBAL_SUM) {
1348     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,((PetscObject)matin)->comm);CHKERRQ(ierr);
1349     info->nz_used      = irecv[0];
1350     info->nz_allocated = irecv[1];
1351     info->nz_unneeded  = irecv[2];
1352     info->memory       = irecv[3];
1353     info->mallocs      = irecv[4];
1354   }
1355   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1356   info->fill_ratio_needed = 0;
1357   info->factor_mallocs    = 0;
1358 
1359   PetscFunctionReturn(0);
1360 }
1361 
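/*
   A minimal usage sketch (illustrative only; 'A' is an assumed assembled matrix):
   query the global nonzero and memory statistics accumulated above.

     MatInfo info;
     ierr = MatGetInfo(A,MAT_GLOBAL_SUM,&info);CHKERRQ(ierr);
     ierr = PetscPrintf(((PetscObject)A)->comm,"nz used %g, nz allocated %g, mallocs %g\n",
                        info.nz_used,info.nz_allocated,info.mallocs);CHKERRQ(ierr);
*/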
1362 #undef __FUNCT__
1363 #define __FUNCT__ "MatSetOption_MPIAIJ"
1364 PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscTruth flg)
1365 {
1366   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1367   PetscErrorCode ierr;
1368 
1369   PetscFunctionBegin;
1370   switch (op) {
1371   case MAT_NEW_NONZERO_LOCATIONS:
1372   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1373   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1374   case MAT_KEEP_ZEROED_ROWS:
1375   case MAT_NEW_NONZERO_LOCATION_ERR:
1376   case MAT_USE_INODES:
1377   case MAT_IGNORE_ZERO_ENTRIES:
1378     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1379     ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
1380     break;
1381   case MAT_ROW_ORIENTED:
1382     a->roworiented = flg;
1383     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1384     ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
1385     break;
1386   case MAT_NEW_DIAGONALS:
1387     ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
1388     break;
1389   case MAT_IGNORE_OFF_PROC_ENTRIES:
1390     a->donotstash = PETSC_TRUE;
1391     break;
1392   case MAT_SYMMETRIC:
1393     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1394     break;
1395   case MAT_STRUCTURALLY_SYMMETRIC:
1396   case MAT_HERMITIAN:
1397   case MAT_SYMMETRY_ETERNAL:
1398     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1399     break;
1400   default:
1401     SETERRQ1(PETSC_ERR_SUP,"unknown option %d",op);
1402   }
1403   PetscFunctionReturn(0);
1404 }
1405 
1406 #undef __FUNCT__
1407 #define __FUNCT__ "MatGetRow_MPIAIJ"
1408 PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1409 {
1410   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1411   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1412   PetscErrorCode ierr;
1413   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1414   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1415   PetscInt       *cmap,*idx_p;
1416 
1417   PetscFunctionBegin;
1418   if (mat->getrowactive) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1419   mat->getrowactive = PETSC_TRUE;
1420 
1421   if (!mat->rowvalues && (idx || v)) {
1422     /*
1423         allocate enough space to hold information from the longest row.
1424     */
1425     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1426     PetscInt     max = 1,tmp;
1427     for (i=0; i<matin->rmap->n; i++) {
1428       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1429       if (max < tmp) { max = tmp; }
1430     }
1431     ierr = PetscMalloc(max*(sizeof(PetscInt)+sizeof(PetscScalar)),&mat->rowvalues);CHKERRQ(ierr);
1432     mat->rowindices = (PetscInt*)(mat->rowvalues + max);
1433   }
1434 
1435   if (row < rstart || row >= rend) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1436   lrow = row - rstart;
1437 
1438   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1439   if (!v)   {pvA = 0; pvB = 0;}
1440   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1441   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1442   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1443   nztot = nzA + nzB;
1444 
1445   cmap  = mat->garray;
1446   if (v  || idx) {
1447     if (nztot) {
1448       /* Sort by increasing column numbers, assuming A and B already sorted */
1449       PetscInt imark = -1;
1450       if (v) {
1451         *v = v_p = mat->rowvalues;
1452         for (i=0; i<nzB; i++) {
1453           if (cmap[cworkB[i]] < cstart)   v_p[i] = vworkB[i];
1454           else break;
1455         }
1456         imark = i;
1457         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1458         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1459       }
1460       if (idx) {
1461         *idx = idx_p = mat->rowindices;
1462         if (imark > -1) {
1463           for (i=0; i<imark; i++) {
1464             idx_p[i] = cmap[cworkB[i]];
1465           }
1466         } else {
1467           for (i=0; i<nzB; i++) {
1468             if (cmap[cworkB[i]] < cstart)   idx_p[i] = cmap[cworkB[i]];
1469             else break;
1470           }
1471           imark = i;
1472         }
1473         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1474         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1475       }
1476     } else {
1477       if (idx) *idx = 0;
1478       if (v)   *v   = 0;
1479     }
1480   }
1481   *nz = nztot;
1482   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1483   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1484   PetscFunctionReturn(0);
1485 }
1486 
1487 #undef __FUNCT__
1488 #define __FUNCT__ "MatRestoreRow_MPIAIJ"
1489 PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1490 {
1491   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1492 
1493   PetscFunctionBegin;
1494   if (!aij->getrowactive) {
1495     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1496   }
1497   aij->getrowactive = PETSC_FALSE;
1498   PetscFunctionReturn(0);
1499 }
1500 
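/*
   A minimal usage sketch (illustrative only; 'A' is an assumption): the get/restore
   pair above is reached through MatGetRow()/MatRestoreRow(), which for MPIAIJ may only
   be asked for rows owned by the calling process.

     PetscInt          row,rstart,rend,ncols;
     const PetscInt    *cols;
     const PetscScalar *vals;
     ierr = MatGetOwnershipRange(A,&rstart,&rend);CHKERRQ(ierr);
     for (row=rstart; row<rend; row++) {
       ierr = MatGetRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
       /* ... use cols[0..ncols-1] and vals[0..ncols-1] ... */
       ierr = MatRestoreRow(A,row,&ncols,&cols,&vals);CHKERRQ(ierr);
     }
*/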
1501 #undef __FUNCT__
1502 #define __FUNCT__ "MatNorm_MPIAIJ"
1503 PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1504 {
1505   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1506   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1507   PetscErrorCode ierr;
1508   PetscInt       i,j,cstart = mat->cmap->rstart;
1509   PetscReal      sum = 0.0;
1510   MatScalar      *v;
1511 
1512   PetscFunctionBegin;
1513   if (aij->size == 1) {
1514     ierr =  MatNorm(aij->A,type,norm);CHKERRQ(ierr);
1515   } else {
1516     if (type == NORM_FROBENIUS) {
1517       v = amat->a;
1518       for (i=0; i<amat->nz; i++) {
1519 #if defined(PETSC_USE_COMPLEX)
1520         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1521 #else
1522         sum += (*v)*(*v); v++;
1523 #endif
1524       }
1525       v = bmat->a;
1526       for (i=0; i<bmat->nz; i++) {
1527 #if defined(PETSC_USE_COMPLEX)
1528         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1529 #else
1530         sum += (*v)*(*v); v++;
1531 #endif
1532       }
1533       ierr = MPI_Allreduce(&sum,norm,1,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);CHKERRQ(ierr);
1534       *norm = sqrt(*norm);
1535     } else if (type == NORM_1) { /* max column norm */
1536       PetscReal *tmp,*tmp2;
1537       PetscInt  *jj,*garray = aij->garray;
1538       ierr = PetscMalloc((mat->cmap->N+1)*sizeof(PetscReal),&tmp);CHKERRQ(ierr);
1539       ierr = PetscMalloc((mat->cmap->N+1)*sizeof(PetscReal),&tmp2);CHKERRQ(ierr);
1540       ierr = PetscMemzero(tmp,mat->cmap->N*sizeof(PetscReal));CHKERRQ(ierr);
1541       *norm = 0.0;
1542       v = amat->a; jj = amat->j;
1543       for (j=0; j<amat->nz; j++) {
1544         tmp[cstart + *jj++ ] += PetscAbsScalar(*v);  v++;
1545       }
1546       v = bmat->a; jj = bmat->j;
1547       for (j=0; j<bmat->nz; j++) {
1548         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1549       }
1550       ierr = MPI_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPI_SUM,((PetscObject)mat)->comm);CHKERRQ(ierr);
1551       for (j=0; j<mat->cmap->N; j++) {
1552         if (tmp2[j] > *norm) *norm = tmp2[j];
1553       }
1554       ierr = PetscFree(tmp);CHKERRQ(ierr);
1555       ierr = PetscFree(tmp2);CHKERRQ(ierr);
1556     } else if (type == NORM_INFINITY) { /* max row norm */
1557       PetscReal ntemp = 0.0;
1558       for (j=0; j<aij->A->rmap->n; j++) {
1559         v = amat->a + amat->i[j];
1560         sum = 0.0;
1561         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1562           sum += PetscAbsScalar(*v); v++;
1563         }
1564         v = bmat->a + bmat->i[j];
1565         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1566           sum += PetscAbsScalar(*v); v++;
1567         }
1568         if (sum > ntemp) ntemp = sum;
1569       }
1570       ierr = MPI_Allreduce(&ntemp,norm,1,MPIU_REAL,MPI_MAX,((PetscObject)mat)->comm);CHKERRQ(ierr);
1571     } else {
1572       SETERRQ(PETSC_ERR_SUP,"No support for two norm");
1573     }
1574   }
1575   PetscFunctionReturn(0);
1576 }
1577 
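/*
   A minimal usage sketch (illustrative only; 'A' is an assumption): the Frobenius, one
   and infinity norms computed above are reached through MatNorm(); the two norm is not
   supported for this format.

     PetscReal nrm;
     ierr = MatNorm(A,NORM_FROBENIUS,&nrm);CHKERRQ(ierr);
     ierr = MatNorm(A,NORM_1,&nrm);CHKERRQ(ierr);
     ierr = MatNorm(A,NORM_INFINITY,&nrm);CHKERRQ(ierr);
*/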
1578 #undef __FUNCT__
1579 #define __FUNCT__ "MatTranspose_MPIAIJ"
1580 PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1581 {
1582   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1583   Mat_SeqAIJ     *Aloc=(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data;
1584   PetscErrorCode ierr;
1585   PetscInt       M = A->rmap->N,N = A->cmap->N,ma,na,mb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,i,*d_nnz;
1586   PetscInt       cstart=A->cmap->rstart,ncol;
1587   Mat            B;
1588   MatScalar      *array;
1589 
1590   PetscFunctionBegin;
1591   if (reuse == MAT_REUSE_MATRIX && A == *matout && M != N) SETERRQ(PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1592 
1593   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n;
1594   ai = Aloc->i; aj = Aloc->j;
1595   bi = Bloc->i; bj = Bloc->j;
1596   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1597     /* compute d_nnz for preallocation; o_nnz is approximated by d_nnz to avoid communication */
1598     ierr = PetscMalloc((1+na)*sizeof(PetscInt),&d_nnz);CHKERRQ(ierr);
1599     ierr = PetscMemzero(d_nnz,(1+na)*sizeof(PetscInt));CHKERRQ(ierr);
1600     for (i=0; i<ai[ma]; i++){
1601       d_nnz[aj[i]] ++;
1602       aj[i] += cstart; /* global col index to be used by MatSetValues() */
1603     }
1604 
1605     ierr = MatCreate(((PetscObject)A)->comm,&B);CHKERRQ(ierr);
1606     ierr = MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);CHKERRQ(ierr);
1607     ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
1608     ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,d_nnz);CHKERRQ(ierr);
1609     ierr = PetscFree(d_nnz);CHKERRQ(ierr);
1610   } else {
1611     B = *matout;
1612   }
1613 
1614   /* copy over the A part */
1615   array = Aloc->a;
1616   row = A->rmap->rstart;
1617   for (i=0; i<ma; i++) {
1618     ncol = ai[i+1]-ai[i];
1619     ierr = MatSetValues(B,ncol,aj,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
1620     row++; array += ncol; aj += ncol;
1621   }
1622   aj = Aloc->j;
1623   for (i=0; i<ai[ma]; i++) aj[i] -= cstart; /* restore local col index */
1624 
1625   /* copy over the B part */
1626   ierr = PetscMalloc(bi[mb]*sizeof(PetscInt),&cols);CHKERRQ(ierr);
1627   ierr = PetscMemzero(cols,bi[mb]*sizeof(PetscInt));CHKERRQ(ierr);
1628   array = Bloc->a;
1629   row = A->rmap->rstart;
1630   for (i=0; i<bi[mb]; i++) {cols[i] = a->garray[bj[i]];}
1631   cols_tmp = cols;
1632   for (i=0; i<mb; i++) {
1633     ncol = bi[i+1]-bi[i];
1634     ierr = MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
1635     row++; array += ncol; cols_tmp += ncol;
1636   }
1637   ierr = PetscFree(cols);CHKERRQ(ierr);
1638 
1639   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1640   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1641   if (reuse == MAT_INITIAL_MATRIX || *matout != A) {
1642     *matout = B;
1643   } else {
1644     ierr = MatHeaderCopy(A,B);CHKERRQ(ierr);
1645   }
1646   PetscFunctionReturn(0);
1647 }
1648 
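/*
   A minimal usage sketch (illustrative only; 'A' is an assumption): out-of-place and
   in-place transpose of a parallel AIJ matrix; in place requires a square matrix, as
   checked above.

     Mat At;
     ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr); /* new matrix */
     ierr = MatTranspose(A,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);    /* in place   */
     ierr = MatDestroy(At);CHKERRQ(ierr);
*/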
1649 #undef __FUNCT__
1650 #define __FUNCT__ "MatDiagonalScale_MPIAIJ"
1651 PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1652 {
1653   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1654   Mat            a = aij->A,b = aij->B;
1655   PetscErrorCode ierr;
1656   PetscInt       s1,s2,s3;
1657 
1658   PetscFunctionBegin;
1659   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1660   if (rr) {
1661     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1662     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1663     /* Overlap communication with computation. */
1664     ierr = VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1665   }
1666   if (ll) {
1667     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1668     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1669     ierr = (*b->ops->diagonalscale)(b,ll,0);CHKERRQ(ierr);
1670   }
1671   /* scale the diagonal block */
1672   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1673 
1674   if (rr) {
1675     /* Do a scatter end and then right scale the off-diagonal block */
1676     ierr = VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1677     ierr = (*b->ops->diagonalscale)(b,0,aij->lvec);CHKERRQ(ierr);
1678   }
1679 
1680   PetscFunctionReturn(0);
1681 }
1682 
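/*
   A minimal usage sketch (illustrative only; 'A', 'l' and 'r' are assumptions): scale
   rows by 'l' and columns by 'r', i.e. A <- diag(l) A diag(r); the vectors must match
   the local row and column layout of A, as checked above.

     ierr = MatDiagonalScale(A,l,r);CHKERRQ(ierr);          /* both sides   */
     ierr = MatDiagonalScale(A,PETSC_NULL,r);CHKERRQ(ierr); /* columns only */
*/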
1683 #undef __FUNCT__
1684 #define __FUNCT__ "MatSetBlockSize_MPIAIJ"
1685 PetscErrorCode MatSetBlockSize_MPIAIJ(Mat A,PetscInt bs)
1686 {
1687   Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data;
1688   PetscErrorCode ierr;
1689 
1690   PetscFunctionBegin;
1691   ierr = MatSetBlockSize(a->A,bs);CHKERRQ(ierr);
1692   ierr = MatSetBlockSize(a->B,bs);CHKERRQ(ierr);
1693   PetscFunctionReturn(0);
1694 }
1695 #undef __FUNCT__
1696 #define __FUNCT__ "MatSetUnfactored_MPIAIJ"
1697 PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
1698 {
1699   Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data;
1700   PetscErrorCode ierr;
1701 
1702   PetscFunctionBegin;
1703   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1704   PetscFunctionReturn(0);
1705 }
1706 
1707 #undef __FUNCT__
1708 #define __FUNCT__ "MatEqual_MPIAIJ"
1709 PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscTruth *flag)
1710 {
1711   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
1712   Mat            a,b,c,d;
1713   PetscTruth     flg;
1714   PetscErrorCode ierr;
1715 
1716   PetscFunctionBegin;
1717   a = matA->A; b = matA->B;
1718   c = matB->A; d = matB->B;
1719 
1720   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1721   if (flg) {
1722     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1723   }
1724   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,((PetscObject)A)->comm);CHKERRQ(ierr);
1725   PetscFunctionReturn(0);
1726 }
1727 
1728 #undef __FUNCT__
1729 #define __FUNCT__ "MatCopy_MPIAIJ"
1730 PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
1731 {
1732   PetscErrorCode ierr;
1733   Mat_MPIAIJ     *a = (Mat_MPIAIJ *)A->data;
1734   Mat_MPIAIJ     *b = (Mat_MPIAIJ *)B->data;
1735 
1736   PetscFunctionBegin;
1737   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
1738   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
1739     /* because of the column compression in the off-processor part of the matrix a->B,
1740        the number of columns in a->B and b->B may be different, hence we cannot call
1741        MatCopy() directly on the two parts. If need be, a copy more efficient than
1742        MatCopy_Basic() could be provided by first uncompressing the a->B matrices and
1743        then copying the submatrices */
1744     ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr);
1745   } else {
1746     ierr = MatCopy(a->A,b->A,str);CHKERRQ(ierr);
1747     ierr = MatCopy(a->B,b->B,str);CHKERRQ(ierr);
1748   }
1749   PetscFunctionReturn(0);
1750 }
1751 
1752 #undef __FUNCT__
1753 #define __FUNCT__ "MatSetUpPreallocation_MPIAIJ"
1754 PetscErrorCode MatSetUpPreallocation_MPIAIJ(Mat A)
1755 {
1756   PetscErrorCode ierr;
1757 
1758   PetscFunctionBegin;
1759   ierr =  MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1760   PetscFunctionReturn(0);
1761 }
1762 
1763 #include "petscblaslapack.h"
1764 #undef __FUNCT__
1765 #define __FUNCT__ "MatAXPY_MPIAIJ"
1766 PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
1767 {
1768   PetscErrorCode ierr;
1769   PetscInt       i;
1770   Mat_MPIAIJ     *xx = (Mat_MPIAIJ *)X->data,*yy = (Mat_MPIAIJ *)Y->data;
1771   PetscBLASInt   bnz,one=1;
1772   Mat_SeqAIJ     *x,*y;
1773 
1774   PetscFunctionBegin;
1775   if (str == SAME_NONZERO_PATTERN) {
1776     PetscScalar alpha = a;
1777     x = (Mat_SeqAIJ *)xx->A->data;
1778     y = (Mat_SeqAIJ *)yy->A->data;
1779     bnz = PetscBLASIntCast(x->nz);
1780     BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1781     x = (Mat_SeqAIJ *)xx->B->data;
1782     y = (Mat_SeqAIJ *)yy->B->data;
1783     bnz = PetscBLASIntCast(x->nz);
1784     BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one);
1785   } else if (str == SUBSET_NONZERO_PATTERN) {
1786     ierr = MatAXPY_SeqAIJ(yy->A,a,xx->A,str);CHKERRQ(ierr);
1787 
1788     x = (Mat_SeqAIJ *)xx->B->data;
1789     y = (Mat_SeqAIJ *)yy->B->data;
1790     if (y->xtoy && y->XtoY != xx->B) {
1791       ierr = PetscFree(y->xtoy);CHKERRQ(ierr);
1792       ierr = MatDestroy(y->XtoY);CHKERRQ(ierr);
1793     }
1794     if (!y->xtoy) { /* get xtoy */
1795       ierr = MatAXPYGetxtoy_Private(xx->B->rmap->n,x->i,x->j,xx->garray,y->i,y->j,yy->garray,&y->xtoy);CHKERRQ(ierr);
1796       y->XtoY = xx->B;
1797       ierr = PetscObjectReference((PetscObject)xx->B);CHKERRQ(ierr);
1798     }
1799     for (i=0; i<x->nz; i++) y->a[y->xtoy[i]] += a*(x->a[i]);
1800   } else {
1801     ierr = MatAXPY_Basic(Y,a,X,str);CHKERRQ(ierr);
1802   }
1803   PetscFunctionReturn(0);
1804 }
1805 
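/*
   A minimal usage sketch (illustrative only; 'X' and 'Y' are assumed parallel AIJ
   matrices with identical layout): Y <- a*X + Y; the MatStructure flag selects the
   fast paths above, with MatAXPY_Basic() as the general fallback.

     PetscScalar a = 2.0;
     ierr = MatAXPY(Y,a,X,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
     ierr = MatAXPY(Y,a,X,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
*/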
1806 EXTERN PetscErrorCode PETSCMAT_DLLEXPORT MatConjugate_SeqAIJ(Mat);
1807 
1808 #undef __FUNCT__
1809 #define __FUNCT__ "MatConjugate_MPIAIJ"
1810 PetscErrorCode PETSCMAT_DLLEXPORT MatConjugate_MPIAIJ(Mat mat)
1811 {
1812 #if defined(PETSC_USE_COMPLEX)
1813   PetscErrorCode ierr;
1814   Mat_MPIAIJ     *aij = (Mat_MPIAIJ *)mat->data;
1815 
1816   PetscFunctionBegin;
1817   ierr = MatConjugate_SeqAIJ(aij->A);CHKERRQ(ierr);
1818   ierr = MatConjugate_SeqAIJ(aij->B);CHKERRQ(ierr);
1819 #else
1820   PetscFunctionBegin;
1821 #endif
1822   PetscFunctionReturn(0);
1823 }
1824 
1825 #undef __FUNCT__
1826 #define __FUNCT__ "MatRealPart_MPIAIJ"
1827 PetscErrorCode MatRealPart_MPIAIJ(Mat A)
1828 {
1829   Mat_MPIAIJ   *a = (Mat_MPIAIJ*)A->data;
1830   PetscErrorCode ierr;
1831 
1832   PetscFunctionBegin;
1833   ierr = MatRealPart(a->A);CHKERRQ(ierr);
1834   ierr = MatRealPart(a->B);CHKERRQ(ierr);
1835   PetscFunctionReturn(0);
1836 }
1837 
1838 #undef __FUNCT__
1839 #define __FUNCT__ "MatImaginaryPart_MPIAIJ"
1840 PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
1841 {
1842   Mat_MPIAIJ   *a = (Mat_MPIAIJ*)A->data;
1843   PetscErrorCode ierr;
1844 
1845   PetscFunctionBegin;
1846   ierr = MatImaginaryPart(a->A);CHKERRQ(ierr);
1847   ierr = MatImaginaryPart(a->B);CHKERRQ(ierr);
1848   PetscFunctionReturn(0);
1849 }
1850 
1851 #ifdef PETSC_HAVE_PBGL
1852 
1853 #include <boost/parallel/mpi/bsp_process_group.hpp>
1854 #include <boost/graph/distributed/ilu_default_graph.hpp>
1855 #include <boost/graph/distributed/ilu_0_block.hpp>
1856 #include <boost/graph/distributed/ilu_preconditioner.hpp>
1857 #include <boost/graph/distributed/petsc/interface.hpp>
1858 #include <boost/multi_array.hpp>
1859 #include <boost/parallel/distributed_property_map.hpp>
1860 
1861 #undef __FUNCT__
1862 #define __FUNCT__ "MatILUFactorSymbolic_MPIAIJ"
1863 /*
1864   This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
1865 */
1866 PetscErrorCode MatILUFactorSymbolic_MPIAIJ(Mat fact,Mat A, IS isrow, IS iscol, const MatFactorInfo *info)
1867 {
1868   namespace petsc = boost::distributed::petsc;
1869 
1870   namespace graph_dist = boost::graph::distributed;
1871   using boost::graph::distributed::ilu_default::process_group_type;
1872   using boost::graph::ilu_permuted;
1873 
1874   PetscTruth      row_identity, col_identity;
1875   PetscContainer  c;
1876   PetscInt        m, n, M, N;
1877   PetscErrorCode  ierr;
1878 
1879   PetscFunctionBegin;
1880   if (info->levels != 0) SETERRQ(PETSC_ERR_SUP,"Only levels = 0 supported for parallel ilu");
1881   ierr = ISIdentity(isrow, &row_identity);CHKERRQ(ierr);
1882   ierr = ISIdentity(iscol, &col_identity);CHKERRQ(ierr);
1883   if (!row_identity || !col_identity) {
1884     SETERRQ(PETSC_ERR_ARG_WRONG,"Row and column permutations must be identity for parallel ILU");
1885   }
1886 
1887   process_group_type pg;
1888   typedef graph_dist::ilu_default::ilu_level_graph_type  lgraph_type;
1889   lgraph_type*   lgraph_p = new lgraph_type(petsc::num_global_vertices(A), pg, petsc::matrix_distribution(A, pg));
1890   lgraph_type&   level_graph = *lgraph_p;
1891   graph_dist::ilu_default::graph_type&            graph(level_graph.graph);
1892 
1893   petsc::read_matrix(A, graph, get(boost::edge_weight, graph));
1894   ilu_permuted(level_graph);
1895 
1896   /* put together the new matrix */
1897   ierr = MatCreate(((PetscObject)A)->comm, fact);CHKERRQ(ierr);
1898   ierr = MatGetLocalSize(A, &m, &n);CHKERRQ(ierr);
1899   ierr = MatGetSize(A, &M, &N);CHKERRQ(ierr);
1900   ierr = MatSetSizes(fact, m, n, M, N);CHKERRQ(ierr);
1901   ierr = MatSetType(fact, ((PetscObject)A)->type_name);CHKERRQ(ierr);
1902   ierr = MatAssemblyBegin(fact, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1903   ierr = MatAssemblyEnd(fact, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1904 
1905   ierr = PetscContainerCreate(((PetscObject)A)->comm, &c);CHKERRQ(ierr);
1906   ierr = PetscContainerSetPointer(c, lgraph_p);CHKERRQ(ierr);
1907   ierr = PetscObjectCompose((PetscObject) (fact), "graph", (PetscObject) c);CHKERRQ(ierr);
1908   PetscFunctionReturn(0);
1909 }
1910 
1911 #undef __FUNCT__
1912 #define __FUNCT__ "MatLUFactorNumeric_MPIAIJ"
1913 PetscErrorCode MatLUFactorNumeric_MPIAIJ(Mat B,Mat A, const MatFactorInfo *info)
1914 {
1915   PetscFunctionBegin;
1916   PetscFunctionReturn(0);
1917 }
1918 
1919 #undef __FUNCT__
1920 #define __FUNCT__ "MatSolve_MPIAIJ"
1921 /*
1922   This uses the parallel ILU factorization of Peter Gottschling <pgottsch@osl.iu.edu>
1923 */
1924 PetscErrorCode MatSolve_MPIAIJ(Mat A, Vec b, Vec x)
1925 {
1926   namespace graph_dist = boost::graph::distributed;
1927 
1928   typedef graph_dist::ilu_default::ilu_level_graph_type  lgraph_type;
1929   lgraph_type*   lgraph_p;
1930   PetscContainer c;
1931   PetscErrorCode ierr;
1932 
1933   PetscFunctionBegin;
1934   ierr = PetscObjectQuery((PetscObject) A, "graph", (PetscObject *) &c);CHKERRQ(ierr);
1935   ierr = PetscContainerGetPointer(c, (void **) &lgraph_p);CHKERRQ(ierr);
1936   ierr = VecCopy(b, x); CHKERRQ(ierr);
1937 
1938   PetscScalar* array_x;
1939   ierr = VecGetArray(x, &array_x);CHKERRQ(ierr);
1940   PetscInt sx;
1941   ierr = VecGetSize(x, &sx);CHKERRQ(ierr);
1942 
1943   PetscScalar* array_b;
1944   ierr = VecGetArray(b, &array_b);CHKERRQ(ierr);
1945   PetscInt sb;
1946   ierr = VecGetSize(b, &sb);CHKERRQ(ierr);
1947 
1948   lgraph_type&   level_graph = *lgraph_p;
1949   graph_dist::ilu_default::graph_type&            graph(level_graph.graph);
1950 
1951   typedef boost::multi_array_ref<PetscScalar, 1> array_ref_type;
1952   array_ref_type                                 ref_b(array_b, boost::extents[num_vertices(graph)]),
1953                                                  ref_x(array_x, boost::extents[num_vertices(graph)]);
1954 
1955   typedef boost::iterator_property_map<array_ref_type::iterator,
1956                                 boost::property_map<graph_dist::ilu_default::graph_type, boost::vertex_index_t>::type>  gvector_type;
1957   gvector_type                                   vector_b(ref_b.begin(), get(boost::vertex_index, graph)),
1958                                                  vector_x(ref_x.begin(), get(boost::vertex_index, graph));
1959 
1960   ilu_set_solve(*lgraph_p, vector_b, vector_x);
1961 
1962   PetscFunctionReturn(0);
1963 }
1964 #endif
1965 
1966 typedef struct { /* used by MatGetRedundantMatrix() for reusing matredundant */
1967   PetscInt       nzlocal,nsends,nrecvs;
1968   PetscMPIInt    *send_rank;
1969   PetscInt       *sbuf_nz,*sbuf_j,**rbuf_j;
1970   PetscScalar    *sbuf_a,**rbuf_a;
1971   PetscErrorCode (*MatDestroy)(Mat);
1972 } Mat_Redundant;
1973 
1974 #undef __FUNCT__
1975 #define __FUNCT__ "PetscContainerDestroy_MatRedundant"
1976 PetscErrorCode PetscContainerDestroy_MatRedundant(void *ptr)
1977 {
1978   PetscErrorCode       ierr;
1979   Mat_Redundant        *redund=(Mat_Redundant*)ptr;
1980   PetscInt             i;
1981 
1982   PetscFunctionBegin;
1983   ierr = PetscFree(redund->send_rank);CHKERRQ(ierr);
1984   ierr = PetscFree(redund->sbuf_j);CHKERRQ(ierr);
1985   ierr = PetscFree(redund->sbuf_a);CHKERRQ(ierr);
1986   for (i=0; i<redund->nrecvs; i++){
1987     ierr = PetscFree(redund->rbuf_j[i]);CHKERRQ(ierr);
1988     ierr = PetscFree(redund->rbuf_a[i]);CHKERRQ(ierr);
1989   }
1990   ierr = PetscFree3(redund->sbuf_nz,redund->rbuf_j,redund->rbuf_a);CHKERRQ(ierr);
1991   ierr = PetscFree(redund);CHKERRQ(ierr);
1992   PetscFunctionReturn(0);
1993 }
1994 
1995 #undef __FUNCT__
1996 #define __FUNCT__ "MatDestroy_MatRedundant"
1997 PetscErrorCode MatDestroy_MatRedundant(Mat A)
1998 {
1999   PetscErrorCode  ierr;
2000   PetscContainer  container;
2001   Mat_Redundant   *redund=PETSC_NULL;
2002 
2003   PetscFunctionBegin;
2004   ierr = PetscObjectQuery((PetscObject)A,"Mat_Redundant",(PetscObject *)&container);CHKERRQ(ierr);
2005   if (container) {
2006     ierr = PetscContainerGetPointer(container,(void **)&redund);CHKERRQ(ierr);
2007   } else {
2008     SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
2009   }
2010   A->ops->destroy = redund->MatDestroy;
2011   ierr = PetscObjectCompose((PetscObject)A,"Mat_Redundant",0);CHKERRQ(ierr);
2012   ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
2013   ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
2014   PetscFunctionReturn(0);
2015 }
2016 
2017 #undef __FUNCT__
2018 #define __FUNCT__ "MatGetRedundantMatrix_MPIAIJ"
2019 PetscErrorCode MatGetRedundantMatrix_MPIAIJ(Mat mat,PetscInt nsubcomm,MPI_Comm subcomm,PetscInt mlocal_sub,MatReuse reuse,Mat *matredundant)
2020 {
2021   PetscMPIInt    rank,size;
2022   MPI_Comm       comm=((PetscObject)mat)->comm;
2023   PetscErrorCode ierr;
2024   PetscInt       nsends=0,nrecvs=0,i,rownz_max=0;
2025   PetscMPIInt    *send_rank=PETSC_NULL,*recv_rank=PETSC_NULL;
2026   PetscInt       *rowrange=mat->rmap->range;
2027   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2028   Mat            A=aij->A,B=aij->B,C=*matredundant;
2029   Mat_SeqAIJ     *a=(Mat_SeqAIJ*)A->data,*b=(Mat_SeqAIJ*)B->data;
2030   PetscScalar    *sbuf_a;
2031   PetscInt       nzlocal=a->nz+b->nz;
2032   PetscInt       j,cstart=mat->cmap->rstart,cend=mat->cmap->rend,row,nzA,nzB,ncols,*cworkA,*cworkB;
2033   PetscInt       rstart=mat->rmap->rstart,rend=mat->rmap->rend,*bmap=aij->garray,M,N;
2034   PetscInt       *cols,ctmp,lwrite,*rptr,l,*sbuf_j;
2035   MatScalar      *aworkA,*aworkB;
2036   PetscScalar    *vals;
2037   PetscMPIInt    tag1,tag2,tag3,imdex;
2038   MPI_Request    *s_waits1=PETSC_NULL,*s_waits2=PETSC_NULL,*s_waits3=PETSC_NULL,
2039                  *r_waits1=PETSC_NULL,*r_waits2=PETSC_NULL,*r_waits3=PETSC_NULL;
2040   MPI_Status     recv_status,*send_status;
2041   PetscInt       *sbuf_nz=PETSC_NULL,*rbuf_nz=PETSC_NULL,count;
2042   PetscInt       **rbuf_j=PETSC_NULL;
2043   PetscScalar    **rbuf_a=PETSC_NULL;
2044   Mat_Redundant  *redund=PETSC_NULL;
2045   PetscContainer container;
2046 
2047   PetscFunctionBegin;
2048   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2049   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2050 
2051   if (reuse == MAT_REUSE_MATRIX) {
2052     ierr = MatGetSize(C,&M,&N);CHKERRQ(ierr);
2053     if (M != N || M != mat->rmap->N) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong global size");
2054     ierr = MatGetLocalSize(C,&M,&N);CHKERRQ(ierr);
2055     if (M != N || M != mlocal_sub) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong local size");
2056     ierr = PetscObjectQuery((PetscObject)C,"Mat_Redundant",(PetscObject *)&container);CHKERRQ(ierr);
2057     if (container) {
2058       ierr = PetscContainerGetPointer(container,(void **)&redund);CHKERRQ(ierr);
2059     } else {
2060       SETERRQ(PETSC_ERR_PLIB,"Container does not exist");
2061     }
2062     if (nzlocal != redund->nzlocal) SETERRQ(PETSC_ERR_ARG_SIZ,"Cannot reuse matrix. Wrong nzlocal");
2063 
2064     nsends    = redund->nsends;
2065     nrecvs    = redund->nrecvs;
2066     send_rank = redund->send_rank; recv_rank = send_rank + size;
2067     sbuf_nz   = redund->sbuf_nz;     rbuf_nz = sbuf_nz + nsends;
2068     sbuf_j    = redund->sbuf_j;
2069     sbuf_a    = redund->sbuf_a;
2070     rbuf_j    = redund->rbuf_j;
2071     rbuf_a    = redund->rbuf_a;
2072   }
2073 
2074   if (reuse == MAT_INITIAL_MATRIX){
2075     PetscMPIInt  subrank,subsize;
2076     PetscInt     nleftover,np_subcomm;
2077     /* get the destination processors' id send_rank, nsends and nrecvs */
2078     ierr = MPI_Comm_rank(subcomm,&subrank);CHKERRQ(ierr);
2079     ierr = MPI_Comm_size(subcomm,&subsize);CHKERRQ(ierr);
2080     ierr = PetscMalloc((2*size+1)*sizeof(PetscMPIInt),&send_rank);CHKERRQ(ierr);
2081     recv_rank = send_rank + size;
2082     np_subcomm = size/nsubcomm;
2083     nleftover  = size - nsubcomm*np_subcomm;
2084     nsends = 0; nrecvs = 0;
2085     for (i=0; i<size; i++){ /* i=rank*/
2086       if (subrank == i/nsubcomm && rank != i){ /* my_subrank == other's subrank */
2087         send_rank[nsends] = i; nsends++;
2088         recv_rank[nrecvs++] = i;
2089       }
2090     }
2091     if (rank >= size - nleftover){/* this proc is a leftover processor */
2092       i = size-nleftover-1;
2093       j = 0;
2094       while (j < nsubcomm - nleftover){
2095         send_rank[nsends++] = i;
2096         i--; j++;
2097       }
2098     }
2099 
2100     if (nleftover && subsize == size/nsubcomm && subrank==subsize-1){ /* this proc recvs from leftover processors */
2101       for (i=0; i<nleftover; i++){
2102         recv_rank[nrecvs++] = size-nleftover+i;
2103       }
2104     }
2105 
2106     /* allocate sbuf_j, sbuf_a */
2107     i = nzlocal + rowrange[rank+1] - rowrange[rank] + 2;
2108     ierr = PetscMalloc(i*sizeof(PetscInt),&sbuf_j);CHKERRQ(ierr);
2109     ierr = PetscMalloc((nzlocal+1)*sizeof(PetscScalar),&sbuf_a);CHKERRQ(ierr);
2110   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2111 
2112   /* copy mat's local entries into the buffers */
2113   if (reuse == MAT_INITIAL_MATRIX){
2114     rownz_max = 0;
2115     rptr = sbuf_j;
2116     cols = sbuf_j + rend-rstart + 1;
2117     vals = sbuf_a;
2118     rptr[0] = 0;
2119     for (i=0; i<rend-rstart; i++){
2120       row = i + rstart;
2121       nzA    = a->i[i+1] - a->i[i]; nzB = b->i[i+1] - b->i[i];
2122       ncols  = nzA + nzB;
2123       cworkA = a->j + a->i[i]; cworkB = b->j + b->i[i];
2124       aworkA = a->a + a->i[i]; aworkB = b->a + b->i[i];
2125       /* load the column indices for this row into cols */
2126       lwrite = 0;
2127       for (l=0; l<nzB; l++) {
2128         if ((ctmp = bmap[cworkB[l]]) < cstart){
2129           vals[lwrite]   = aworkB[l];
2130           cols[lwrite++] = ctmp;
2131         }
2132       }
2133       for (l=0; l<nzA; l++){
2134         vals[lwrite]   = aworkA[l];
2135         cols[lwrite++] = cstart + cworkA[l];
2136       }
2137       for (l=0; l<nzB; l++) {
2138         if ((ctmp = bmap[cworkB[l]]) >= cend){
2139           vals[lwrite]   = aworkB[l];
2140           cols[lwrite++] = ctmp;
2141         }
2142       }
2143       vals += ncols;
2144       cols += ncols;
2145       rptr[i+1] = rptr[i] + ncols;
2146       if (rownz_max < ncols) rownz_max = ncols;
2147     }
2148     if (rptr[rend-rstart] != a->nz + b->nz) SETERRQ4(1, "rptr[%d] %d != %d + %d",rend-rstart,rptr[rend-rstart],a->nz,b->nz);
2149   } else { /* only copy matrix values into sbuf_a */
2150     rptr = sbuf_j;
2151     vals = sbuf_a;
2152     rptr[0] = 0;
2153     for (i=0; i<rend-rstart; i++){
2154       row = i + rstart;
2155       nzA    = a->i[i+1] - a->i[i]; nzB = b->i[i+1] - b->i[i];
2156       ncols  = nzA + nzB;
2157       cworkA = a->j + a->i[i]; cworkB = b->j + b->i[i];
2158       aworkA = a->a + a->i[i]; aworkB = b->a + b->i[i];
2159       lwrite = 0;
2160       for (l=0; l<nzB; l++) {
2161         if ((ctmp = bmap[cworkB[l]]) < cstart) vals[lwrite++] = aworkB[l];
2162       }
2163       for (l=0; l<nzA; l++) vals[lwrite++] = aworkA[l];
2164       for (l=0; l<nzB; l++) {
2165         if ((ctmp = bmap[cworkB[l]]) >= cend) vals[lwrite++] = aworkB[l];
2166       }
2167       vals += ncols;
2168       rptr[i+1] = rptr[i] + ncols;
2169     }
2170   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2171 
2172   /* send nzlocal to others, and recv other's nzlocal */
2173   /*--------------------------------------------------*/
2174   if (reuse == MAT_INITIAL_MATRIX){
2175     ierr = PetscMalloc2(3*(nsends + nrecvs)+1,MPI_Request,&s_waits3,nsends+1,MPI_Status,&send_status);CHKERRQ(ierr);
2176     s_waits2 = s_waits3 + nsends;
2177     s_waits1 = s_waits2 + nsends;
2178     r_waits1 = s_waits1 + nsends;
2179     r_waits2 = r_waits1 + nrecvs;
2180     r_waits3 = r_waits2 + nrecvs;
2181   } else {
2182     ierr = PetscMalloc2(nsends + nrecvs +1,MPI_Request,&s_waits3,nsends+1,MPI_Status,&send_status);CHKERRQ(ierr);
2183     r_waits3 = s_waits3 + nsends;
2184   }
2185 
2186   ierr = PetscObjectGetNewTag((PetscObject)mat,&tag3);CHKERRQ(ierr);
2187   if (reuse == MAT_INITIAL_MATRIX){
2188     /* get new tags to keep the communication clean */
2189     ierr = PetscObjectGetNewTag((PetscObject)mat,&tag1);CHKERRQ(ierr);
2190     ierr = PetscObjectGetNewTag((PetscObject)mat,&tag2);CHKERRQ(ierr);
2191     ierr = PetscMalloc3(nsends+nrecvs+1,PetscInt,&sbuf_nz,nrecvs,PetscInt*,&rbuf_j,nrecvs,PetscScalar*,&rbuf_a);CHKERRQ(ierr);
2192     rbuf_nz = sbuf_nz + nsends;
2193 
2194     /* post receives of other's nzlocal */
2195     for (i=0; i<nrecvs; i++){
2196       ierr = MPI_Irecv(rbuf_nz+i,1,MPIU_INT,MPI_ANY_SOURCE,tag1,comm,r_waits1+i);CHKERRQ(ierr);
2197     }
2198     /* send nzlocal to others */
2199     for (i=0; i<nsends; i++){
2200       sbuf_nz[i] = nzlocal;
2201       ierr = MPI_Isend(sbuf_nz+i,1,MPIU_INT,send_rank[i],tag1,comm,s_waits1+i);CHKERRQ(ierr);
2202     }
2203     /* wait on receives of nzlocal; allocate space for rbuf_j, rbuf_a */
2204     count = nrecvs;
2205     while (count) {
2206       ierr = MPI_Waitany(nrecvs,r_waits1,&imdex,&recv_status);CHKERRQ(ierr);
2207       recv_rank[imdex] = recv_status.MPI_SOURCE;
2208       /* allocate rbuf_a and rbuf_j; then post receives of rbuf_j */
2209       ierr = PetscMalloc((rbuf_nz[imdex]+1)*sizeof(PetscScalar),&rbuf_a[imdex]);CHKERRQ(ierr);
2210 
2211       i = rowrange[recv_status.MPI_SOURCE+1] - rowrange[recv_status.MPI_SOURCE]; /* number of expected mat->i */
2212       rbuf_nz[imdex] += i + 2;
2213       ierr = PetscMalloc(rbuf_nz[imdex]*sizeof(PetscInt),&rbuf_j[imdex]);CHKERRQ(ierr);
2214       ierr = MPI_Irecv(rbuf_j[imdex],rbuf_nz[imdex],MPIU_INT,recv_status.MPI_SOURCE,tag2,comm,r_waits2+imdex);CHKERRQ(ierr);
2215       count--;
2216     }
2217     /* wait on sends of nzlocal */
2218     if (nsends) {ierr = MPI_Waitall(nsends,s_waits1,send_status);CHKERRQ(ierr);}
2219     /* send mat->i,j to others, and recv from other's */
2220     /*------------------------------------------------*/
2221     for (i=0; i<nsends; i++){
2222       j = nzlocal + rowrange[rank+1] - rowrange[rank] + 1;
2223       ierr = MPI_Isend(sbuf_j,j,MPIU_INT,send_rank[i],tag2,comm,s_waits2+i);CHKERRQ(ierr);
2224     }
2225     /* wait on receives of mat->i,j */
2226     /*------------------------------*/
2227     count = nrecvs;
2228     while (count) {
2229       ierr = MPI_Waitany(nrecvs,r_waits2,&imdex,&recv_status);CHKERRQ(ierr);
2230       if (recv_rank[imdex] != recv_status.MPI_SOURCE) SETERRQ2(1, "recv_rank %d != MPI_SOURCE %d",recv_rank[imdex],recv_status.MPI_SOURCE);
2231       count--;
2232     }
2233     /* wait on sends of mat->i,j */
2234     /*---------------------------*/
2235     if (nsends) {
2236       ierr = MPI_Waitall(nsends,s_waits2,send_status);CHKERRQ(ierr);
2237     }
2238   } /* end of if (reuse == MAT_INITIAL_MATRIX) */
2239 
2240   /* post receives, send and receive mat->a */
2241   /*----------------------------------------*/
2242   for (imdex=0; imdex<nrecvs; imdex++) {
2243     ierr = MPI_Irecv(rbuf_a[imdex],rbuf_nz[imdex],MPIU_SCALAR,recv_rank[imdex],tag3,comm,r_waits3+imdex);CHKERRQ(ierr);
2244   }
2245   for (i=0; i<nsends; i++){
2246     ierr = MPI_Isend(sbuf_a,nzlocal,MPIU_SCALAR,send_rank[i],tag3,comm,s_waits3+i);CHKERRQ(ierr);
2247   }
2248   count = nrecvs;
2249   while (count) {
2250     ierr = MPI_Waitany(nrecvs,r_waits3,&imdex,&recv_status);CHKERRQ(ierr);
2251     if (recv_rank[imdex] != recv_status.MPI_SOURCE) SETERRQ2(1, "recv_rank %d != MPI_SOURCE %d",recv_rank[imdex],recv_status.MPI_SOURCE);
2252     count--;
2253   }
2254   if (nsends) {
2255     ierr = MPI_Waitall(nsends,s_waits3,send_status);CHKERRQ(ierr);
2256   }
2257 
2258   ierr = PetscFree2(s_waits3,send_status);CHKERRQ(ierr);
2259 
2260   /* create redundant matrix */
2261   /*-------------------------*/
2262   if (reuse == MAT_INITIAL_MATRIX){
2263     /* compute rownz_max for preallocation */
2264     for (imdex=0; imdex<nrecvs; imdex++){
2265       j = rowrange[recv_rank[imdex]+1] - rowrange[recv_rank[imdex]];
2266       rptr = rbuf_j[imdex];
2267       for (i=0; i<j; i++){
2268         ncols = rptr[i+1] - rptr[i];
2269         if (rownz_max < ncols) rownz_max = ncols;
2270       }
2271     }
2272 
2273     ierr = MatCreate(subcomm,&C);CHKERRQ(ierr);
2274     ierr = MatSetSizes(C,mlocal_sub,mlocal_sub,PETSC_DECIDE,PETSC_DECIDE);CHKERRQ(ierr);
2275     ierr = MatSetFromOptions(C);CHKERRQ(ierr);
2276     ierr = MatSeqAIJSetPreallocation(C,rownz_max,PETSC_NULL);CHKERRQ(ierr);
2277     ierr = MatMPIAIJSetPreallocation(C,rownz_max,PETSC_NULL,rownz_max,PETSC_NULL);CHKERRQ(ierr);
2278   } else {
2279     C = *matredundant;
2280   }
2281 
2282   /* insert local matrix entries */
2283   rptr = sbuf_j;
2284   cols = sbuf_j + rend-rstart + 1;
2285   vals = sbuf_a;
2286   for (i=0; i<rend-rstart; i++){
2287     row   = i + rstart;
2288     ncols = rptr[i+1] - rptr[i];
2289     ierr = MatSetValues(C,1,&row,ncols,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
2290     vals += ncols;
2291     cols += ncols;
2292   }
2293   /* insert received matrix entries */
2294   for (imdex=0; imdex<nrecvs; imdex++){
2295     rstart = rowrange[recv_rank[imdex]];
2296     rend   = rowrange[recv_rank[imdex]+1];
2297     rptr = rbuf_j[imdex];
2298     cols = rbuf_j[imdex] + rend-rstart + 1;
2299     vals = rbuf_a[imdex];
2300     for (i=0; i<rend-rstart; i++){
2301       row   = i + rstart;
2302       ncols = rptr[i+1] - rptr[i];
2303       ierr = MatSetValues(C,1,&row,ncols,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
2304       vals += ncols;
2305       cols += ncols;
2306     }
2307   }
2308   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2309   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2310   ierr = MatGetSize(C,&M,&N);CHKERRQ(ierr);
2311   if (M != mat->rmap->N || N != mat->cmap->N) SETERRQ2(PETSC_ERR_ARG_INCOMP,"redundant mat size %d != input mat size %d",M,mat->rmap->N);
2312   if (reuse == MAT_INITIAL_MATRIX){
2313     PetscContainer container;
2314     *matredundant = C;
2315     /* create a supporting struct and attach it to C for reuse */
2316     ierr = PetscNewLog(C,Mat_Redundant,&redund);CHKERRQ(ierr);
2317     ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
2318     ierr = PetscContainerSetPointer(container,redund);CHKERRQ(ierr);
2319     ierr = PetscObjectCompose((PetscObject)C,"Mat_Redundant",(PetscObject)container);CHKERRQ(ierr);
2320     ierr = PetscContainerSetUserDestroy(container,PetscContainerDestroy_MatRedundant);CHKERRQ(ierr);
2321 
2322     redund->nzlocal = nzlocal;
2323     redund->nsends  = nsends;
2324     redund->nrecvs  = nrecvs;
2325     redund->send_rank = send_rank;
2326     redund->sbuf_nz = sbuf_nz;
2327     redund->sbuf_j  = sbuf_j;
2328     redund->sbuf_a  = sbuf_a;
2329     redund->rbuf_j  = rbuf_j;
2330     redund->rbuf_a  = rbuf_a;
2331 
2332     redund->MatDestroy = C->ops->destroy;
2333     C->ops->destroy    = MatDestroy_MatRedundant;
2334   }
2335   PetscFunctionReturn(0);
2336 }
2337 
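/*
   A minimal usage sketch (illustrative only, and hedged: 'mat', 'nsubcomm', 'subcomm'
   and 'mlocal_sub' are assumptions, and the rank-to-subcommunicator layout must match
   what the caller, normally PCREDUNDANT, sets up): gather the whole parallel matrix
   redundantly onto each sub-communicator, then refresh only the values on reuse.

     Mat C;
     ierr = MatGetRedundantMatrix(mat,nsubcomm,subcomm,mlocal_sub,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
     /* ... later, after mat's values change but not its nonzero structure ... */
     ierr = MatGetRedundantMatrix(mat,nsubcomm,subcomm,mlocal_sub,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
*/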
2338 #undef __FUNCT__
2339 #define __FUNCT__ "MatGetRowMaxAbs_MPIAIJ"
2340 PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2341 {
2342   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2343   PetscErrorCode ierr;
2344   PetscInt       i,*idxb = 0;
2345   PetscScalar    *va,*vb;
2346   Vec            vtmp;
2347 
2348   PetscFunctionBegin;
2349   ierr = MatGetRowMaxAbs(a->A,v,idx);CHKERRQ(ierr);
2350   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2351   if (idx) {
2352     for (i=0; i<A->rmap->n; i++) {
2353       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2354     }
2355   }
2356 
2357   ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
2358   if (idx) {
2359     ierr = PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);CHKERRQ(ierr);
2360   }
2361   ierr = MatGetRowMaxAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
2362   ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);
2363 
2364   for (i=0; i<A->rmap->n; i++){
2365     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2366       va[i] = vb[i];
2367       if (idx) idx[i] = a->garray[idxb[i]];
2368     }
2369   }
2370 
2371   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2372   ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
2373   if (idxb) {
2374     ierr = PetscFree(idxb);CHKERRQ(ierr);
2375   }
2376   ierr = VecDestroy(vtmp);CHKERRQ(ierr);
2377   PetscFunctionReturn(0);
2378 }
2379 
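/*
   A minimal usage sketch (illustrative only; 'A' is an assumption): obtain, for every
   locally owned row, the entry of largest absolute value and, optionally, its global
   column index.

     Vec      rowmax;
     PetscInt m,*loc;
     ierr = MatGetLocalSize(A,&m,PETSC_NULL);CHKERRQ(ierr);
     ierr = MatGetVecs(A,PETSC_NULL,&rowmax);CHKERRQ(ierr); /* vector with A's row layout */
     ierr = PetscMalloc(m*sizeof(PetscInt),&loc);CHKERRQ(ierr);
     ierr = MatGetRowMaxAbs(A,rowmax,loc);CHKERRQ(ierr);
     ierr = PetscFree(loc);CHKERRQ(ierr);
     ierr = VecDestroy(rowmax);CHKERRQ(ierr);
*/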
2380 #undef __FUNCT__
2381 #define __FUNCT__ "MatGetRowMinAbs_MPIAIJ"
2382 PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2383 {
2384   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2385   PetscErrorCode ierr;
2386   PetscInt       i,*idxb = 0;
2387   PetscScalar    *va,*vb;
2388   Vec            vtmp;
2389 
2390   PetscFunctionBegin;
2391   ierr = MatGetRowMinAbs(a->A,v,idx);CHKERRQ(ierr);
2392   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2393   if (idx) {
2394     for (i=0; i<A->rmap->n; i++) {
2395       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2396     }
2397   }
2398 
2399   ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
2400   if (idx) {
2401     ierr = PetscMalloc(A->rmap->n*sizeof(PetscInt),&idxb);CHKERRQ(ierr);
2402   }
2403   ierr = MatGetRowMinAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
2404   ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);
2405 
2406   for (i=0; i<A->rmap->n; i++){
2407     if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2408       va[i] = vb[i];
2409       if (idx) idx[i] = a->garray[idxb[i]];
2410     }
2411   }
2412 
2413   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2414   ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
2415   if (idxb) {
2416     ierr = PetscFree(idxb);CHKERRQ(ierr);
2417   }
2418   ierr = VecDestroy(vtmp);CHKERRQ(ierr);
2419   PetscFunctionReturn(0);
2420 }
2421 
2422 #undef __FUNCT__
2423 #define __FUNCT__ "MatGetRowMin_MPIAIJ"
2424 PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2425 {
2426   Mat_MPIAIJ    *mat    = (Mat_MPIAIJ *) A->data;
2427   PetscInt       n      = A->rmap->n;
2428   PetscInt       cstart = A->cmap->rstart;
2429   PetscInt      *cmap   = mat->garray;
2430   PetscInt      *diagIdx, *offdiagIdx;
2431   Vec            diagV, offdiagV;
2432   PetscScalar   *a, *diagA, *offdiagA;
2433   PetscInt       r;
2434   PetscErrorCode ierr;
2435 
2436   PetscFunctionBegin;
2437   ierr = PetscMalloc2(n,PetscInt,&diagIdx,n,PetscInt,&offdiagIdx);CHKERRQ(ierr);
2438   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
2439   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
2440   ierr = MatGetRowMin(mat->A, diagV,    diagIdx);CHKERRQ(ierr);
2441   ierr = MatGetRowMin(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
2442   ierr = VecGetArray(v,        &a);CHKERRQ(ierr);
2443   ierr = VecGetArray(diagV,    &diagA);CHKERRQ(ierr);
2444   ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2445   for(r = 0; r < n; ++r) {
2446     if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2447       a[r]   = diagA[r];
2448       idx[r] = cstart + diagIdx[r];
2449     } else {
2450       a[r]   = offdiagA[r];
2451       idx[r] = cmap[offdiagIdx[r]];
2452     }
2453   }
2454   ierr = VecRestoreArray(v,        &a);CHKERRQ(ierr);
2455   ierr = VecRestoreArray(diagV,    &diagA);CHKERRQ(ierr);
2456   ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2457   ierr = VecDestroy(diagV);CHKERRQ(ierr);
2458   ierr = VecDestroy(offdiagV);CHKERRQ(ierr);
2459   ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
2460   PetscFunctionReturn(0);
2461 }
2462 
2463 #undef __FUNCT__
2464 #define __FUNCT__ "MatGetRowMax_MPIAIJ"
2465 PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2466 {
2467   Mat_MPIAIJ    *mat    = (Mat_MPIAIJ *) A->data;
2468   PetscInt       n      = A->rmap->n;
2469   PetscInt       cstart = A->cmap->rstart;
2470   PetscInt      *cmap   = mat->garray;
2471   PetscInt      *diagIdx, *offdiagIdx;
2472   Vec            diagV, offdiagV;
2473   PetscScalar   *a, *diagA, *offdiagA;
2474   PetscInt       r;
2475   PetscErrorCode ierr;
2476 
2477   PetscFunctionBegin;
2478   ierr = PetscMalloc2(n,PetscInt,&diagIdx,n,PetscInt,&offdiagIdx);CHKERRQ(ierr);
2479   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
2480   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
2481   ierr = MatGetRowMax(mat->A, diagV,    diagIdx);CHKERRQ(ierr);
2482   ierr = MatGetRowMax(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
2483   ierr = VecGetArray(v,        &a);CHKERRQ(ierr);
2484   ierr = VecGetArray(diagV,    &diagA);CHKERRQ(ierr);
2485   ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2486   for(r = 0; r < n; ++r) {
2487     if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2488       a[r]   = diagA[r];
2489       idx[r] = cstart + diagIdx[r];
2490     } else {
2491       a[r]   = offdiagA[r];
2492       idx[r] = cmap[offdiagIdx[r]];
2493     }
2494   }
2495   ierr = VecRestoreArray(v,        &a);CHKERRQ(ierr);
2496   ierr = VecRestoreArray(diagV,    &diagA);CHKERRQ(ierr);
2497   ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2498   ierr = VecDestroy(diagV);CHKERRQ(ierr);
2499   ierr = VecDestroy(offdiagV);CHKERRQ(ierr);
2500   ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
2501   PetscFunctionReturn(0);
2502 }
2503 
2504 #undef __FUNCT__
2505 #define __FUNCT__ "MatGetSeqNonzerostructure_MPIAIJ"
2506 PetscErrorCode MatGetSeqNonzerostructure_MPIAIJ(Mat mat,Mat *newmat[])
2507 {
2508   PetscErrorCode ierr;
2509 
2510   PetscFunctionBegin;
2511   ierr = MatGetSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,newmat);CHKERRQ(ierr);
2512   PetscFunctionReturn(0);
2513 }
2514 
2515 /* -------------------------------------------------------------------*/
2516 static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2517        MatGetRow_MPIAIJ,
2518        MatRestoreRow_MPIAIJ,
2519        MatMult_MPIAIJ,
2520 /* 4*/ MatMultAdd_MPIAIJ,
2521        MatMultTranspose_MPIAIJ,
2522        MatMultTransposeAdd_MPIAIJ,
2523 #ifdef PETSC_HAVE_PBGL
2524        MatSolve_MPIAIJ,
2525 #else
2526        0,
2527 #endif
2528        0,
2529        0,
2530 /*10*/ 0,
2531        0,
2532        0,
2533        MatRelax_MPIAIJ,
2534        MatTranspose_MPIAIJ,
2535 /*15*/ MatGetInfo_MPIAIJ,
2536        MatEqual_MPIAIJ,
2537        MatGetDiagonal_MPIAIJ,
2538        MatDiagonalScale_MPIAIJ,
2539        MatNorm_MPIAIJ,
2540 /*20*/ MatAssemblyBegin_MPIAIJ,
2541        MatAssemblyEnd_MPIAIJ,
2542        0,
2543        MatSetOption_MPIAIJ,
2544        MatZeroEntries_MPIAIJ,
2545 /*25*/ MatZeroRows_MPIAIJ,
2546        0,
2547 #ifdef PETSC_HAVE_PBGL
2548        0,
2549 #else
2550        0,
2551 #endif
2552        0,
2553        0,
2554 /*30*/ MatSetUpPreallocation_MPIAIJ,
2555 #ifdef PETSC_HAVE_PBGL
2556        0,
2557 #else
2558        0,
2559 #endif
2560        0,
2561        0,
2562        0,
2563 /*35*/ MatDuplicate_MPIAIJ,
2564        0,
2565        0,
2566        0,
2567        0,
2568 /*40*/ MatAXPY_MPIAIJ,
2569        MatGetSubMatrices_MPIAIJ,
2570        MatIncreaseOverlap_MPIAIJ,
2571        MatGetValues_MPIAIJ,
2572        MatCopy_MPIAIJ,
2573 /*45*/ MatGetRowMax_MPIAIJ,
2574        MatScale_MPIAIJ,
2575        0,
2576        0,
2577        0,
2578 /*50*/ MatSetBlockSize_MPIAIJ,
2579        0,
2580        0,
2581        0,
2582        0,
2583 /*55*/ MatFDColoringCreate_MPIAIJ,
2584        0,
2585        MatSetUnfactored_MPIAIJ,
2586        MatPermute_MPIAIJ,
2587        0,
2588 /*60*/ MatGetSubMatrix_MPIAIJ,
2589        MatDestroy_MPIAIJ,
2590        MatView_MPIAIJ,
2591        0,
2592        0,
2593 /*65*/ 0,
2594        0,
2595        0,
2596        0,
2597        0,
2598 /*70*/ MatGetRowMaxAbs_MPIAIJ,
2599        MatGetRowMinAbs_MPIAIJ,
2600        0,
2601        MatSetColoring_MPIAIJ,
2602 #if defined(PETSC_HAVE_ADIC)
2603        MatSetValuesAdic_MPIAIJ,
2604 #else
2605        0,
2606 #endif
2607        MatSetValuesAdifor_MPIAIJ,
2608 /*75*/ 0,
2609        0,
2610        0,
2611        0,
2612        0,
2613 /*80*/ 0,
2614        0,
2615        0,
2616 /*84*/ MatLoad_MPIAIJ,
2617        0,
2618        0,
2619        0,
2620        0,
2621        0,
2622 /*90*/ MatMatMult_MPIAIJ_MPIAIJ,
2623        MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2624        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2625        MatPtAP_Basic,
2626        MatPtAPSymbolic_MPIAIJ,
2627 /*95*/ MatPtAPNumeric_MPIAIJ,
2628        0,
2629        0,
2630        0,
2631        0,
2632 /*100*/0,
2633        MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2634        MatPtAPNumeric_MPIAIJ_MPIAIJ,
2635        MatConjugate_MPIAIJ,
2636        0,
2637 /*105*/MatSetValuesRow_MPIAIJ,
2638        MatRealPart_MPIAIJ,
2639        MatImaginaryPart_MPIAIJ,
2640        0,
2641        0,
2642 /*110*/0,
2643        MatGetRedundantMatrix_MPIAIJ,
2644        MatGetRowMin_MPIAIJ,
2645        0,
2646        0,
2647 /*115*/MatGetSeqNonzerostructure_MPIAIJ};
2648 
2649 /* ----------------------------------------------------------------------------------------*/
2650 
2651 EXTERN_C_BEGIN
2652 #undef __FUNCT__
2653 #define __FUNCT__ "MatStoreValues_MPIAIJ"
2654 PetscErrorCode PETSCMAT_DLLEXPORT MatStoreValues_MPIAIJ(Mat mat)
2655 {
2656   Mat_MPIAIJ     *aij = (Mat_MPIAIJ *)mat->data;
2657   PetscErrorCode ierr;
2658 
2659   PetscFunctionBegin;
2660   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
2661   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
2662   PetscFunctionReturn(0);
2663 }
2664 EXTERN_C_END
2665 
2666 EXTERN_C_BEGIN
2667 #undef __FUNCT__
2668 #define __FUNCT__ "MatRetrieveValues_MPIAIJ"
2669 PetscErrorCode PETSCMAT_DLLEXPORT MatRetrieveValues_MPIAIJ(Mat mat)
2670 {
2671   Mat_MPIAIJ     *aij = (Mat_MPIAIJ *)mat->data;
2672   PetscErrorCode ierr;
2673 
2674   PetscFunctionBegin;
2675   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
2676   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
2677   PetscFunctionReturn(0);
2678 }
2679 EXTERN_C_END
2680 
2681 #include "petscpc.h"
2682 EXTERN_C_BEGIN
2683 #undef __FUNCT__
2684 #define __FUNCT__ "MatMPIAIJSetPreallocation_MPIAIJ"
2685 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2686 {
2687   Mat_MPIAIJ     *b;
2688   PetscErrorCode ierr;
2689   PetscInt       i;
2690 
2691   PetscFunctionBegin;
2692   if (d_nz == PETSC_DEFAULT || d_nz == PETSC_DECIDE) d_nz = 5;
2693   if (o_nz == PETSC_DEFAULT || o_nz == PETSC_DECIDE) o_nz = 2;
2694   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %D",d_nz);
2695   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %D",o_nz);
2696 
2697   ierr = PetscMapSetBlockSize(B->rmap,1);CHKERRQ(ierr);
2698   ierr = PetscMapSetBlockSize(B->cmap,1);CHKERRQ(ierr);
2699   ierr = PetscMapSetUp(B->rmap);CHKERRQ(ierr);
2700   ierr = PetscMapSetUp(B->cmap);CHKERRQ(ierr);
2701   if (d_nnz) {
2702     for (i=0; i<B->rmap->n; i++) {
2703       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %D value %D",i,d_nnz[i]);
2704     }
2705   }
2706   if (o_nnz) {
2707     for (i=0; i<B->rmap->n; i++) {
2708       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %D value %D",i,o_nnz[i]);
2709     }
2710   }
2711   b = (Mat_MPIAIJ*)B->data;
2712 
2713   if (!B->preallocated) {
2714     /* Explicitly create 2 MATSEQAIJ matrices. */
2715     ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
2716     ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
2717     ierr = MatSetType(b->A,MATSEQAIJ);CHKERRQ(ierr);
2718     ierr = PetscLogObjectParent(B,b->A);CHKERRQ(ierr);
2719     ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
2720     ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
2721     ierr = MatSetType(b->B,MATSEQAIJ);CHKERRQ(ierr);
2722     ierr = PetscLogObjectParent(B,b->B);CHKERRQ(ierr);
2723   }
2724 
2725   ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
2726   ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
2727   B->preallocated = PETSC_TRUE;
2728   PetscFunctionReturn(0);
2729 }
2730 EXTERN_C_END
2731 
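/*
   A minimal usage sketch (illustrative only; 'M' and 'N' are assumed global sizes):
   create a parallel AIJ matrix and preallocate 5 nonzeros per row in the diagonal
   block and 2 in the off-diagonal block, the same defaults the routine above falls
   back to when PETSC_DEFAULT is passed.

     Mat A;
     ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,5,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
*/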
2732 #undef __FUNCT__
2733 #define __FUNCT__ "MatDuplicate_MPIAIJ"
2734 PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2735 {
2736   Mat            mat;
2737   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2738   PetscErrorCode ierr;
2739 
2740   PetscFunctionBegin;
2741   *newmat       = 0;
2742   ierr = MatCreate(((PetscObject)matin)->comm,&mat);CHKERRQ(ierr);
2743   ierr = MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);CHKERRQ(ierr);
2744   ierr = MatSetType(mat,((PetscObject)matin)->type_name);CHKERRQ(ierr);
2745   ierr = PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
2746   a    = (Mat_MPIAIJ*)mat->data;
2747 
2748   mat->factor       = matin->factor;
2749   mat->rmap->bs      = matin->rmap->bs;
2750   mat->assembled    = PETSC_TRUE;
2751   mat->insertmode   = NOT_SET_VALUES;
2752   mat->preallocated = PETSC_TRUE;
2753 
2754   a->size           = oldmat->size;
2755   a->rank           = oldmat->rank;
2756   a->donotstash     = oldmat->donotstash;
2757   a->roworiented    = oldmat->roworiented;
2758   a->rowindices     = 0;
2759   a->rowvalues      = 0;
2760   a->getrowactive   = PETSC_FALSE;
2761 
2762   ierr = PetscMapCopy(((PetscObject)mat)->comm,matin->rmap,mat->rmap);CHKERRQ(ierr);
2763   ierr = PetscMapCopy(((PetscObject)mat)->comm,matin->cmap,mat->cmap);CHKERRQ(ierr);
2764 
2765   ierr = MatStashCreate_Private(((PetscObject)matin)->comm,1,&mat->stash);CHKERRQ(ierr);
2766   if (oldmat->colmap) {
2767 #if defined (PETSC_USE_CTABLE)
2768     ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
2769 #else
2770     ierr = PetscMalloc((mat->cmap->N)*sizeof(PetscInt),&a->colmap);CHKERRQ(ierr);
2771     ierr = PetscLogObjectMemory(mat,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
2772     ierr = PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
2773 #endif
2774   } else a->colmap = 0;
2775   if (oldmat->garray) {
2776     PetscInt len;
2777     len  = oldmat->B->cmap->n;
2778     ierr = PetscMalloc((len+1)*sizeof(PetscInt),&a->garray);CHKERRQ(ierr);
2779     ierr = PetscLogObjectMemory(mat,len*sizeof(PetscInt));CHKERRQ(ierr);
2780     if (len) { ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));CHKERRQ(ierr); }
2781   } else a->garray = 0;
2782 
2783   ierr = VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
2784   ierr = PetscLogObjectParent(mat,a->lvec);CHKERRQ(ierr);
2785   ierr = VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
2786   ierr = PetscLogObjectParent(mat,a->Mvctx);CHKERRQ(ierr);
2787   ierr = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
2788   ierr = PetscLogObjectParent(mat,a->A);CHKERRQ(ierr);
2789   ierr = MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
2790   ierr = PetscLogObjectParent(mat,a->B);CHKERRQ(ierr);
2791   ierr = PetscFListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);CHKERRQ(ierr);
2792   *newmat = mat;
2793   PetscFunctionReturn(0);
2794 }
2795 
2796 #include "petscsys.h"
2797 
2798 #undef __FUNCT__
2799 #define __FUNCT__ "MatLoad_MPIAIJ"
2800 PetscErrorCode MatLoad_MPIAIJ(PetscViewer viewer, const MatType type,Mat *newmat)
2801 {
2802   Mat            A;
2803   PetscScalar    *vals,*svals;
2804   MPI_Comm       comm = ((PetscObject)viewer)->comm;
2805   MPI_Status     status;
2806   PetscErrorCode ierr;
2807   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag,mpicnt,mpimaxnz;
2808   PetscInt       i,nz,j,rstart,rend,mmax,maxnz;
2809   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2810   PetscInt       *ourlens = PETSC_NULL,*procsnz = PETSC_NULL,*offlens = PETSC_NULL,jj,*mycols,*smycols;
2811   PetscInt       cend,cstart,n,*rowners;
2812   int            fd;
2813 
2814   PetscFunctionBegin;
2815   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2816   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2817   if (!rank) {
2818     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
2819     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
2820     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2821   }
2822 
2823   ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
2824   M = header[1]; N = header[2];
2825   /* determine ownership of all rows */
2826   m    = M/size + ((M % size) > rank);
2827   ierr = PetscMalloc((size+1)*sizeof(PetscInt),&rowners);CHKERRQ(ierr);
2828   ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
2829 
2830   /* First process needs enough room for process with most rows */
2831   if (!rank) {
2832     mmax       = rowners[1];
2833     for (i=2; i<=size; i++) {
2834       mmax = PetscMax(mmax,rowners[i]);
2835     }
2836   } else mmax = m;
2837 
2838   rowners[0] = 0;
2839   for (i=2; i<=size; i++) {
2840     rowners[i] += rowners[i-1];
2841   }
2842   rstart = rowners[rank];
2843   rend   = rowners[rank+1];
2844 
2845   /* distribute row lengths to all processors */
2846   ierr    = PetscMalloc2(mmax,PetscInt,&ourlens,mmax,PetscInt,&offlens);CHKERRQ(ierr);
2847   if (!rank) {
2848     ierr = PetscBinaryRead(fd,ourlens,m,PETSC_INT);CHKERRQ(ierr);
2849     ierr = PetscMalloc(m*sizeof(PetscInt),&rowlengths);CHKERRQ(ierr);
2850     ierr = PetscMalloc(size*sizeof(PetscInt),&procsnz);CHKERRQ(ierr);
2851     ierr = PetscMemzero(procsnz,size*sizeof(PetscInt));CHKERRQ(ierr);
2852     for (j=0; j<m; j++) {
2853       procsnz[0] += ourlens[j];
2854     }
2855     for (i=1; i<size; i++) {
2856       ierr = PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);CHKERRQ(ierr);
2857       /* calculate the number of nonzeros on each processor */
2858       for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2859         procsnz[i] += rowlengths[j];
2860       }
2861       mpicnt = PetscMPIIntCast(rowners[i+1]-rowners[i]);
2862       ierr   = MPI_Send(rowlengths,mpicnt,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
2863     }
2864     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2865   } else {
2866     mpicnt = PetscMPIIntCast(m);
2867     ierr   = MPI_Recv(ourlens,mpicnt,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
2868   }
2869 
2870   if (!rank) {
2871     /* determine max buffer needed and allocate it */
2872     maxnz = 0;
2873     for (i=0; i<size; i++) {
2874       maxnz = PetscMax(maxnz,procsnz[i]);
2875     }
2876     ierr = PetscMalloc(maxnz*sizeof(PetscInt),&cols);CHKERRQ(ierr);
2877 
2878     /* read in my part of the matrix column indices  */
2879     nz   = procsnz[0];
2880     ierr = PetscMalloc(nz*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
2881     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2882 
2883     /* read in everyone else's and ship off */
2884     for (i=1; i<size; i++) {
2885       nz     = procsnz[i];
2886       ierr   = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2887       mpicnt = PetscMPIIntCast(nz);
2888       ierr   = MPI_Send(cols,mpicnt,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
2889     }
2890     ierr = PetscFree(cols);CHKERRQ(ierr);
2891   } else {
2892     /* determine buffer space needed for message */
2893     nz = 0;
2894     for (i=0; i<m; i++) {
2895       nz += ourlens[i];
2896     }
2897     ierr = PetscMalloc(nz*sizeof(PetscInt),&mycols);CHKERRQ(ierr);
2898 
2899     /* receive message of column indices*/
2900     mpicnt = PetscMPIIntCast(nz);
2901     ierr = MPI_Recv(mycols,mpicnt,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
2902     ierr = MPI_Get_count(&status,MPIU_INT,&mpimaxnz);CHKERRQ(ierr);
2903     if (mpimaxnz == MPI_UNDEFINED) {SETERRQ1(PETSC_ERR_LIB,"MPI_Get_count() returned MPI_UNDEFINED, expected %d",mpicnt);}
2904     else if (mpimaxnz < 0) {SETERRQ2(PETSC_ERR_LIB,"MPI_Get_count() returned impossible negative value %d, expected %d",mpimaxnz,mpicnt);}
2905     else if (mpimaxnz != mpicnt) {SETERRQ2(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file: expected %d received %d",mpicnt,mpimaxnz);}
2906   }
2907 
2908   /* determine column ownership if matrix is not square */
2909   if (N != M) {
2910     n      = N/size + ((N % size) > rank);
2911     ierr   = MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
2912     cstart = cend - n;
2913   } else {
2914     cstart = rstart;
2915     cend   = rend;
2916     n      = cend - cstart;
2917   }
2918 
2919   /* loop over local rows, determining number of off diagonal entries */
2920   ierr = PetscMemzero(offlens,m*sizeof(PetscInt));CHKERRQ(ierr);
2921   jj = 0;
2922   for (i=0; i<m; i++) {
2923     for (j=0; j<ourlens[i]; j++) {
2924       if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
2925       jj++;
2926     }
2927   }
2928 
2929   /* create our matrix */
2930   for (i=0; i<m; i++) {
2931     ourlens[i] -= offlens[i];
2932   }
2933   ierr = MatCreate(comm,&A);CHKERRQ(ierr);
2934   ierr = MatSetSizes(A,m,n,M,N);CHKERRQ(ierr);
2935   ierr = MatSetType(A,type);CHKERRQ(ierr);
2936   ierr = MatMPIAIJSetPreallocation(A,0,ourlens,0,offlens);CHKERRQ(ierr);
2937 
2938   for (i=0; i<m; i++) {
2939     ourlens[i] += offlens[i];
2940   }
2941 
2942   if (!rank) {
2943     ierr = PetscMalloc((maxnz+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
2944 
2945     /* read in my part of the matrix numerical values  */
2946     nz   = procsnz[0];
2947     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2948 
2949     /* insert into matrix */
2950     jj      = rstart;
2951     smycols = mycols;
2952     svals   = vals;
2953     for (i=0; i<m; i++) {
2954       ierr = MatSetValues_MPIAIJ(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
2955       smycols += ourlens[i];
2956       svals   += ourlens[i];
2957       jj++;
2958     }
2959 
2960     /* read in other processors and ship out */
2961     for (i=1; i<size; i++) {
2962       nz     = procsnz[i];
2963       ierr   = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2964       mpicnt = PetscMPIIntCast(nz);
2965       ierr   = MPI_Send(vals,mpicnt,MPIU_SCALAR,i,((PetscObject)A)->tag,comm);CHKERRQ(ierr);
2966     }
2967     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2968   } else {
2969     /* receive numeric values */
2970     ierr = PetscMalloc((nz+1)*sizeof(PetscScalar),&vals);CHKERRQ(ierr);
2971 
2972     /* receive message of values*/
2973     mpicnt = PetscMPIIntCast(nz);
2974     ierr   = MPI_Recv(vals,mpicnt,MPIU_SCALAR,0,((PetscObject)A)->tag,comm,&status);CHKERRQ(ierr);
2975     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&mpimaxnz);CHKERRQ(ierr);
2976     if (mpimaxnz == MPI_UNDEFINED) {SETERRQ1(PETSC_ERR_LIB,"MPI_Get_count() returned MPI_UNDEFINED, expected %d",mpicnt);}
2977     else if (mpimaxnz < 0) {SETERRQ2(PETSC_ERR_LIB,"MPI_Get_count() returned impossible negative value %d, expected %d",mpimaxnz,mpicnt);}
2978     else if (mpimaxnz != mpicnt) {SETERRQ2(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file: expected %d received %d",mpicnt,mpimaxnz);}
2979 
2980     /* insert into matrix */
2981     jj      = rstart;
2982     smycols = mycols;
2983     svals   = vals;
2984     for (i=0; i<m; i++) {
2985       ierr     = MatSetValues_MPIAIJ(A,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
2986       smycols += ourlens[i];
2987       svals   += ourlens[i];
2988       jj++;
2989     }
2990   }
2991   ierr = PetscFree2(ourlens,offlens);CHKERRQ(ierr);
2992   ierr = PetscFree(vals);CHKERRQ(ierr);
2993   ierr = PetscFree(mycols);CHKERRQ(ierr);
2994   ierr = PetscFree(rowners);CHKERRQ(ierr);
2995 
2996   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2997   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2998   *newmat = A;
2999   PetscFunctionReturn(0);
3000 }
3001 
3002 #undef __FUNCT__
3003 #define __FUNCT__ "MatGetSubMatrix_MPIAIJ"
3004 /*
3005     Not great since it makes two copies of the submatrix: first a SeqAIJ matrix
3006   locally, and then the final result by concatenating the local matrices.
3007   Writing it directly would be much like MatGetSubMatrices_MPIAIJ().
3008 */
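/*
   A usage sketch, assuming the public MatGetSubMatrix() entry point shares this
   routine's calling sequence (isrow/iscol are caller-created index sets and csize
   may be PETSC_DECIDE); names here are illustrative only:

     ierr = MatGetSubMatrix(mat,isrow,iscol,PETSC_DECIDE,MAT_INITIAL_MATRIX,&sub);CHKERRQ(ierr);
     ...
     ierr = MatGetSubMatrix(mat,isrow,iscol,PETSC_DECIDE,MAT_REUSE_MATRIX,&sub);CHKERRQ(ierr);
*/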
3009 PetscErrorCode MatGetSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3010 {
3011   PetscErrorCode ierr;
3012   PetscMPIInt    rank,size;
3013   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j;
3014   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3015   Mat            *local,M,Mreuse;
3016   MatScalar      *vwork,*aa;
3017   MPI_Comm       comm = ((PetscObject)mat)->comm;
3018   Mat_SeqAIJ     *aij;
3019 
3020 
3021   PetscFunctionBegin;
3022   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3023   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3024 
3025   if (call ==  MAT_REUSE_MATRIX) {
3026     ierr = PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject *)&Mreuse);CHKERRQ(ierr);
3027     if (!Mreuse) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3028     local = &Mreuse;
3029     ierr  = MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,&local);CHKERRQ(ierr);
3030   } else {
3031     ierr   = MatGetSubMatrices(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&local);CHKERRQ(ierr);
3032     Mreuse = *local;
3033     ierr   = PetscFree(local);CHKERRQ(ierr);
3034   }
3035 
3036   /*
3037       m - number of local rows
3038       n - number of columns (same on all processors)
3039       rstart - first row in new global matrix generated
3040   */
3041   ierr = MatGetSize(Mreuse,&m,&n);CHKERRQ(ierr);
3042   if (call == MAT_INITIAL_MATRIX) {
3043     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3044     ii  = aij->i;
3045     jj  = aij->j;
3046 
3047     /*
3048         Determine the number of non-zeros in the diagonal and off-diagonal
3049         portions of the matrix in order to do correct preallocation
3050     */
3051 
3052     /* first get start and end of "diagonal" columns */
3053     if (csize == PETSC_DECIDE) {
3054       ierr = ISGetSize(isrow,&mglobal);CHKERRQ(ierr);
3055       if (mglobal == n) { /* square matrix */
3056 	nlocal = m;
3057       } else {
3058         nlocal = n/size + ((n % size) > rank);
3059       }
3060     } else {
3061       nlocal = csize;
3062     }
3063     ierr   = MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
3064     rstart = rend - nlocal;
3065     if (rank == size - 1 && rend != n) {
3066       SETERRQ2(PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3067     }
3068 
3069     /* next, compute all the lengths */
3070     ierr  = PetscMalloc((2*m+1)*sizeof(PetscInt),&dlens);CHKERRQ(ierr);
3071     olens = dlens + m;
3072     for (i=0; i<m; i++) {
3073       jend = ii[i+1] - ii[i];
3074       olen = 0;
3075       dlen = 0;
3076       for (j=0; j<jend; j++) {
3077         if (*jj < rstart || *jj >= rend) olen++;
3078         else dlen++;
3079         jj++;
3080       }
3081       olens[i] = olen;
3082       dlens[i] = dlen;
3083     }
3084     ierr = MatCreate(comm,&M);CHKERRQ(ierr);
3085     ierr = MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);CHKERRQ(ierr);
3086     ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr);
3087     ierr = MatMPIAIJSetPreallocation(M,0,dlens,0,olens);CHKERRQ(ierr);
3088     ierr = PetscFree(dlens);CHKERRQ(ierr);
3089   } else {
3090     PetscInt ml,nl;
3091 
3092     M = *newmat;
3093     ierr = MatGetLocalSize(M,&ml,&nl);CHKERRQ(ierr);
3094     if (ml != m) SETERRQ(PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3095     ierr = MatZeroEntries(M);CHKERRQ(ierr);
3096     /*
3097          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3098        rather than the slower MatSetValues().
3099     */
3100     M->was_assembled = PETSC_TRUE;
3101     M->assembled     = PETSC_FALSE;
3102   }
3103   ierr = MatGetOwnershipRange(M,&rstart,&rend);CHKERRQ(ierr);
3104   aij = (Mat_SeqAIJ*)(Mreuse)->data;
3105   ii  = aij->i;
3106   jj  = aij->j;
3107   aa  = aij->a;
3108   for (i=0; i<m; i++) {
3109     row   = rstart + i;
3110     nz    = ii[i+1] - ii[i];
3111     cwork = jj;     jj += nz;
3112     vwork = aa;     aa += nz;
3113     ierr = MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
3114   }
3115 
3116   ierr = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3117   ierr = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3118   *newmat = M;
3119 
3120   /* save submatrix used in processor for next request */
3121   if (call ==  MAT_INITIAL_MATRIX) {
3122     ierr = PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);CHKERRQ(ierr);
3123     ierr = PetscObjectDereference((PetscObject)Mreuse);CHKERRQ(ierr);
3124   }
3125 
3126   PetscFunctionReturn(0);
3127 }
3128 
3129 EXTERN_C_BEGIN
3130 #undef __FUNCT__
3131 #define __FUNCT__ "MatMPIAIJSetPreallocationCSR_MPIAIJ"
3132 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3133 {
3134   PetscInt       m,cstart, cend,j,nnz,i,d;
3135   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3136   const PetscInt *JJ;
3137   PetscScalar    *values;
3138   PetscErrorCode ierr;
3139 
3140   PetscFunctionBegin;
3141   if (Ii[0]) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0 it is %D",Ii[0]);
3142 
3143   ierr = PetscMapSetBlockSize(B->rmap,1);CHKERRQ(ierr);
3144   ierr = PetscMapSetBlockSize(B->cmap,1);CHKERRQ(ierr);
3145   ierr = PetscMapSetUp(B->rmap);CHKERRQ(ierr);
3146   ierr = PetscMapSetUp(B->cmap);CHKERRQ(ierr);
3147   m      = B->rmap->n;
3148   cstart = B->cmap->rstart;
3149   cend   = B->cmap->rend;
3150   rstart = B->rmap->rstart;
3151 
3152   ierr  = PetscMalloc((2*m+1)*sizeof(PetscInt),&d_nnz);CHKERRQ(ierr);
3153   o_nnz = d_nnz + m;
3154 
3155 #if defined(PETSC_USE_DEBUG)
3156   for (i=0; i<m; i++) {
3157     nnz     = Ii[i+1]- Ii[i];
3158     JJ      = J + Ii[i];
3159     if (nnz < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative %D number of columns",i,nnz);
3160     if (nnz && (JJ[0] < 0)) SETERRQ1(PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index",i);
3161     if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3162     for (j=1; j<nnz; j++) {
3163       if (JJ[j] <= JJ[j-1]) SETERRQ2(PETSC_ERR_ARG_WRONGSTATE,"Row %D has unsorted column index at %D location in column indices",i,j);
3164     }
3165   }
3166 #endif
3167 
3168   for (i=0; i<m; i++) {
3169     nnz     = Ii[i+1]- Ii[i];
3170     JJ      = J + Ii[i];
3171     nnz_max = PetscMax(nnz_max,nnz);
3172     for (j=0; j<nnz; j++) {
3173       if (*JJ >= cstart) break;
3174       JJ++;
3175     }
3176     d = 0;
3177     for (; j<nnz; j++) {
3178       if (*JJ++ >= cend) break;
3179       d++;
3180     }
3181     d_nnz[i] = d;
3182     o_nnz[i] = nnz - d;
3183   }
3184   ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
3185   ierr = PetscFree(d_nnz);CHKERRQ(ierr);
3186 
3187   if (v) values = (PetscScalar*)v;
3188   else {
3189     ierr = PetscMalloc((nnz_max+1)*sizeof(PetscScalar),&values);CHKERRQ(ierr);
3190     ierr = PetscMemzero(values,nnz_max*sizeof(PetscScalar));CHKERRQ(ierr);
3191   }
3192 
3193   for (i=0; i<m; i++) {
3194     ii   = i + rstart;
3195     nnz  = Ii[i+1]- Ii[i];
3196     ierr = MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);CHKERRQ(ierr);
3197   }
3198   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3199   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3200 
3201   if (!v) {
3202     ierr = PetscFree(values);CHKERRQ(ierr);
3203   }
3204   PetscFunctionReturn(0);
3205 }
3206 EXTERN_C_END
3207 
3208 #undef __FUNCT__
3209 #define __FUNCT__ "MatMPIAIJSetPreallocationCSR"
3210 /*@
3211    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3212    (the default parallel PETSc format).
3213 
3214    Collective on MPI_Comm
3215 
3216    Input Parameters:
3217 +  B - the matrix
3218 .  i - the indices into j for the start of each local row (starts with zero)
3219 .  j - the column indices for each local row (starts with zero); these must be sorted for each row
3220 -  v - optional values in the matrix
3221 
3222    Level: developer
3223 
3224    Notes:
3225        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3226      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3227      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3228 
3229        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
3230 
3231        The format used for the sparse matrix input is equivalent to a
3232     row-major ordering, i.e. for the following matrix, the input data expected is
3233     as shown:
3234 
3235         1 0 0
3236         2 0 3     P0
3237        -------
3238         4 5 6     P1
3239 
3240      Process0 [P0]: rows_owned=[0,1]
3241         i =  {0,1,3}  [size = nrow+1  = 2+1]
3242         j =  {0,0,2}  [size = nz = 3]
3243         v =  {1,2,3}  [size = nz = 3]
3244 
3245      Process1 [P1]: rows_owned=[2]
3246         i =  {0,3}    [size = nrow+1  = 1+1]
3247         j =  {0,1,2}  [size = nz = 3]
3248         v =  {4,5,6}  [size = nz = 3]
3249 
3250       The column indices for each row MUST be sorted.
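
      For instance, process 0 of the example above could set up its part as follows
      (a minimal sketch; the variable names are illustrative and comm is the shared communicator):

.vb
     PetscInt    i[] = {0,1,3};          /* row offsets for the 2 local rows */
     PetscInt    j[] = {0,0,2};          /* sorted global column indices     */
     PetscScalar v[] = {1.0,2.0,3.0};    /* values matching j[]              */

     ierr = MatCreate(comm,&B);CHKERRQ(ierr);
     ierr = MatSetSizes(B,2,PETSC_DECIDE,3,3);CHKERRQ(ierr);
     ierr = MatSetType(B,MATMPIAIJ);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocationCSR(B,i,j,v);CHKERRQ(ierr);
.ve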
3251 
3252 .keywords: matrix, aij, compressed row, sparse, parallel
3253 
3254 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateMPIAIJ(), MPIAIJ,
3255           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3256 @*/
3257 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3258 {
3259   PetscErrorCode ierr,(*f)(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]);
3260 
3261   PetscFunctionBegin;
3262   ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",(void (**)(void))&f);CHKERRQ(ierr);
3263   if (f) {
3264     ierr = (*f)(B,i,j,v);CHKERRQ(ierr);
3265   }
3266   PetscFunctionReturn(0);
3267 }
3268 
3269 #undef __FUNCT__
3270 #define __FUNCT__ "MatMPIAIJSetPreallocation"
3271 /*@C
3272    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3273    (the default parallel PETSc format).  For good matrix assembly performance
3274    the user should preallocate the matrix storage by setting the parameters
3275    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3276    performance can be increased by more than a factor of 50.
3277 
3278    Collective on MPI_Comm
3279 
3280    Input Parameters:
3281 +  A - the matrix
3282 .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3283            (same value is used for all local rows)
3284 .  d_nnz - array containing the number of nonzeros in the various rows of the
3285            DIAGONAL portion of the local submatrix (possibly different for each row)
3286            or PETSC_NULL, if d_nz is used to specify the nonzero structure.
3287            The size of this array is equal to the number of local rows, i.e 'm'.
3288            You must leave room for the diagonal entry even if it is zero.
3289 .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3290            submatrix (same value is used for all local rows).
3291 -  o_nnz - array containing the number of nonzeros in the various rows of the
3292            OFF-DIAGONAL portion of the local submatrix (possibly different for
3293            each row) or PETSC_NULL, if o_nz is used to specify the nonzero
3294            structure. The size of this array is equal to the number
3295            of local rows, i.e 'm'.
3296 
3297    If the *_nnz parameter is given then the *_nz parameter is ignored
3298 
3299    The AIJ format (also called the Yale sparse matrix format or
3300    compressed row storage (CSR)), is fully compatible with standard Fortran 77
3301    storage.  The stored row and column indices begin with zero.  See the users manual for details.
3302 
3303    The parallel matrix is partitioned such that the first m0 rows belong to
3304    process 0, the next m1 rows belong to process 1, the next m2 rows belong
3305    to process 2 etc.. where m0,m1,m2... are the input parameter 'm'.
3306 
3307    The DIAGONAL portion of the local submatrix of a processor can be defined
3308    as the submatrix which is obtained by extracting the part corresponding
3309    to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
3310    first row that belongs to the processor, and r2 is the last row belonging
3311    to this processor. This is a square mxm matrix. The remaining portion
3312    of the local submatrix (mxN) constitutes the OFF-DIAGONAL portion.
3313 
3314    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3315 
3316    You can call MatGetInfo() to get information on how effective the preallocation was;
3317    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3318    You can also run with the option -info and look for messages with the string
3319    malloc in them to see if additional memory allocation was needed.
3320 
3321    Example usage:
3322 
3323    Consider the following 8x8 matrix with 34 non-zero values, that is
3324    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3325    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3326    as follows:
3327 
3328 .vb
3329             1  2  0  |  0  3  0  |  0  4
3330     Proc0   0  5  6  |  7  0  0  |  8  0
3331             9  0 10  | 11  0  0  | 12  0
3332     -------------------------------------
3333            13  0 14  | 15 16 17  |  0  0
3334     Proc1   0 18  0  | 19 20 21  |  0  0
3335             0  0  0  | 22 23  0  | 24  0
3336     -------------------------------------
3337     Proc2  25 26 27  |  0  0 28  | 29  0
3338            30  0  0  | 31 32 33  |  0 34
3339 .ve
3340 
3341    This can be represented as a collection of submatrices as:
3342 
3343 .vb
3344       A B C
3345       D E F
3346       G H I
3347 .ve
3348 
3349    Where the submatrices A,B,C are owned by proc0, D,E,F are
3350    owned by proc1, G,H,I are owned by proc2.
3351 
3352    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3353    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3354    The 'M','N' parameters are 8,8, and have the same values on all procs.
3355 
3356    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3357    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3358    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3359    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3360    part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
3361    matrix, and [DF] as another SeqAIJ matrix.
3362 
3363    When d_nz, o_nz parameters are specified, d_nz storage elements are
3364    allocated for every row of the local diagonal submatrix, and o_nz
3365    storage locations are allocated for every row of the OFF-DIAGONAL submat.
3366    One way to choose d_nz and o_nz is to use the max nonzeros per local
3367    row for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
3368    In this case, the values of d_nz,o_nz are:
3369 .vb
3370      proc0 : dnz = 2, o_nz = 2
3371      proc1 : dnz = 3, o_nz = 2
3372      proc2 : dnz = 1, o_nz = 4
3373 .ve
3374    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3375    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3376    for proc2, i.e. we are using 12+15+10=37 storage locations to store
3377    34 values.
3378 
3379    When d_nnz, o_nnz parameters are specified, the storage is specified
3380    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3381    In the above case the values for d_nnz,o_nnz are:
3382 .vb
3383      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3384      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3385      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3386 .ve
3387    Here the space allocated is sum of all the above values i.e 34, and
3388    hence pre-allocation is perfect.
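
   With the d_nnz/o_nnz values above, process 0 could preallocate as follows (a minimal
   sketch; each process passes its own local sizes and arrays):

.vb
     PetscInt d_nnz[] = {2,2,2},o_nnz[] = {2,2,2};   /* proc0 counts from the example */

     ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,3,3,8,8);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
.ve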
3389 
3390    Level: intermediate
3391 
3392 .keywords: matrix, aij, compressed row, sparse, parallel
3393 
3394 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateMPIAIJ(), MatMPIAIJSetPreallocationCSR(),
3395           MPIAIJ, MatGetInfo()
3396 @*/
3397 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3398 {
3399   PetscErrorCode ierr,(*f)(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]);
3400 
3401   PetscFunctionBegin;
3402   ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",(void (**)(void))&f);CHKERRQ(ierr);
3403   if (f) {
3404     ierr = (*f)(B,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
3405   }
3406   PetscFunctionReturn(0);
3407 }
3408 
3409 #undef __FUNCT__
3410 #define __FUNCT__ "MatCreateMPIAIJWithArrays"
3411 /*@
3412      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local
3413          rows in standard CSR format.
3414 
3415    Collective on MPI_Comm
3416 
3417    Input Parameters:
3418 +  comm - MPI communicator
3419 .  m - number of local rows (Cannot be PETSC_DECIDE)
3420 .  n - This value should be the same as the local size used in creating the
3421        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3422        calculated if N is given) For square matrices n is almost always m.
3423 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3424 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3425 .   i - row indices
3426 .   j - column indices
3427 -   a - matrix values
3428 
3429    Output Parameter:
3430 .   mat - the matrix
3431 
3432    Level: intermediate
3433 
3434    Notes:
3435        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3436      thus you CANNOT change the matrix entries by changing the values of a[] after you have
3437      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3438 
3439        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
3440 
3441        The format used for the sparse matrix input is equivalent to a
3442     row-major ordering, i.e. for the following matrix, the input data expected is
3443     as shown:
3444 
3445         1 0 0
3446         2 0 3     P0
3447        -------
3448         4 5 6     P1
3449 
3450      Process0 [P0]: rows_owned=[0,1]
3451         i =  {0,1,3}  [size = nrow+1  = 2+1]
3452         j =  {0,0,2}  [size = nz = 3]
3453         v =  {1,2,3}  [size = nz = 3]
3454 
3455      Process1 [P1]: rows_owned=[2]
3456         i =  {0,3}    [size = nrow+1  = 1+1]
3457         j =  {0,1,2}  [size = nz = 3]
3458         v =  {4,5,6}  [size = nz = 3]
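
      With the arrays above, process 0 could create the matrix in one call (a minimal
      sketch; i, j, v are its local CSR arrays and comm is the shared communicator):

.vb
     ierr = MatCreateMPIAIJWithArrays(comm,2,PETSC_DECIDE,3,3,i,j,v,&A);CHKERRQ(ierr);
.ve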
3459 
3460 .keywords: matrix, aij, compressed row, sparse, parallel
3461 
3462 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3463           MPIAIJ, MatCreateMPIAIJ(), MatCreateMPIAIJWithSplitArrays()
3464 @*/
3465 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
3466 {
3467   PetscErrorCode ierr;
3468 
3469   PetscFunctionBegin;
3470   if (i[0]) {
3471     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
3472   }
3473   if (m < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
3474   ierr = MatCreate(comm,mat);CHKERRQ(ierr);
3475   ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
3476   ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
3477   ierr = MatMPIAIJSetPreallocationCSR(*mat,i,j,a);CHKERRQ(ierr);
3478   PetscFunctionReturn(0);
3479 }
3480 
3481 #undef __FUNCT__
3482 #define __FUNCT__ "MatCreateMPIAIJ"
3483 /*@C
3484    MatCreateMPIAIJ - Creates a sparse parallel matrix in AIJ format
3485    (the default parallel PETSc format).  For good matrix assembly performance
3486    the user should preallocate the matrix storage by setting the parameters
3487    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3488    performance can be increased by more than a factor of 50.
3489 
3490    Collective on MPI_Comm
3491 
3492    Input Parameters:
3493 +  comm - MPI communicator
3494 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3495            This value should be the same as the local size used in creating the
3496            y vector for the matrix-vector product y = Ax.
3497 .  n - This value should be the same as the local size used in creating the
3498        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3499        calculated if N is given) For square matrices n is almost always m.
3500 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3501 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3502 .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3503            (same value is used for all local rows)
3504 .  d_nnz - array containing the number of nonzeros in the various rows of the
3505            DIAGONAL portion of the local submatrix (possibly different for each row)
3506            or PETSC_NULL, if d_nz is used to specify the nonzero structure.
3507            The size of this array is equal to the number of local rows, i.e 'm'.
3508            You must leave room for the diagonal entry even if it is zero.
3509 .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3510            submatrix (same value is used for all local rows).
3511 -  o_nnz - array containing the number of nonzeros in the various rows of the
3512            OFF-DIAGONAL portion of the local submatrix (possibly different for
3513            each row) or PETSC_NULL, if o_nz is used to specify the nonzero
3514            structure. The size of this array is equal to the number
3515            of local rows, i.e 'm'.
3516 
3517    Output Parameter:
3518 .  A - the matrix
3519 
3520    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3521    MatXXXXSetPreallocation() paradigm instead of this routine directly.
3522    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation]
3523 
3524    Notes:
3525    If the *_nnz parameter is given then the *_nz parameter is ignored
3526 
3527    m,n,M,N parameters specify the size of the matrix, and its partitioning across
3528    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
3529    storage requirements for this matrix.
3530 
3531    If PETSC_DECIDE or  PETSC_DETERMINE is used for a particular argument on one
3532    processor then it must be used on all processors that share the object for
3533    that argument.
3534 
3535    The user MUST specify either the local or global matrix dimensions
3536    (possibly both).
3537 
3538    The parallel matrix is partitioned across processors such that the
3539    first m0 rows belong to process 0, the next m1 rows belong to
3540    process 1, the next m2 rows belong to process 2 etc.. where
3541    m0,m1,m2,.. are the input parameter 'm'. i.e each processor stores
3542    values corresponding to [m x N] submatrix.
3543 
3544    The columns are logically partitioned with the n0 columns belonging
3545    to 0th partition, the next n1 columns belonging to the next
3546    partition etc.. where n0,n1,n2... are the input parameter 'n'.
3547 
3548    The DIAGONAL portion of the local submatrix on any given processor
3549    is the submatrix corresponding to the rows and columns m,n
3550    corresponding to the given processor. i.e diagonal matrix on
3551    process 0 is [m0 x n0], diagonal matrix on process 1 is [m1 x n1]
3552    etc. The remaining portion of the local submatrix [m x (N-n)]
3553    constitute the OFF-DIAGONAL portion. The example below better
3554    illustrates this concept.
3555 
3556    For a square global matrix we define each processor's diagonal portion
3557    to be its local rows and the corresponding columns (a square submatrix);
3558    each processor's off-diagonal portion encompasses the remainder of the
3559    local matrix (a rectangular submatrix).
3560 
3561    If o_nnz, d_nnz are specified, then o_nz, and d_nz are ignored.
3562 
3563    When calling this routine with a single process communicator, a matrix of
3564    type SEQAIJ is returned.  If a matrix of type MPIAIJ is desired for this
3565    type of communicator, use the construction mechanism:
3566      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
3567 
3568    By default, this format uses inodes (identical nodes) when possible.
3569    We search for consecutive rows with the same nonzero structure, thereby
3570    reusing matrix information to achieve increased efficiency.
3571 
3572    Options Database Keys:
3573 +  -mat_no_inode  - Do not use inodes
3574 .  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
3575 -  -mat_aij_oneindex - Internally use indexing starting at 1
3576         rather than 0.  Note that when calling MatSetValues(),
3577         the user still MUST index entries starting at 0!
3578 
3579 
3580    Example usage:
3581 
3582    Consider the following 8x8 matrix with 34 non-zero values, that is
3583    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3584    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3585    as follows:
3586 
3587 .vb
3588             1  2  0  |  0  3  0  |  0  4
3589     Proc0   0  5  6  |  7  0  0  |  8  0
3590             9  0 10  | 11  0  0  | 12  0
3591     -------------------------------------
3592            13  0 14  | 15 16 17  |  0  0
3593     Proc1   0 18  0  | 19 20 21  |  0  0
3594             0  0  0  | 22 23  0  | 24  0
3595     -------------------------------------
3596     Proc2  25 26 27  |  0  0 28  | 29  0
3597            30  0  0  | 31 32 33  |  0 34
3598 .ve
3599 
3600    This can be represented as a collection of submatrices as:
3601 
3602 .vb
3603       A B C
3604       D E F
3605       G H I
3606 .ve
3607 
3608    Where the submatrices A,B,C are owned by proc0, D,E,F are
3609    owned by proc1, G,H,I are owned by proc2.
3610 
3611    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3612    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3613    The 'M','N' parameters are 8,8, and have the same values on all procs.
3614 
3615    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3616    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3617    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3618    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3619    part as SeqAIJ matrices, e.g., proc1 will store [E] as a SeqAIJ
3620    matrix, and [DF] as another SeqAIJ matrix.
3621 
3622    When d_nz, o_nz parameters are specified, d_nz storage elements are
3623    allocated for every row of the local diagonal submatrix, and o_nz
3624    storage locations are allocated for every row of the OFF-DIAGONAL submat.
3625    One way to choose d_nz and o_nz is to use the max nonzeros per local
3626    row for each of the local DIAGONAL, and the OFF-DIAGONAL submatrices.
3627    In this case, the values of d_nz,o_nz are:
3628 .vb
3629      proc0 : dnz = 2, o_nz = 2
3630      proc1 : dnz = 3, o_nz = 2
3631      proc2 : dnz = 1, o_nz = 4
3632 .ve
3633    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
3634    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, 2*(1+4)=10
3635    for proc2, i.e. we are using 12+15+10=37 storage locations to store
3636    34 values.
3637 
3638    When d_nnz, o_nnz parameters are specified, the storage is specified
3639    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3640    In the above case the values for d_nnz,o_nnz are:
3641 .vb
3642      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3643      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3644      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3645 .ve
3646    Here the space allocated is sum of all the above values i.e 34, and
3647    hence pre-allocation is perfect.
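
   For the example above, process 0 could create the matrix as follows (a minimal sketch;
   d_nnz and o_nnz are the proc0 arrays listed above, or pass PETSC_NULL and use d_nz/o_nz):

.vb
     ierr = MatCreateMPIAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);CHKERRQ(ierr);
.ve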
3648 
3649    Level: intermediate
3650 
3651 .keywords: matrix, aij, compressed row, sparse, parallel
3652 
3653 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3654           MPIAIJ, MatCreateMPIAIJWithArrays()
3655 @*/
3656 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
3657 {
3658   PetscErrorCode ierr;
3659   PetscMPIInt    size;
3660 
3661   PetscFunctionBegin;
3662   ierr = MatCreate(comm,A);CHKERRQ(ierr);
3663   ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
3664   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3665   if (size > 1) {
3666     ierr = MatSetType(*A,MATMPIAIJ);CHKERRQ(ierr);
3667     ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
3668   } else {
3669     ierr = MatSetType(*A,MATSEQAIJ);CHKERRQ(ierr);
3670     ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
3671   }
3672   PetscFunctionReturn(0);
3673 }
3674 
3675 #undef __FUNCT__
3676 #define __FUNCT__ "MatMPIAIJGetSeqAIJ"
3677 PetscErrorCode PETSCMAT_DLLEXPORT MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,PetscInt *colmap[])
3678 {
3679   Mat_MPIAIJ *a = (Mat_MPIAIJ *)A->data;
3680 
3681   PetscFunctionBegin;
3682   *Ad     = a->A;
3683   *Ao     = a->B;
3684   *colmap = a->garray;
3685   PetscFunctionReturn(0);
3686 }
3687 
3688 #undef __FUNCT__
3689 #define __FUNCT__ "MatSetColoring_MPIAIJ"
3690 PetscErrorCode MatSetColoring_MPIAIJ(Mat A,ISColoring coloring)
3691 {
3692   PetscErrorCode ierr;
3693   PetscInt       i;
3694   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
3695 
3696   PetscFunctionBegin;
3697   if (coloring->ctype == IS_COLORING_GLOBAL) {
3698     ISColoringValue *allcolors,*colors;
3699     ISColoring      ocoloring;
3700 
3701     /* set coloring for diagonal portion */
3702     ierr = MatSetColoring_SeqAIJ(a->A,coloring);CHKERRQ(ierr);
3703 
3704     /* set coloring for off-diagonal portion */
3705     ierr = ISAllGatherColors(((PetscObject)A)->comm,coloring->n,coloring->colors,PETSC_NULL,&allcolors);CHKERRQ(ierr);
3706     ierr = PetscMalloc((a->B->cmap->n+1)*sizeof(ISColoringValue),&colors);CHKERRQ(ierr);
3707     for (i=0; i<a->B->cmap->n; i++) {
3708       colors[i] = allcolors[a->garray[i]];
3709     }
3710     ierr = PetscFree(allcolors);CHKERRQ(ierr);
3711     ierr = ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,&ocoloring);CHKERRQ(ierr);
3712     ierr = MatSetColoring_SeqAIJ(a->B,ocoloring);CHKERRQ(ierr);
3713     ierr = ISColoringDestroy(ocoloring);CHKERRQ(ierr);
3714   } else if (coloring->ctype == IS_COLORING_GHOSTED) {
3715     ISColoringValue *colors;
3716     PetscInt        *larray;
3717     ISColoring      ocoloring;
3718 
3719     /* set coloring for diagonal portion */
3720     ierr = PetscMalloc((a->A->cmap->n+1)*sizeof(PetscInt),&larray);CHKERRQ(ierr);
3721     for (i=0; i<a->A->cmap->n; i++) {
3722       larray[i] = i + A->cmap->rstart;
3723     }
3724     ierr = ISGlobalToLocalMappingApply(A->mapping,IS_GTOLM_MASK,a->A->cmap->n,larray,PETSC_NULL,larray);CHKERRQ(ierr);
3725     ierr = PetscMalloc((a->A->cmap->n+1)*sizeof(ISColoringValue),&colors);CHKERRQ(ierr);
3726     for (i=0; i<a->A->cmap->n; i++) {
3727       colors[i] = coloring->colors[larray[i]];
3728     }
3729     ierr = PetscFree(larray);CHKERRQ(ierr);
3730     ierr = ISColoringCreate(PETSC_COMM_SELF,coloring->n,a->A->cmap->n,colors,&ocoloring);CHKERRQ(ierr);
3731     ierr = MatSetColoring_SeqAIJ(a->A,ocoloring);CHKERRQ(ierr);
3732     ierr = ISColoringDestroy(ocoloring);CHKERRQ(ierr);
3733 
3734     /* set coloring for off-diagonal portion */
3735     ierr = PetscMalloc((a->B->cmap->n+1)*sizeof(PetscInt),&larray);CHKERRQ(ierr);
3736     ierr = ISGlobalToLocalMappingApply(A->mapping,IS_GTOLM_MASK,a->B->cmap->n,a->garray,PETSC_NULL,larray);CHKERRQ(ierr);
3737     ierr = PetscMalloc((a->B->cmap->n+1)*sizeof(ISColoringValue),&colors);CHKERRQ(ierr);
3738     for (i=0; i<a->B->cmap->n; i++) {
3739       colors[i] = coloring->colors[larray[i]];
3740     }
3741     ierr = PetscFree(larray);CHKERRQ(ierr);
3742     ierr = ISColoringCreate(MPI_COMM_SELF,coloring->n,a->B->cmap->n,colors,&ocoloring);CHKERRQ(ierr);
3743     ierr = MatSetColoring_SeqAIJ(a->B,ocoloring);CHKERRQ(ierr);
3744     ierr = ISColoringDestroy(ocoloring);CHKERRQ(ierr);
3745   } else {
3746     SETERRQ1(PETSC_ERR_SUP,"No support ISColoringType %d",(int)coloring->ctype);
3747   }
3748 
3749   PetscFunctionReturn(0);
3750 }
3751 
3752 #if defined(PETSC_HAVE_ADIC)
3753 #undef __FUNCT__
3754 #define __FUNCT__ "MatSetValuesAdic_MPIAIJ"
3755 PetscErrorCode MatSetValuesAdic_MPIAIJ(Mat A,void *advalues)
3756 {
3757   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
3758   PetscErrorCode ierr;
3759 
3760   PetscFunctionBegin;
3761   ierr = MatSetValuesAdic_SeqAIJ(a->A,advalues);CHKERRQ(ierr);
3762   ierr = MatSetValuesAdic_SeqAIJ(a->B,advalues);CHKERRQ(ierr);
3763   PetscFunctionReturn(0);
3764 }
3765 #endif
3766 
3767 #undef __FUNCT__
3768 #define __FUNCT__ "MatSetValuesAdifor_MPIAIJ"
3769 PetscErrorCode MatSetValuesAdifor_MPIAIJ(Mat A,PetscInt nl,void *advalues)
3770 {
3771   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
3772   PetscErrorCode ierr;
3773 
3774   PetscFunctionBegin;
3775   ierr = MatSetValuesAdifor_SeqAIJ(a->A,nl,advalues);CHKERRQ(ierr);
3776   ierr = MatSetValuesAdifor_SeqAIJ(a->B,nl,advalues);CHKERRQ(ierr);
3777   PetscFunctionReturn(0);
3778 }
3779 
3780 #undef __FUNCT__
3781 #define __FUNCT__ "MatMerge"
3782 /*@
3783       MatMerge - Creates a single large PETSc matrix by concatenating sequential
3784                  matrices from each processor
3785 
3786     Collective on MPI_Comm
3787 
3788    Input Parameters:
3789 +    comm - the communicator the parallel matrix will live on
3790 .    inmat - the input sequential matrices
3791 .    n - number of local columns (or PETSC_DECIDE)
3792 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
3793 
3794    Output Parameter:
3795 .    outmat - the parallel matrix generated
3796 
3797     Level: advanced
3798 
3799    Notes: The number of columns of the matrix in EACH processor MUST be the same.
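
   A minimal calling sketch (inmat is this process's sequential matrix; note that the
   routine destroys inmat, see the MatDestroy() call in the implementation below):

.vb
     ierr = MatMerge(PETSC_COMM_WORLD,inmat,PETSC_DECIDE,MAT_INITIAL_MATRIX,&outmat);CHKERRQ(ierr);
.ve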
3800 
3801 @*/
3802 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
3803 {
3804   PetscErrorCode ierr;
3805   PetscInt       m,N,i,rstart,nnz,Ii,*dnz,*onz;
3806   PetscInt       *indx;
3807   PetscScalar    *values;
3808 
3809   PetscFunctionBegin;
3810   ierr = MatGetSize(inmat,&m,&N);CHKERRQ(ierr);
3811   if (scall == MAT_INITIAL_MATRIX){
3812     /* count nonzeros in each row, for diagonal and off diagonal portion of matrix */
3813     if (n == PETSC_DECIDE){
3814       ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
3815     }
3816     ierr = MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
3817     rstart -= m;
3818 
3819     ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr);
3820     for (i=0;i<m;i++) {
3821       ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,PETSC_NULL);CHKERRQ(ierr);
3822       ierr = MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);CHKERRQ(ierr);
3823       ierr = MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,PETSC_NULL);CHKERRQ(ierr);
3824     }
3825     /* This routine will ONLY return MPIAIJ type matrix */
3826     ierr = MatCreate(comm,outmat);CHKERRQ(ierr);
3827     ierr = MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
3828     ierr = MatSetType(*outmat,MATMPIAIJ);CHKERRQ(ierr);
3829     ierr = MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);CHKERRQ(ierr);
3830     ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
3831 
3832   } else if (scall == MAT_REUSE_MATRIX){
3833     ierr = MatGetOwnershipRange(*outmat,&rstart,PETSC_NULL);CHKERRQ(ierr);
3834   } else {
3835     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
3836   }
3837 
3838   for (i=0;i<m;i++) {
3839     ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr);
3840     Ii    = i + rstart;
3841     ierr = MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr);
3842     ierr = MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr);
3843   }
3844   ierr = MatDestroy(inmat);CHKERRQ(ierr);
3845   ierr = MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3846   ierr = MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3847 
3848   PetscFunctionReturn(0);
3849 }
3850 
3851 #undef __FUNCT__
3852 #define __FUNCT__ "MatFileSplit"
3853 PetscErrorCode MatFileSplit(Mat A,char *outfile)
3854 {
3855   PetscErrorCode    ierr;
3856   PetscMPIInt       rank;
3857   PetscInt          m,N,i,rstart,nnz;
3858   size_t            len;
3859   const PetscInt    *indx;
3860   PetscViewer       out;
3861   char              *name;
3862   Mat               B;
3863   const PetscScalar *values;
3864 
3865   PetscFunctionBegin;
3866   ierr = MatGetLocalSize(A,&m,0);CHKERRQ(ierr);
3867   ierr = MatGetSize(A,0,&N);CHKERRQ(ierr);
3868   /* Should this be the type of the diagonal block of A? */
3869   ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr);
3870   ierr = MatSetSizes(B,m,N,m,N);CHKERRQ(ierr);
3871   ierr = MatSetType(B,MATSEQAIJ);CHKERRQ(ierr);
3872   ierr = MatSeqAIJSetPreallocation(B,0,PETSC_NULL);CHKERRQ(ierr);
3873   ierr = MatGetOwnershipRange(A,&rstart,0);CHKERRQ(ierr);
3874   for (i=0;i<m;i++) {
3875     ierr = MatGetRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr);
3876     ierr = MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr);
3877     ierr = MatRestoreRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr);
3878   }
3879   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3880   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3881 
3882   ierr = MPI_Comm_rank(((PetscObject)A)->comm,&rank);CHKERRQ(ierr);
3883   ierr = PetscStrlen(outfile,&len);CHKERRQ(ierr);
3884   ierr = PetscMalloc((len+16)*sizeof(char),&name);CHKERRQ(ierr); /* leave room for the ".<rank>" suffix */
3885   sprintf(name,"%s.%d",outfile,rank);
3886   ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);CHKERRQ(ierr);
3887   ierr = PetscFree(name);CHKERRQ(ierr);
3888   ierr = MatView(B,out);CHKERRQ(ierr);
3889   ierr = PetscViewerDestroy(out);CHKERRQ(ierr);
3890   ierr = MatDestroy(B);CHKERRQ(ierr);
3891   PetscFunctionReturn(0);
3892 }
3893 
3894 EXTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
3895 #undef __FUNCT__
3896 #define __FUNCT__ "MatDestroy_MPIAIJ_SeqsToMPI"
3897 PetscErrorCode PETSCMAT_DLLEXPORT MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
3898 {
3899   PetscErrorCode       ierr;
3900   Mat_Merge_SeqsToMPI  *merge;
3901   PetscContainer       container;
3902 
3903   PetscFunctionBegin;
3904   ierr = PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr);
3905   if (container) {
3906     ierr = PetscContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr);
3907     ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
3908     ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
3909     ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
3910     ierr = PetscFree(merge->bi);CHKERRQ(ierr);
3911     ierr = PetscFree(merge->bj);CHKERRQ(ierr);
3912     ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
3913     ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
3914     ierr = PetscFree(merge->coi);CHKERRQ(ierr);
3915     ierr = PetscFree(merge->coj);CHKERRQ(ierr);
3916     ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
3917     ierr = PetscFree(merge->rowmap.range);CHKERRQ(ierr);
3918 
3919     ierr = PetscContainerDestroy(container);CHKERRQ(ierr);
3920     ierr = PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);CHKERRQ(ierr);
3921     ierr = PetscFree(merge);CHKERRQ(ierr);
3922   }
3923 
3924   ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
3925   PetscFunctionReturn(0);
3926 }
3927 
3928 #include "../src/mat/utils/freespace.h"
3929 #include "petscbt.h"
3930 
3931 #undef __FUNCT__
3932 #define __FUNCT__ "MatMerge_SeqsToMPINumeric"
3933 /*@C
3934       MatMerge_SeqsToMPI - Creates an MPIAIJ matrix by adding sequential
3935                  matrices from each processor
3936 
3937     Collective on MPI_Comm
3938 
3939    Input Parameters:
3940 +    comm - the communicator the parallel matrix will live on
3941 .    seqmat - the input sequential matrices
3942 .    m - number of local rows (or PETSC_DECIDE)
3943 .    n - number of local columns (or PETSC_DECIDE)
3944 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
3945 
3946    Output Parameter:
3947 .    mpimat - the parallel matrix generated
3948 
3949     Level: advanced
3950 
3951    Notes:
3952      The dimensions of the sequential matrix in each processor MUST be the same.
3953      The input seqmat is included into the container "Mat_Merge_SeqsToMPI", and will be
3954      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
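
     The symbolic and numeric phases are normally called in sequence; a minimal sketch
     (variable names are illustrative):

.vb
     ierr = MatMerge_SeqsToMPISymbolic(comm,seqmat,m,n,&mpimat);CHKERRQ(ierr);
     ierr = MatMerge_SeqsToMPINumeric(seqmat,mpimat);CHKERRQ(ierr);
.ve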
3955 @*/
3956 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge_SeqsToMPINumeric(Mat seqmat,Mat mpimat)
3957 {
3958   PetscErrorCode       ierr;
3959   MPI_Comm             comm=((PetscObject)mpimat)->comm;
3960   Mat_SeqAIJ           *a=(Mat_SeqAIJ*)seqmat->data;
3961   PetscMPIInt          size,rank,taga,*len_s;
3962   PetscInt             N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj=a->j;
3963   PetscInt             proc,m;
3964   PetscInt             **buf_ri,**buf_rj;
3965   PetscInt             k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
3966   PetscInt             nrows,**buf_ri_k,**nextrow,**nextai;
3967   MPI_Request          *s_waits,*r_waits;
3968   MPI_Status           *status;
3969   MatScalar            *aa=a->a;
3970   MatScalar            **abuf_r,*ba_i;
3971   Mat_Merge_SeqsToMPI  *merge;
3972   PetscContainer       container;
3973 
3974   PetscFunctionBegin;
3975   ierr = PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr);
3976 
3977   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3978   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3979 
3980   ierr = PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject *)&container);CHKERRQ(ierr);
3981   if (!container) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix was not created by MatMerge_SeqsToMPISymbolic()");
3982   ierr = PetscContainerGetPointer(container,(void **)&merge);CHKERRQ(ierr);
3984   bi     = merge->bi;
3985   bj     = merge->bj;
3986   buf_ri = merge->buf_ri;
3987   buf_rj = merge->buf_rj;
3988 
3989   ierr   = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
3990   owners = merge->rowmap.range;
3991   len_s  = merge->len_s;
3992 
3993   /* send and recv matrix values */
3994   /*-----------------------------*/
3995   ierr = PetscObjectGetNewTag((PetscObject)mpimat,&taga);CHKERRQ(ierr);
3996   ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
3997 
3998   ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&s_waits);CHKERRQ(ierr);
3999   for (proc=0,k=0; proc<size; proc++){
4000     if (!len_s[proc]) continue;
4001     i = owners[proc];
4002     ierr = MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
4003     k++;
4004   }
4005 
4006   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
4007   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
4008   ierr = PetscFree(status);CHKERRQ(ierr);
4009 
4010   ierr = PetscFree(s_waits);CHKERRQ(ierr);
4011   ierr = PetscFree(r_waits);CHKERRQ(ierr);
4012 
4013   /* insert mat values of mpimat */
4014   /*----------------------------*/
4015   ierr = PetscMalloc(N*sizeof(PetscScalar),&ba_i);CHKERRQ(ierr);
4016   ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
4017   nextrow = buf_ri_k + merge->nrecv;
4018   nextai  = nextrow + merge->nrecv;
4019 
4020   for (k=0; k<merge->nrecv; k++){
4021     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
4022     nrows = *(buf_ri_k[k]);
4023     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
4024     nextai[k]   = buf_ri_k[k] + (nrows + 1);/* points to the next i-structure of k-th recved i-structure  */
4025   }
4026 
4027   /* set values of ba */
4028   m = merge->rowmap.n;
4029   for (i=0; i<m; i++) {
4030     arow = owners[rank] + i;
4031     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4032     bnzi = bi[i+1] - bi[i];
4033     ierr = PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));CHKERRQ(ierr);
4034 
4035     /* add local non-zero vals of this proc's seqmat into ba */
4036     anzi = ai[arow+1] - ai[arow];
4037     aj   = a->j + ai[arow];
4038     aa   = a->a + ai[arow];
4039     nextaj = 0;
4040     for (j=0; nextaj<anzi; j++){
4041       if (*(bj_i + j) == aj[nextaj]){ /* bcol == acol */
4042         ba_i[j] += aa[nextaj++];
4043       }
4044     }
4045 
4046     /* add received vals into ba */
4047     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
4048       /* i-th row */
4049       if (i == *nextrow[k]) {
4050         anzi = *(nextai[k]+1) - *nextai[k];
4051         aj   = buf_rj[k] + *(nextai[k]);
4052         aa   = abuf_r[k] + *(nextai[k]);
4053         nextaj = 0;
4054         for (j=0; nextaj<anzi; j++){
4055           if (*(bj_i + j) == aj[nextaj]){ /* bcol == acol */
4056             ba_i[j] += aa[nextaj++];
4057           }
4058         }
4059         nextrow[k]++; nextai[k]++;
4060       }
4061     }
4062     ierr = MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
4063   }
4064   ierr = MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4065   ierr = MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4066 
4067   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
4068   ierr = PetscFree(ba_i);CHKERRQ(ierr);
4069   ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
4070   ierr = PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr);
4071   PetscFunctionReturn(0);
4072 }
4073 
4074 #undef __FUNCT__
4075 #define __FUNCT__ "MatMerge_SeqsToMPISymbolic"
4076 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge_SeqsToMPISymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4077 {
4078   PetscErrorCode       ierr;
4079   Mat                  B_mpi;
4080   Mat_SeqAIJ           *a=(Mat_SeqAIJ*)seqmat->data;
4081   PetscMPIInt          size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4082   PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
4083   PetscInt             M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4084   PetscInt             len,proc,*dnz,*onz;
4085   PetscInt             k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4086   PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4087   MPI_Request          *si_waits,*sj_waits,*ri_waits,*rj_waits;
4088   MPI_Status           *status;
4089   PetscFreeSpaceList   free_space=PETSC_NULL,current_space=PETSC_NULL;
4090   PetscBT              lnkbt;
4091   Mat_Merge_SeqsToMPI  *merge;
4092   PetscContainer       container;
4093 
4094   PetscFunctionBegin;
4095   ierr = PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr);
4096 
4097   /* make sure it is a PETSc comm */
4098   ierr = PetscCommDuplicate(comm,&comm,PETSC_NULL);CHKERRQ(ierr);
4099   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4100   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
4101 
4102   ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
4103   ierr = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
4104 
4105   /* determine row ownership */
4106   /*---------------------------------------------------------*/
4107   ierr = PetscMapInitialize(comm,&merge->rowmap);CHKERRQ(ierr);
4108   merge->rowmap.n = m;
4109   merge->rowmap.N = M;
4110   merge->rowmap.bs = 1;
4111   ierr = PetscMapSetUp(&merge->rowmap);CHKERRQ(ierr);
4112   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
4113   ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
4114 
4115   m      = merge->rowmap.n;
4116   M      = merge->rowmap.N;
4117   owners = merge->rowmap.range;
4118 
4119   /* determine the number of messages to send, their lengths */
4120   /*---------------------------------------------------------*/
4121   len_s  = merge->len_s;
4122 
4123   len = 0;  /* length of buf_si[] */
4124   merge->nsend = 0;
4125   for (proc=0; proc<size; proc++){
4126     len_si[proc] = 0;
4127     if (proc == rank){
4128       len_s[proc] = 0;
4129     } else {
4130       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4131       len_s[proc] = ai[owners[proc+1]] - ai[owners[proc]]; /* num of nonzeros to be sent to [proc] */
4132     }
4133     if (len_s[proc]) {
4134       merge->nsend++;
4135       nrows = 0;
4136       for (i=owners[proc]; i<owners[proc+1]; i++){
4137         if (ai[i+1] > ai[i]) nrows++;
4138       }
4139       len_si[proc] = 2*(nrows+1);
4140       len += len_si[proc];
4141     }
4142   }
4143 
4144   /* determine the number and length of messages to receive for ij-structure */
4145   /*-------------------------------------------------------------------------*/
4146   ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
4147   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
4148 
4149   /* post the Irecv of j-structure */
4150   /*-------------------------------*/
4151   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
4152   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);CHKERRQ(ierr);
4153 
4154   /* post the Isend of j-structure */
4155   /*--------------------------------*/
4156   ierr = PetscMalloc((2*merge->nsend+1)*sizeof(MPI_Request),&si_waits);CHKERRQ(ierr);
4157   sj_waits = si_waits + merge->nsend;
4158 
4159   for (proc=0, k=0; proc<size; proc++){
4160     if (!len_s[proc]) continue;
4161     i = owners[proc];
4162     ierr = MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);CHKERRQ(ierr);
4163     k++;
4164   }
4165 
4166   /* receives and sends of j-structure are complete */
4167   /*------------------------------------------------*/
4168   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,rj_waits,status);CHKERRQ(ierr);}
4169   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,sj_waits,status);CHKERRQ(ierr);}
4170 
4171   /* send and recv i-structure */
4172   /*---------------------------*/
4173   ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
4174   ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);CHKERRQ(ierr);
4175 
4176   ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
4177   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4178   for (proc=0,k=0; proc<size; proc++){
4179     if (!len_s[proc]) continue;
4180     /* form outgoing message for i-structure:
4181          buf_si[0]:                 nrows to be sent
4182                [1:nrows]:           row index (local to the receiving process)
4183                [nrows+1:2*nrows+1]: i-structure index
4184     */
4185     /*-------------------------------------------*/
4186     nrows = len_si[proc]/2 - 1;
4187     buf_si_i    = buf_si + nrows+1;
4188     buf_si[0]   = nrows;
4189     buf_si_i[0] = 0;
4190     nrows = 0;
4191     for (i=owners[proc]; i<owners[proc+1]; i++){
4192       anzi = ai[i+1] - ai[i];
4193       if (anzi) {
4194         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4195         buf_si[nrows+1] = i-owners[proc]; /* local row index */
4196         nrows++;
4197       }
4198     }
4199     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);CHKERRQ(ierr);
4200     k++;
4201     buf_si += len_si[proc];
4202   }
4203 
4204   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,ri_waits,status);CHKERRQ(ierr);}
4205   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,si_waits,status);CHKERRQ(ierr);}
4206 
4207   ierr = PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
4208   for (i=0; i<merge->nrecv; i++){
4209     ierr = PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
4210   }
4211 
4212   ierr = PetscFree(len_si);CHKERRQ(ierr);
4213   ierr = PetscFree(len_ri);CHKERRQ(ierr);
4214   ierr = PetscFree(rj_waits);CHKERRQ(ierr);
4215   ierr = PetscFree(si_waits);CHKERRQ(ierr);
4216   ierr = PetscFree(ri_waits);CHKERRQ(ierr);
4217   ierr = PetscFree(buf_s);CHKERRQ(ierr);
4218   ierr = PetscFree(status);CHKERRQ(ierr);
4219 
4220   /* compute a local seq matrix in each processor */
4221   /*----------------------------------------------*/
4222   /* allocate bi array and free space for accumulating nonzero column info */
4223   ierr = PetscMalloc((m+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
4224   bi[0] = 0;
4225 
4226   /* create and initialize a linked list */
4227   nlnk = N+1;
4228   ierr = PetscLLCreate(N,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4229 
4230   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4231   len  = ai[owners[rank+1]] - ai[owners[rank]];
4233   ierr = PetscFreeSpaceGet((PetscInt)(2*len+1),&free_space);CHKERRQ(ierr);
4234   current_space = free_space;
4235 
4236   /* determine symbolic info for each local row */
4237   ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
4238   nextrow = buf_ri_k + merge->nrecv;
4239   nextai  = nextrow + merge->nrecv;
4240   for (k=0; k<merge->nrecv; k++){
4241     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
4242     nrows = *buf_ri_k[k];
4243     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
4244     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the i-structure offsets of the k-th received message */
4245   }
4246 
4247   ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr);
4248   len = 0;
4249   for (i=0;i<m;i++) {
4250     bnzi   = 0;
4251     /* add local non-zero cols of this proc's seqmat into lnk */
4252     arow   = owners[rank] + i;
4253     anzi   = ai[arow+1] - ai[arow];
4254     aj     = a->j + ai[arow];
4255     ierr = PetscLLAdd(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4256     bnzi += nlnk;
4257     /* add received col data into lnk */
4258     for (k=0; k<merge->nrecv; k++){ /* k-th received message */
4259       if (i == *nextrow[k]) { /* i-th row */
4260         anzi = *(nextai[k]+1) - *nextai[k];
4261         aj   = buf_rj[k] + *nextai[k];
4262         ierr = PetscLLAdd(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4263         bnzi += nlnk;
4264         nextrow[k]++; nextai[k]++;
4265       }
4266     }
4267     if (len < bnzi) len = bnzi;  /* =max(bnzi) */
4268 
4269     /* if free space is not available, make more free space */
4270     if (current_space->local_remaining<bnzi) {
4271       ierr = PetscFreeSpaceGet(bnzi+current_space->total_array_size,&current_space);CHKERRQ(ierr);
4272       nspacedouble++;
4273     }
4274     /* copy data into free space, then initialize lnk */
4275     ierr = PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
4276     ierr = MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);CHKERRQ(ierr);
4277 
4278     current_space->array           += bnzi;
4279     current_space->local_used      += bnzi;
4280     current_space->local_remaining -= bnzi;
4281 
4282     bi[i+1] = bi[i] + bnzi;
4283   }
4284 
4285   ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
4286 
4287   ierr = PetscMalloc((bi[m]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
4288   ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
4289   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
4290 
4291   /* create symbolic parallel matrix B_mpi */
4292   /*---------------------------------------*/
4293   ierr = MatCreate(comm,&B_mpi);CHKERRQ(ierr);
4294   if (n==PETSC_DECIDE) {
4295     ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);CHKERRQ(ierr);
4296   } else {
4297     ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
4298   }
4299   ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
4300   ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
4301   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
4302 
4303   /* B_mpi is not ready for use - assembly will be done by MatMerge_SeqsToMPINumeric() */
4304   B_mpi->assembled     = PETSC_FALSE;
4305   B_mpi->ops->destroy  = MatDestroy_MPIAIJ_SeqsToMPI;
4306   merge->bi            = bi;
4307   merge->bj            = bj;
4308   merge->buf_ri        = buf_ri;
4309   merge->buf_rj        = buf_rj;
4310   merge->coi           = PETSC_NULL;
4311   merge->coj           = PETSC_NULL;
4312   merge->owners_co     = PETSC_NULL;
4313 
4314   /* attach the supporting struct to B_mpi for reuse */
4315   ierr = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
4316   ierr = PetscContainerSetPointer(container,merge);CHKERRQ(ierr);
4317   ierr = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
4318   *mpimat = B_mpi;
4319 
4320   ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
4321   ierr = PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr);
4322   PetscFunctionReturn(0);
4323 }
4324 
4325 #undef __FUNCT__
4326 #define __FUNCT__ "MatMerge_SeqsToMPI"
4327 PetscErrorCode PETSCMAT_DLLEXPORT MatMerge_SeqsToMPI(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4328 {
4329   PetscErrorCode   ierr;
4330 
4331   PetscFunctionBegin;
4332   ierr = PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4333   if (scall == MAT_INITIAL_MATRIX){
4334     ierr = MatMerge_SeqsToMPISymbolic(comm,seqmat,m,n,mpimat);CHKERRQ(ierr);
4335   }
4336   ierr = MatMerge_SeqsToMPINumeric(seqmat,*mpimat);CHKERRQ(ierr);
4337   ierr = PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4338   PetscFunctionReturn(0);
4339 }
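
/*
   Usage sketch (assumed caller-side code, not part of this file): each process supplies a SeqAIJ
   matrix seqmat whose rows cover the global row space; the first call builds the parallel matrix,
   and later calls with MAT_REUSE_MATRIX redo only the numeric merge.

     Mat C;
     ierr = MatMerge_SeqsToMPI(comm,seqmat,m,n,MAT_INITIAL_MATRIX,&C);CHKERRQ(ierr);
     .... update the numerical values of seqmat, keeping the same nonzero pattern ....
     ierr = MatMerge_SeqsToMPI(comm,seqmat,m,n,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
     ierr = MatDestroy(C);CHKERRQ(ierr);
*/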
4340 
4341 #undef __FUNCT__
4342 #define __FUNCT__ "MatGetLocalMat"
4343 /*@
4344      MatGetLocalMat - Creates a SeqAIJ matrix containing all the local rows (diagonal and off-diagonal parts) of a parallel AIJ matrix
4345 
4346     Not Collective
4347 
4348    Input Parameters:
4349 +    A - the matrix
4350 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4351 
4352    Output Parameter:
4353 .    A_loc - the local sequential matrix generated
4354 
4355     Level: developer
4356 
4357 @*/
4358 PetscErrorCode PETSCMAT_DLLEXPORT MatGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4359 {
4360   PetscErrorCode  ierr;
4361   Mat_MPIAIJ      *mpimat=(Mat_MPIAIJ*)A->data;
4362   Mat_SeqAIJ      *mat,*a=(Mat_SeqAIJ*)(mpimat->A)->data,*b=(Mat_SeqAIJ*)(mpimat->B)->data;
4363   PetscInt        *ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j,*cmap=mpimat->garray;
4364   MatScalar       *aa=a->a,*ba=b->a,*cam;
4365   PetscScalar     *ca;
4366   PetscInt        am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4367   PetscInt        *ci,*cj,col,ncols_d,ncols_o,jo;
4368 
4369   PetscFunctionBegin;
4370   ierr = PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4371   if (scall == MAT_INITIAL_MATRIX){
4372     ierr = PetscMalloc((1+am)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
4373     ci[0] = 0;
4374     for (i=0; i<am; i++){
4375       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4376     }
4377     ierr = PetscMalloc((1+ci[am])*sizeof(PetscInt),&cj);CHKERRQ(ierr);
4378     ierr = PetscMalloc((1+ci[am])*sizeof(PetscScalar),&ca);CHKERRQ(ierr);
4379     k = 0;
4380     for (i=0; i<am; i++) {
4381       ncols_o = bi[i+1] - bi[i];
4382       ncols_d = ai[i+1] - ai[i];
4383       /* off-diagonal portion of A: columns with global index < cstart */
4384       for (jo=0; jo<ncols_o; jo++) {
4385         col = cmap[*bj];
4386         if (col >= cstart) break;
4387         cj[k]   = col; bj++;
4388         ca[k++] = *ba++;
4389       }
4390       /* diagonal portion of A */
4391       for (j=0; j<ncols_d; j++) {
4392         cj[k]   = cstart + *aj++;
4393         ca[k++] = *aa++;
4394       }
4395       /* remaining off-diagonal portion of A: columns with global index >= cstart */
4396       for (j=jo; j<ncols_o; j++) {
4397         cj[k]   = cmap[*bj++];
4398         ca[k++] = *ba++;
4399       }
4400     }
4401     /* put together the new matrix */
4402     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);CHKERRQ(ierr);
4403     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4404     /* Since these are PETSc arrays, change flags to free them as necessary. */
4405     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
4406     mat->free_a  = PETSC_TRUE;
4407     mat->free_ij = PETSC_TRUE;
4408     mat->nonew   = 0;
4409   } else if (scall == MAT_REUSE_MATRIX){
4410     mat=(Mat_SeqAIJ*)(*A_loc)->data;
4411     ci = mat->i; cj = mat->j; cam = mat->a;
4412     for (i=0; i<am; i++) {
4413       /* off-diagonal portion of A: columns with global index < cstart */
4414       ncols_o = bi[i+1] - bi[i];
4415       for (jo=0; jo<ncols_o; jo++) {
4416         col = cmap[*bj];
4417         if (col >= cstart) break;
4418         *cam++ = *ba++; bj++;
4419       }
4420       /* diagonal portion of A */
4421       ncols_d = ai[i+1] - ai[i];
4422       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4423       /* remaining off-diagonal portion of A: columns with global index >= cstart */
4424       for (j=jo; j<ncols_o; j++) {
4425         *cam++ = *ba++; bj++;
4426       }
4427     }
4428   } else {
4429     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4430   }
4431 
4432   ierr = PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4433   PetscFunctionReturn(0);
4434 }
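
/*
   Usage sketch (assumed caller-side code): extract the local rows of a parallel AIJ matrix A into
   a sequential matrix, then refresh only the numerical values once A has new entries but the same
   nonzero pattern.

     Mat A_loc;
     ierr = MatGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);CHKERRQ(ierr);
     .... use A_loc as an ordinary SeqAIJ matrix ....
     ierr = MatGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);CHKERRQ(ierr);
     ierr = MatDestroy(A_loc);CHKERRQ(ierr);
*/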
4435 
4436 #undef __FUNCT__
4437 #define __FUNCT__ "MatGetLocalMatCondensed"
4438 /*@C
4439      MatGetLocalMatCondensed - Creates a SeqAIJ matrix from all the local rows and the NON-ZERO columns of a parallel AIJ matrix
4440 
4441     Not Collective
4442 
4443    Input Parameters:
4444 +    A - the matrix
4445 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4446 -    row, col - index sets of rows and columns to extract (or PETSC_NULL)
4447 
4448    Output Parameter:
4449 .    A_loc - the local sequential matrix generated
4450 
4451     Level: developer
4452 
4453 @*/
4454 PetscErrorCode PETSCMAT_DLLEXPORT MatGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
4455 {
4456   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
4457   PetscErrorCode    ierr;
4458   PetscInt          i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
4459   IS                isrowa,iscola;
4460   Mat               *aloc;
4461 
4462   PetscFunctionBegin;
4463   ierr = PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4464   if (!row){
4465     start = A->rmap->rstart; end = A->rmap->rend;
4466     ierr = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);CHKERRQ(ierr);
4467   } else {
4468     isrowa = *row;
4469   }
4470   if (!col){
4471     start = A->cmap->rstart;
4472     cmap  = a->garray;
4473     nzA   = a->A->cmap->n;
4474     nzB   = a->B->cmap->n;
4475     ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt), &idx);CHKERRQ(ierr);
4476     ncols = 0;
4477     for (i=0; i<nzB; i++) {
4478       if (cmap[i] < start) idx[ncols++] = cmap[i];
4479       else break;
4480     }
4481     imark = i;
4482     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
4483     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
4484     ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,&iscola);CHKERRQ(ierr);
4485     ierr = PetscFree(idx);CHKERRQ(ierr);
4486   } else {
4487     iscola = *col;
4488   }
4489   if (scall != MAT_INITIAL_MATRIX){
4490     ierr = PetscMalloc(sizeof(Mat),&aloc);CHKERRQ(ierr);
4491     aloc[0] = *A_loc;
4492   }
4493   ierr = MatGetSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);CHKERRQ(ierr);
4494   *A_loc = aloc[0];
4495   ierr = PetscFree(aloc);CHKERRQ(ierr);
4496   if (!row){
4497     ierr = ISDestroy(isrowa);CHKERRQ(ierr);
4498   }
4499   if (!col){
4500     ierr = ISDestroy(iscola);CHKERRQ(ierr);
4501   }
4502   ierr = PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4503   PetscFunctionReturn(0);
4504 }
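
/*
   Usage sketch (assumed caller-side code): passing PETSC_NULL for row and col lets the routine
   select the local rows and the nonzero columns of A automatically.

     Mat A_loc;
     ierr = MatGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,PETSC_NULL,PETSC_NULL,&A_loc);CHKERRQ(ierr);
     ierr = MatDestroy(A_loc);CHKERRQ(ierr);
*/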
4505 
4506 #undef __FUNCT__
4507 #define __FUNCT__ "MatGetBrowsOfAcols"
4508 /*@C
4509     MatGetBrowsOfAcols - Creates a SeqAIJ matrix from the rows of B that correspond to the nonzero columns of the local part of A
4510 
4511     Collective on Mat
4512 
4513    Input Parameters:
4514 +    A,B - the matrices in mpiaij format
4515 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4516 -    rowb, colb - index sets of rows and columns of B to extract (or PETSC_NULL)
4517 
4518    Output Parameter:
4519 +    rowb, colb - index sets of rows and columns of B to extract
4520 .    brstart - row index of B_seq at which the next B->rmap->n rows (those coming from B's local rows) begin
4521 -    B_seq - the sequential matrix generated
4522 
4523     Level: developer
4524 
4525 @*/
4526 PetscErrorCode PETSCMAT_DLLEXPORT MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,PetscInt *brstart,Mat *B_seq)
4527 {
4528   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
4529   PetscErrorCode    ierr;
4530   PetscInt          *idx,i,start,ncols,nzA,nzB,*cmap,imark;
4531   IS                isrowb,iscolb;
4532   Mat               *bseq;
4533 
4534   PetscFunctionBegin;
4535   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
4536     SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4537   }
4538   ierr = PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4539 
4540   if (scall == MAT_INITIAL_MATRIX){
4541     start = A->cmap->rstart;
4542     cmap  = a->garray;
4543     nzA   = a->A->cmap->n;
4544     nzB   = a->B->cmap->n;
4545     ierr  = PetscMalloc((nzA+nzB)*sizeof(PetscInt), &idx);CHKERRQ(ierr);
4546     ncols = 0;
4547     for (i=0; i<nzB; i++) {  /* row < local row index */
4548       if (cmap[i] < start) idx[ncols++] = cmap[i];
4549       else break;
4550     }
4551     imark = i;
4552     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
4553     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
4554     ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,&isrowb);CHKERRQ(ierr);
4555     ierr = PetscFree(idx);CHKERRQ(ierr);
4556     *brstart = imark;
4557     ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);CHKERRQ(ierr);
4558   } else {
4559     if (!rowb || !colb) SETERRQ(PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
4560     isrowb = *rowb; iscolb = *colb;
4561     ierr = PetscMalloc(sizeof(Mat),&bseq);CHKERRQ(ierr);
4562     bseq[0] = *B_seq;
4563   }
4564   ierr = MatGetSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);CHKERRQ(ierr);
4565   *B_seq = bseq[0];
4566   ierr = PetscFree(bseq);CHKERRQ(ierr);
4567   if (!rowb){
4568     ierr = ISDestroy(isrowb);CHKERRQ(ierr);
4569   } else {
4570     *rowb = isrowb;
4571   }
4572   if (!colb){
4573     ierr = ISDestroy(iscolb);CHKERRQ(ierr);
4574   } else {
4575     *colb = iscolb;
4576   }
4577   ierr = PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4578   PetscFunctionReturn(0);
4579 }
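
/*
   Usage sketch (assumed caller-side code): gather the rows of B matching the nonzero columns of
   the local part of A; the returned index sets and B_seq are kept so that a later call with
   MAT_REUSE_MATRIX only updates the numerical values.

     IS       rowb,colb;
     PetscInt brstart;
     Mat      B_seq;
     ierr = MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&brstart,&B_seq);CHKERRQ(ierr);
     ....
     ierr = MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&brstart,&B_seq);CHKERRQ(ierr);
     ierr = ISDestroy(rowb);CHKERRQ(ierr);
     ierr = ISDestroy(colb);CHKERRQ(ierr);
     ierr = MatDestroy(B_seq);CHKERRQ(ierr);
*/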
4580 
4581 #undef __FUNCT__
4582 #define __FUNCT__ "MatGetBrowsOfAoCols"
4583 /*@C
4584     MatGetBrowsOfAoCols - Creates a SeqAIJ matrix from the rows of B that correspond to the nonzero columns
4585     of the OFF-DIAGONAL portion of the local part of A
4586 
4587     Collective on Mat
4588 
4589    Input Parameters:
4590 +    A,B - the matrices in mpiaij format
4591 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4592 .    startsj - starting point in B's sending and receiving j-arrays, saved for MAT_REUSE (or PETSC_NULL)
4593 -    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or PETSC_NULL)
4594 
4595    Output Parameter:
4596 .    B_oth - the sequential matrix generated
4597 
4598     Level: developer
4599 
4600 @*/
4601 PetscErrorCode PETSCMAT_DLLEXPORT MatGetBrowsOfAoCols(Mat A,Mat B,MatReuse scall,PetscInt **startsj,MatScalar **bufa_ptr,Mat *B_oth)
4602 {
4603   VecScatter_MPI_General *gen_to,*gen_from;
4604   PetscErrorCode         ierr;
4605   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
4606   Mat_SeqAIJ             *b_oth;
4607   VecScatter             ctx=a->Mvctx;
4608   MPI_Comm               comm=((PetscObject)ctx)->comm;
4609   PetscMPIInt            *rprocs,*sprocs,tag=((PetscObject)ctx)->tag,rank;
4610   PetscInt               *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj;
4611   PetscScalar            *rvalues,*svalues;
4612   MatScalar              *b_otha,*bufa,*bufA;
4613   PetscInt               i,j,k,l,ll,nrecvs,nsends,nrows,*srow,*rstarts,*rstartsj = 0,*sstarts,*sstartsj,len;
4614   MPI_Request            *rwaits = PETSC_NULL,*swaits = PETSC_NULL;
4615   MPI_Status             *sstatus,rstatus;
4616   PetscMPIInt            jj;
4617   PetscInt               *cols,sbs,rbs;
4618   PetscScalar            *vals;
4619 
4620   PetscFunctionBegin;
4621   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend){
4622     SETERRQ4(PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4623   }
4624   ierr = PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
4625   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
4626 
4627   gen_to   = (VecScatter_MPI_General*)ctx->todata;
4628   gen_from = (VecScatter_MPI_General*)ctx->fromdata;
4629   rvalues  = gen_from->values; /* scatter buffer reused to hold the lengths of incoming rows */
4630   svalues  = gen_to->values;   /* scatter buffer reused to hold the lengths of outgoing rows */
4631   nrecvs   = gen_from->n;
4632   nsends   = gen_to->n;
4633 
4634   ierr = PetscMalloc2(nrecvs,MPI_Request,&rwaits,nsends,MPI_Request,&swaits);CHKERRQ(ierr);
4635   srow     = gen_to->indices;   /* local row index to be sent */
4636   sstarts  = gen_to->starts;
4637   sprocs   = gen_to->procs;
4638   sstatus  = gen_to->sstatus;
4639   sbs      = gen_to->bs;
4640   rstarts  = gen_from->starts;
4641   rprocs   = gen_from->procs;
4642   rbs      = gen_from->bs;
4643 
4644   if (!startsj || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
4645   if (scall == MAT_INITIAL_MATRIX){
4646     /* i-array */
4647     /*---------*/
4648     /*  post receives */
4649     for (i=0; i<nrecvs; i++){
4650       rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4651       nrows = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
4652       ierr = MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
4653     }
4654 
4655     /* pack the outgoing message */
4656     ierr = PetscMalloc((nsends+nrecvs+3)*sizeof(PetscInt),&sstartsj);CHKERRQ(ierr);
4657     rstartsj = sstartsj + nsends +1;
4658     sstartsj[0] = 0;  rstartsj[0] = 0;
4659     len = 0; /* total length of j or a array to be sent */
4660     k = 0;
4661     for (i=0; i<nsends; i++){
4662       rowlen = (PetscInt*)svalues + sstarts[i]*sbs;
4663       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4664       for (j=0; j<nrows; j++) {
4665         row = srow[k] + B->rmap->range[rank]; /* global row idx */
4666         for (l=0; l<sbs; l++){
4667           ierr = MatGetRow_MPIAIJ(B,row+l,&ncols,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr); /* rowlength */
4668           rowlen[j*sbs+l] = ncols;
4669           len += ncols;
4670           ierr = MatRestoreRow_MPIAIJ(B,row+l,&ncols,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);
4671         }
4672         k++;
4673       }
4674       ierr = MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
4675       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
4676     }
4677     /* recvs and sends of i-array are completed */
4678     i = nrecvs;
4679     while (i--) {
4680       ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
4681     }
4682     if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
4683 
4684     /* allocate buffers for sending j and a arrays */
4685     ierr = PetscMalloc((len+1)*sizeof(PetscInt),&bufj);CHKERRQ(ierr);
4686     ierr = PetscMalloc((len+1)*sizeof(PetscScalar),&bufa);CHKERRQ(ierr);
4687 
4688     /* create i-array of B_oth */
4689     ierr = PetscMalloc((aBn+2)*sizeof(PetscInt),&b_othi);CHKERRQ(ierr);
4690     b_othi[0] = 0;
4691     len = 0; /* total length of j or a array to be received */
4692     k = 0;
4693     for (i=0; i<nrecvs; i++){
4694       rowlen = (PetscInt*)rvalues + rstarts[i]*rbs;
4695       nrows = rbs*(rstarts[i+1]-rstarts[i]); /* num of rows to be received */
4696       for (j=0; j<nrows; j++) {
4697         b_othi[k+1] = b_othi[k] + rowlen[j];
4698         len += rowlen[j]; k++;
4699       }
4700       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
4701     }
4702 
4703     /* allocate space for the j and a arrays of B_oth */
4704     ierr = PetscMalloc((b_othi[aBn]+1)*sizeof(PetscInt),&b_othj);CHKERRQ(ierr);
4705     ierr = PetscMalloc((b_othi[aBn]+1)*sizeof(MatScalar),&b_otha);CHKERRQ(ierr);
4706 
4707     /* j-array */
4708     /*---------*/
4709     /*  post receives of j-array */
4710     for (i=0; i<nrecvs; i++){
4711       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4712       ierr = MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
4713     }
4714 
4715     /* pack the outgoing message j-array */
4716     k = 0;
4717     for (i=0; i<nsends; i++){
4718       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4719       bufJ = bufj+sstartsj[i];
4720       for (j=0; j<nrows; j++) {
4721         row  = srow[k++] + B->rmap->range[rank]; /* global row idx */
4722         for (ll=0; ll<sbs; ll++){
4723           ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,PETSC_NULL);CHKERRQ(ierr);
4724           for (l=0; l<ncols; l++){
4725             *bufJ++ = cols[l];
4726           }
4727           ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,PETSC_NULL);CHKERRQ(ierr);
4728         }
4729       }
4730       ierr = MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
4731     }
4732 
4733     /* recvs and sends of j-array are completed */
4734     i = nrecvs;
4735     while (i--) {
4736       ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
4737     }
4738     if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
4739   } else if (scall == MAT_REUSE_MATRIX){
4740     sstartsj = *startsj;
4741     rstartsj = sstartsj + nsends +1;
4742     bufa     = *bufa_ptr;
4743     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
4744     b_otha   = b_oth->a;
4745   } else {
4746     SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4747   }
4748 
4749   /* a-array */
4750   /*---------*/
4751   /*  post receives of a-array */
4752   for (i=0; i<nrecvs; i++){
4753     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
4754     ierr = MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
4755   }
4756 
4757   /* pack the outgoing message a-array */
4758   k = 0;
4759   for (i=0; i<nsends; i++){
4760     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
4761     bufA = bufa+sstartsj[i];
4762     for (j=0; j<nrows; j++) {
4763       row  = srow[k++] + B->rmap->range[rank]; /* global row idx */
4764       for (ll=0; ll<sbs; ll++){
4765         ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,PETSC_NULL,&vals);CHKERRQ(ierr);
4766         for (l=0; l<ncols; l++){
4767           *bufA++ = vals[l];
4768         }
4769         ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,PETSC_NULL,&vals);CHKERRQ(ierr);
4770       }
4771     }
4772     ierr = MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
4773   }
4774   /* recvs and sends of a-array are completed */
4775   i = nrecvs;
4776   while (i--) {
4777     ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
4778   }
4779   if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
4780   ierr = PetscFree2(rwaits,swaits);CHKERRQ(ierr);
4781 
4782   if (scall == MAT_INITIAL_MATRIX){
4783     /* put together the new matrix */
4784     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);CHKERRQ(ierr);
4785 
4786     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4787     /* Since these are PETSc arrays, change flags to free them as necessary. */
4788     b_oth          = (Mat_SeqAIJ *)(*B_oth)->data;
4789     b_oth->free_a  = PETSC_TRUE;
4790     b_oth->free_ij = PETSC_TRUE;
4791     b_oth->nonew   = 0;
4792 
4793     ierr = PetscFree(bufj);CHKERRQ(ierr);
4794     if (!startsj || !bufa_ptr){
4795       ierr = PetscFree(sstartsj);CHKERRQ(ierr);
4796       ierr = PetscFree(bufa);CHKERRQ(ierr); /* bufa is not returned to the caller, so free it here */
4797     } else {
4798       *startsj  = sstartsj;
4799       *bufa_ptr = bufa;
4800     }
4801   }
4802   ierr = PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
4803   PetscFunctionReturn(0);
4804 }
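
/*
   Usage sketch (assumed caller-side code): the startsj and bufa arrays returned by the first call
   are handed back on reuse so that only the a-array values are communicated again; the caller is
   responsible for freeing them when done.

     PetscInt  *startsj;
     MatScalar *bufa;
     Mat       B_oth;
     ierr = MatGetBrowsOfAoCols(A,B,MAT_INITIAL_MATRIX,&startsj,&bufa,&B_oth);CHKERRQ(ierr);
     ....
     ierr = MatGetBrowsOfAoCols(A,B,MAT_REUSE_MATRIX,&startsj,&bufa,&B_oth);CHKERRQ(ierr);
     ierr = PetscFree(startsj);CHKERRQ(ierr);
     ierr = PetscFree(bufa);CHKERRQ(ierr);
     ierr = MatDestroy(B_oth);CHKERRQ(ierr);
*/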
4805 
4806 #undef __FUNCT__
4807 #define __FUNCT__ "MatGetCommunicationStructs"
4808 /*@C
4809   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
4810 
4811   Not Collective
4812 
4813   Input Parameter:
4814 . A - The matrix in mpiaij format
4815 
4816   Output Parameters:
4817 + lvec - The local vector holding off-process values from the argument to a matrix-vector product
4818 . colmap - A map from global column index to local index into lvec
4819 - multScatter - A scatter from the argument of a matrix-vector product to lvec
4820 
4821   Level: developer
4822 
4823 @*/
4824 #if defined (PETSC_USE_CTABLE)
4825 PetscErrorCode PETSCMAT_DLLEXPORT MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
4826 #else
4827 PetscErrorCode PETSCMAT_DLLEXPORT MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
4828 #endif
4829 {
4830   Mat_MPIAIJ *a;
4831 
4832   PetscFunctionBegin;
4833   PetscValidHeaderSpecific(A, MAT_COOKIE, 1);
4834   PetscValidPointer(lvec, 2);
4835   PetscValidPointer(colmap, 3);
4836   PetscValidPointer(multScatter, 4);
4837   a = (Mat_MPIAIJ *) A->data;
4838   if (lvec) *lvec = a->lvec;
4839   if (colmap) *colmap = a->colmap;
4840   if (multScatter) *multScatter = a->Mvctx;
4841   PetscFunctionReturn(0);
4842 }
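
/*
   Usage sketch (assumed caller-side code, non-CTABLE build): inspect the vector, column map and
   scatter that MatMult() uses to bring in off-process entries of the input vector.

     Vec        lvec;
     PetscInt   *colmap;
     VecScatter Mvctx;
     ierr = MatGetCommunicationStructs(A,&lvec,&colmap,&Mvctx);CHKERRQ(ierr);
*/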
4843 
4844 EXTERN_C_BEGIN
4845 extern PetscErrorCode PETSCMAT_DLLEXPORT MatConvert_MPIAIJ_MPICRL(Mat,const MatType,MatReuse,Mat*);
4846 extern PetscErrorCode PETSCMAT_DLLEXPORT MatConvert_MPIAIJ_MPICSRPERM(Mat,const MatType,MatReuse,Mat*);
4847 EXTERN_C_END
4848 
4849 #include "../src/mat/impls/dense/mpi/mpidense.h"
4850 
4851 #undef __FUNCT__
4852 #define __FUNCT__ "MatMatMultNumeric_MPIDense_MPIAIJ"
4853 /*
4854     Computes (B'*A')' since computing B*A directly is untenable
4855 
4856                n                       p                          p
4857         (              )       (              )         (                  )
4858       m (      A       )  *  n (       B      )   =   m (         C        )
4859         (              )       (              )         (                  )
4860 
4861 */
4862 PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
4863 {
4864   PetscErrorCode     ierr;
4865   Mat                At,Bt,Ct;
4866 
4867   PetscFunctionBegin;
4868   ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
4869   ierr = MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);CHKERRQ(ierr);
4870   ierr = MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);CHKERRQ(ierr);
4871   ierr = MatDestroy(At);CHKERRQ(ierr);
4872   ierr = MatDestroy(Bt);CHKERRQ(ierr);
4873   ierr = MatTranspose(Ct,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
4874   ierr = MatDestroy(Ct);CHKERRQ(ierr);
4875   PetscFunctionReturn(0);
4876 }
4877 
4878 #undef __FUNCT__
4879 #define __FUNCT__ "MatMatMultSymbolic_MPIDense_MPIAIJ"
4880 PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
4881 {
4882   PetscErrorCode ierr;
4883   PetscInt       m=A->rmap->n,n=B->cmap->n;
4884   Mat            Cmat;
4885 
4886   PetscFunctionBegin;
4887   if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_ERR_ARG_SIZ,"A->cmap->n %d != B->rmap->n %d\n",A->cmap->n,B->rmap->n);
4888   ierr = MatCreate(((PetscObject)A)->comm,&Cmat);CHKERRQ(ierr);
4889   ierr = MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
4890   ierr = MatSetType(Cmat,MATMPIDENSE);CHKERRQ(ierr);
4891   ierr = MatMPIDenseSetPreallocation(Cmat,PETSC_NULL);CHKERRQ(ierr);
4892   ierr = MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4893   ierr = MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4894   *C   = Cmat;
4895   PetscFunctionReturn(0);
4896 }
4897 
4898 /* ----------------------------------------------------------------*/
4899 #undef __FUNCT__
4900 #define __FUNCT__ "MatMatMult_MPIDense_MPIAIJ"
4901 PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
4902 {
4903   PetscErrorCode ierr;
4904 
4905   PetscFunctionBegin;
4906   if (scall == MAT_INITIAL_MATRIX){
4907     ierr = MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
4908   }
4909   ierr = MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);CHKERRQ(ierr);
4910   PetscFunctionReturn(0);
4911 }
4912 
4913 EXTERN_C_BEGIN
4914 #if defined(PETSC_HAVE_MUMPS)
4915 extern PetscErrorCode MatGetFactor_mpiaij_mumps(Mat,MatFactorType,Mat*);
4916 #endif
4917 #if defined(PETSC_HAVE_PASTIX)
4918 extern PetscErrorCode MatGetFactor_mpiaij_pastix(Mat,MatFactorType,Mat*);
4919 #endif
4920 #if defined(PETSC_HAVE_SUPERLU_DIST)
4921 extern PetscErrorCode MatGetFactor_mpiaij_superlu_dist(Mat,MatFactorType,Mat*);
4922 #endif
4923 #if defined(PETSC_HAVE_SPOOLES)
4924 extern PetscErrorCode MatGetFactor_mpiaij_spooles(Mat,MatFactorType,Mat*);
4925 #endif
4926 EXTERN_C_END
4927 
4928 /*MC
4929    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
4930 
4931    Options Database Keys:
4932 . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
4933 
4934   Level: beginner
4935 
4936 .seealso: MatCreateMPIAIJ()
4937 M*/
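
/*
   Usage sketch (assumed caller-side code): the type is normally selected at runtime through the
   options database, e.g. by running with -mat_type mpiaij.

     Mat A;
     ierr = MatCreate(comm,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
     ierr = MatSetFromOptions(A);CHKERRQ(ierr);
*/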
4938 
4939 EXTERN_C_BEGIN
4940 #undef __FUNCT__
4941 #define __FUNCT__ "MatCreate_MPIAIJ"
4942 PetscErrorCode PETSCMAT_DLLEXPORT MatCreate_MPIAIJ(Mat B)
4943 {
4944   Mat_MPIAIJ     *b;
4945   PetscErrorCode ierr;
4946   PetscMPIInt    size;
4947 
4948   PetscFunctionBegin;
4949   ierr = MPI_Comm_size(((PetscObject)B)->comm,&size);CHKERRQ(ierr);
4950 
4951   ierr            = PetscNewLog(B,Mat_MPIAIJ,&b);CHKERRQ(ierr);
4952   B->data         = (void*)b;
4953   ierr            = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
4954   B->rmap->bs      = 1;
4955   B->assembled    = PETSC_FALSE;
4956   B->mapping      = 0;
4957 
4958   B->insertmode      = NOT_SET_VALUES;
4959   b->size            = size;
4960   ierr = MPI_Comm_rank(((PetscObject)B)->comm,&b->rank);CHKERRQ(ierr);
4961 
4962   /* build cache for off array entries formed */
4963   ierr = MatStashCreate_Private(((PetscObject)B)->comm,1,&B->stash);CHKERRQ(ierr);
4964   b->donotstash  = PETSC_FALSE;
4965   b->colmap      = 0;
4966   b->garray      = 0;
4967   b->roworiented = PETSC_TRUE;
4968 
4969   /* stuff used for matrix vector multiply */
4970   b->lvec      = PETSC_NULL;
4971   b->Mvctx     = PETSC_NULL;
4972 
4973   /* stuff for MatGetRow() */
4974   b->rowindices   = 0;
4975   b->rowvalues    = 0;
4976   b->getrowactive = PETSC_FALSE;
4977 
4978 #if defined(PETSC_HAVE_SPOOLES)
4979   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_spooles_C",
4980                                      "MatGetFactor_mpiaij_spooles",
4981                                      MatGetFactor_mpiaij_spooles);CHKERRQ(ierr);
4982 #endif
4983 #if defined(PETSC_HAVE_MUMPS)
4984   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_mumps_C",
4985                                      "MatGetFactor_mpiaij_mumps",
4986                                      MatGetFactor_mpiaij_mumps);CHKERRQ(ierr);
4987 #endif
4988 #if defined(PETSC_HAVE_PASTIX)
4989   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_pastix_C",
4990 					   "MatGetFactor_mpiaij_pastix",
4991 					   MatGetFactor_mpiaij_pastix);CHKERRQ(ierr);
4992 #endif
4993 #if defined(PETSC_HAVE_SUPERLU_DIST)
4994   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetFactor_mpiaij_superlu_dist_C",
4995                                      "MatGetFactor_mpiaij_superlu_dist",
4996                                      MatGetFactor_mpiaij_superlu_dist);CHKERRQ(ierr);
4997 #endif
4998   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
4999                                      "MatStoreValues_MPIAIJ",
5000                                      MatStoreValues_MPIAIJ);CHKERRQ(ierr);
5001   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
5002                                      "MatRetrieveValues_MPIAIJ",
5003                                      MatRetrieveValues_MPIAIJ);CHKERRQ(ierr);
5004   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
5005 				     "MatGetDiagonalBlock_MPIAIJ",
5006                                      MatGetDiagonalBlock_MPIAIJ);CHKERRQ(ierr);
5007   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatIsTranspose_C",
5008 				     "MatIsTranspose_MPIAIJ",
5009 				     MatIsTranspose_MPIAIJ);CHKERRQ(ierr);
5010   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIAIJSetPreallocation_C",
5011 				     "MatMPIAIJSetPreallocation_MPIAIJ",
5012 				     MatMPIAIJSetPreallocation_MPIAIJ);CHKERRQ(ierr);
5013   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",
5014 				     "MatMPIAIJSetPreallocationCSR_MPIAIJ",
5015 				     MatMPIAIJSetPreallocationCSR_MPIAIJ);CHKERRQ(ierr);
5016   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatDiagonalScaleLocal_C",
5017 				     "MatDiagonalScaleLocal_MPIAIJ",
5018 				     MatDiagonalScaleLocal_MPIAIJ);CHKERRQ(ierr);
5019   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpiaij_mpicsrperm_C",
5020                                      "MatConvert_MPIAIJ_MPICSRPERM",
5021                                       MatConvert_MPIAIJ_MPICSRPERM);CHKERRQ(ierr);
5022   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpiaij_mpicrl_C",
5023                                      "MatConvert_MPIAIJ_MPICRL",
5024                                       MatConvert_MPIAIJ_MPICRL);CHKERRQ(ierr);
5025   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",
5026                                      "MatMatMult_MPIDense_MPIAIJ",
5027                                       MatMatMult_MPIDense_MPIAIJ);CHKERRQ(ierr);
5028   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",
5029                                      "MatMatMultSymbolic_MPIDense_MPIAIJ",
5030                                       MatMatMultSymbolic_MPIDense_MPIAIJ);CHKERRQ(ierr);
5031   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",
5032                                      "MatMatMultNumeric_MPIDense_MPIAIJ",
5033                                       MatMatMultNumeric_MPIDense_MPIAIJ);CHKERRQ(ierr);
5034   ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);CHKERRQ(ierr);
5035   PetscFunctionReturn(0);
5036 }
5037 EXTERN_C_END
5038 
5039 #undef __FUNCT__
5040 #define __FUNCT__ "MatCreateMPIAIJWithSplitArrays"
5041 /*@
5042      MatCreateMPIAIJWithSplitArrays - creates a MPI AIJ matrix using arrays that contain the "diagonal"
5043          and "off-diagonal" part of the matrix in CSR format.
5044 
5045    Collective on MPI_Comm
5046 
5047    Input Parameters:
5048 +  comm - MPI communicator
5049 .  m - number of local rows (Cannot be PETSC_DECIDE)
5050 .  n - This value should be the same as the local size used in creating the
5051        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
5052        calculated if N is given) For square matrices n is almost always m.
5053 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
5054 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
5055 .   i - row indices for "diagonal" portion of matrix
5056 .   j - column indices
5057 .   a - matrix values
5058 .   oi - row indices for "off-diagonal" portion of matrix
5059 .   oj - column indices
5060 -   oa - matrix values
5061 
5062    Output Parameter:
5063 .   mat - the matrix
5064 
5065    Level: advanced
5066 
5067    Notes:
5068        The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc.
5069 
5070        The i and j indices are 0 based
5071 
5072        See MatCreateMPIAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5073 
5074 
5075 .keywords: matrix, aij, compressed row, sparse, parallel
5076 
5077 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5078           MPIAIJ, MatCreateMPIAIJ(), MatCreateMPIAIJWithArrays()
5079 @*/
5080 PetscErrorCode PETSCMAT_DLLEXPORT MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],
5081 								PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5082 {
5083   PetscErrorCode ierr;
5084   Mat_MPIAIJ     *maij;
5085 
5086   PetscFunctionBegin;
5087   if (m < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
5088   if (i[0]) {
5089     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5090   }
5091   if (oi[0]) {
5092     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5093   }
5094   ierr = MatCreate(comm,mat);CHKERRQ(ierr);
5095   ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
5096   ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
5097   maij = (Mat_MPIAIJ*) (*mat)->data;
5098   maij->donotstash     = PETSC_TRUE;
5099   (*mat)->preallocated = PETSC_TRUE;
5100 
5101   ierr = PetscMapSetBlockSize((*mat)->rmap,1);CHKERRQ(ierr);
5102   ierr = PetscMapSetBlockSize((*mat)->cmap,1);CHKERRQ(ierr);
5103   ierr = PetscMapSetUp((*mat)->rmap);CHKERRQ(ierr);
5104   ierr = PetscMapSetUp((*mat)->cmap);CHKERRQ(ierr);
5105 
5106   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);CHKERRQ(ierr);
5107   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);CHKERRQ(ierr);
5108 
5109   ierr = MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5110   ierr = MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5111   ierr = MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5112   ierr = MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5113 
5114   ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5115   ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5116   PetscFunctionReturn(0);
5117 }
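
/*
   Usage sketch (assumed caller-side code, and assuming the off-diagonal column indices oj[] are
   global, as suggested by the construction of maij->B above): on 2 processes, each owning one row
   and one column of the 2x2 matrix [2 -1; -1 2], the "diagonal" block holds the local column and
   the "off-diagonal" block holds the remote column.  The arrays are not copied, so they must stay
   valid for the life of the matrix.

     PetscMPIInt rank;
     PetscInt    i[2]  = {0,1}, j[1]  = {0}, oi[2] = {0,1}, oj[1];
     PetscScalar a[1]  = {2.0}, oa[1] = {-1.0};
     Mat         A;
     ierr  = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
     oj[0] = rank ? 0 : 1;   (global index of the single off-process column)
     ierr  = MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,1,1,2,2,i,j,a,oi,oj,oa,&A);CHKERRQ(ierr);
*/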
5118 
5119 /*
5120     Special version for direct calls from Fortran
5121 */
5122 #if defined(PETSC_HAVE_FORTRAN_CAPS)
5123 #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
5124 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
5125 #define matsetvaluesmpiaij_ matsetvaluesmpiaij
5126 #endif
5127 
5128 /* Change these macros so can be used in void function */
5129 #undef CHKERRQ
5130 #define CHKERRQ(ierr) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5131 #undef SETERRQ2
5132 #define SETERRQ2(ierr,b,c,d) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5133 #undef SETERRQ
5134 #define SETERRQ(ierr,b) CHKERRABORT(((PetscObject)mat)->comm,ierr)
5135 
5136 EXTERN_C_BEGIN
5137 #undef __FUNCT__
5138 #define __FUNCT__ "matsetvaluesmpiaij_"
5139 void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
5140 {
5141   Mat             mat = *mmat;
5142   PetscInt        m = *mm, n = *mn;
5143   InsertMode      addv = *maddv;
5144   Mat_MPIAIJ      *aij = (Mat_MPIAIJ*)mat->data;
5145   PetscScalar     value;
5146   PetscErrorCode  ierr;
5147 
5148   ierr = MatPreallocated(mat);CHKERRQ(ierr);
5149   if (mat->insertmode == NOT_SET_VALUES) {
5150     mat->insertmode = addv;
5151   }
5152 #if defined(PETSC_USE_DEBUG)
5153   else if (mat->insertmode != addv) {
5154     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
5155   }
5156 #endif
5157   {
5158   PetscInt        i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
5159   PetscInt        cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
5160   PetscTruth      roworiented = aij->roworiented;
5161 
5162   /* Some Variables required in the macro */
5163   Mat             A = aij->A;
5164   Mat_SeqAIJ      *a = (Mat_SeqAIJ*)A->data;
5165   PetscInt        *aimax = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
5166   MatScalar       *aa = a->a;
5167   PetscTruth      ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES))?PETSC_TRUE:PETSC_FALSE);
5168   Mat             B = aij->B;
5169   Mat_SeqAIJ      *b = (Mat_SeqAIJ*)B->data;
5170   PetscInt        *bimax = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
5171   MatScalar       *ba = b->a;
5172 
5173   PetscInt        *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
5174   PetscInt        nonew = a->nonew;
5175   MatScalar       *ap1,*ap2;
5176 
5177   PetscFunctionBegin;
5178   for (i=0; i<m; i++) {
5179     if (im[i] < 0) continue;
5180 #if defined(PETSC_USE_DEBUG)
5181     if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
5182 #endif
5183     if (im[i] >= rstart && im[i] < rend) {
5184       row      = im[i] - rstart;
5185       lastcol1 = -1;
5186       rp1      = aj + ai[row];
5187       ap1      = aa + ai[row];
5188       rmax1    = aimax[row];
5189       nrow1    = ailen[row];
5190       low1     = 0;
5191       high1    = nrow1;
5192       lastcol2 = -1;
5193       rp2      = bj + bi[row];
5194       ap2      = ba + bi[row];
5195       rmax2    = bimax[row];
5196       nrow2    = bilen[row];
5197       low2     = 0;
5198       high2    = nrow2;
5199 
5200       for (j=0; j<n; j++) {
5201         if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
5202         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
5203         if (in[j] >= cstart && in[j] < cend){
5204           col = in[j] - cstart;
5205           MatSetValues_SeqAIJ_A_Private(row,col,value,addv);
5206         } else if (in[j] < 0) continue;
5207 #if defined(PETSC_USE_DEBUG)
5208         else if (in[j] >= mat->cmap->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);}
5209 #endif
5210         else {
5211           if (mat->was_assembled) {
5212             if (!aij->colmap) {
5213               ierr = CreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
5214             }
5215 #if defined (PETSC_USE_CTABLE)
5216             ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
5217 	    col--;
5218 #else
5219             col = aij->colmap[in[j]] - 1;
5220 #endif
5221             if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
5222               ierr = DisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
5223               col =  in[j];
5224               /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
5225               B = aij->B;
5226               b = (Mat_SeqAIJ*)B->data;
5227               bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
5228               rp2      = bj + bi[row];
5229               ap2      = ba + bi[row];
5230               rmax2    = bimax[row];
5231               nrow2    = bilen[row];
5232               low2     = 0;
5233               high2    = nrow2;
5234               bm       = aij->B->rmap->n;
5235               ba = b->a;
5236             }
5237           } else col = in[j];
5238           MatSetValues_SeqAIJ_B_Private(row,col,value,addv);
5239         }
5240       }
5241     } else {
5242       if (!aij->donotstash) {
5243         if (roworiented) {
5244           if (ignorezeroentries && v[i*n] == 0.0) continue;
5245           ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);CHKERRQ(ierr);
5246         } else {
5247           if (ignorezeroentries && v[i] == 0.0) continue;
5248           ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);CHKERRQ(ierr);
5249         }
5250       }
5251     }
5252   }}
5253   PetscFunctionReturnVoid();
5254 }
5255 EXTERN_C_END
5256 
5257