#include <../src/mat/impls/aij/mpi/mpiaij.h>   /*I "petscmat.h" I*/
#include <petsc/private/vecimpl.h>
#include <petsc/private/isimpl.h>
#include <petscblaslapack.h>
#include <petscsf.h>

/*MC
   MATAIJ - MATAIJ = "aij" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJ when constructed with a single process communicator,
   and MATMPIAIJ otherwise.  As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
. -mat_type aij - sets the matrix type to "aij" during a call to MatSetFromOptions()

  Developer Notes: Subclasses include MATAIJCUSP, MATAIJCUSPARSE, MATAIJPERM, and MATAIJCRL; the type
   also automatically switches over to using inode routines when enough inodes exist.

  Level: beginner

.seealso: MatCreateAIJ(), MatCreateSeqAIJ(), MATSEQAIJ, MATMPIAIJ
M*/
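
/*
   A minimal usage sketch (illustrative, not part of this file): calling both
   preallocation routines, as recommended above, lets the same code run on any
   communicator size; the sizes used here (100 rows, 5 diagonal / 2 off-diagonal
   nonzeros per row) are assumptions made only for the example.

     Mat A;
     ierr = MatCreate(comm,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,100,100);CHKERRQ(ierr);
     ierr = MatSetType(A,MATAIJ);CHKERRQ(ierr);
     ierr = MatSeqAIJSetPreallocation(A,5,NULL);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);CHKERRQ(ierr);
*/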

/*MC
   MATAIJCRL - MATAIJCRL = "aijcrl" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQAIJCRL when constructed with a single process communicator,
   and MATMPIAIJCRL otherwise.  As a result, for single process communicators,
   MatSeqAIJSetPreallocation() is supported, and similarly MatMPIAIJSetPreallocation() is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
. -mat_type aijcrl - sets the matrix type to "aijcrl" during a call to MatSetFromOptions()

  Level: beginner

.seealso: MatCreateMPIAIJCRL(), MATSEQAIJCRL, MATMPIAIJCRL
M*/
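
/*
   A conversion sketch (illustrative): an existing AIJ matrix can be switched to the
   CRL representation either from the options database with -mat_type aijcrl or in code:

     ierr = MatConvert(A,MATAIJCRL,MAT_INPLACE_MATRIX,&A);CHKERRQ(ierr);
*/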

PetscErrorCode MatSetBlockSizes_MPIAIJ(Mat M, PetscInt rbs, PetscInt cbs)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)M->data;

  PetscFunctionBegin;
  if (mat->A) {
    ierr = MatSetBlockSizes(mat->A,rbs,cbs);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(mat->B,rbs,1);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatFindNonzeroRows_MPIAIJ(Mat M,IS *keptrows)
{
  PetscErrorCode  ierr;
  Mat_MPIAIJ      *mat = (Mat_MPIAIJ*)M->data;
  Mat_SeqAIJ      *a   = (Mat_SeqAIJ*)mat->A->data;
  Mat_SeqAIJ      *b   = (Mat_SeqAIJ*)mat->B->data;
  const PetscInt  *ia,*ib;
  const MatScalar *aa,*bb;
  PetscInt        na,nb,i,j,*rows,cnt=0,n0rows;
  PetscInt        m = M->rmap->n,rstart = M->rmap->rstart;

  PetscFunctionBegin;
  *keptrows = 0;
  ia        = a->i;
  ib        = b->i;
  for (i=0; i<m; i++) {
    na = ia[i+1] - ia[i];
    nb = ib[i+1] - ib[i];
    if (!na && !nb) {
      cnt++;
      goto ok1;
    }
    aa = a->a + ia[i];
    for (j=0; j<na; j++) {
      if (aa[j] != 0.0) goto ok1;
    }
    bb = b->a + ib[i];
    for (j=0; j<nb; j++) {
      if (bb[j] != 0.0) goto ok1;
    }
    cnt++;
ok1:;
  }
  ierr = MPIU_Allreduce(&cnt,&n0rows,1,MPIU_INT,MPI_SUM,PetscObjectComm((PetscObject)M));CHKERRQ(ierr);
  if (!n0rows) PetscFunctionReturn(0);
  ierr = PetscMalloc1(M->rmap->n-cnt,&rows);CHKERRQ(ierr);
  cnt  = 0;
  for (i=0; i<m; i++) {
    na = ia[i+1] - ia[i];
    nb = ib[i+1] - ib[i];
    if (!na && !nb) continue;
    aa = a->a + ia[i];
    for (j=0; j<na; j++) {
      if (aa[j] != 0.0) {
        rows[cnt++] = rstart + i;
        goto ok2;
      }
    }
    bb = b->a + ib[i];
    for (j=0; j<nb; j++) {
      if (bb[j] != 0.0) {
        rows[cnt++] = rstart + i;
        goto ok2;
      }
    }
ok2:;
  }
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)M),cnt,rows,PETSC_OWN_POINTER,keptrows);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode  MatDiagonalSet_MPIAIJ(Mat Y,Vec D,InsertMode is)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *aij = (Mat_MPIAIJ*) Y->data;

  PetscFunctionBegin;
  if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
    ierr = MatDiagonalSet(aij->A,D,is);CHKERRQ(ierr);
  } else {
    ierr = MatDiagonalSet_Default(Y,D,is);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatFindZeroDiagonals_MPIAIJ(Mat M,IS *zrows)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)M->data;
  PetscErrorCode ierr;
  PetscInt       i,rstart,nrows,*rows;

  PetscFunctionBegin;
  *zrows = NULL;
  ierr   = MatFindZeroDiagonals_SeqAIJ_Private(aij->A,&nrows,&rows);CHKERRQ(ierr);
  ierr   = MatGetOwnershipRange(M,&rstart,NULL);CHKERRQ(ierr);
  for (i=0; i<nrows; i++) rows[i] += rstart;
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)M),nrows,rows,PETSC_OWN_POINTER,zrows);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetColumnNorms_MPIAIJ(Mat A,NormType type,PetscReal *norms)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)A->data;
  PetscInt       i,n,*garray = aij->garray;
  Mat_SeqAIJ     *a_aij = (Mat_SeqAIJ*) aij->A->data;
  Mat_SeqAIJ     *b_aij = (Mat_SeqAIJ*) aij->B->data;
  PetscReal      *work;

  PetscFunctionBegin;
  ierr = MatGetSize(A,NULL,&n);CHKERRQ(ierr);
  ierr = PetscCalloc1(n,&work);CHKERRQ(ierr);
  if (type == NORM_2) {
    for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
      work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]*a_aij->a[i]);
    }
    for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
      work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]*b_aij->a[i]);
    }
  } else if (type == NORM_1) {
    for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
      work[A->cmap->rstart + a_aij->j[i]] += PetscAbsScalar(a_aij->a[i]);
    }
    for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
      work[garray[b_aij->j[i]]] += PetscAbsScalar(b_aij->a[i]);
    }
  } else if (type == NORM_INFINITY) {
    for (i=0; i<a_aij->i[aij->A->rmap->n]; i++) {
      work[A->cmap->rstart + a_aij->j[i]] = PetscMax(PetscAbsScalar(a_aij->a[i]), work[A->cmap->rstart + a_aij->j[i]]);
    }
    for (i=0; i<b_aij->i[aij->B->rmap->n]; i++) {
      work[garray[b_aij->j[i]]] = PetscMax(PetscAbsScalar(b_aij->a[i]),work[garray[b_aij->j[i]]]);
    }
  } else SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_WRONG,"Unknown NormType");
  if (type == NORM_INFINITY) {
    ierr = MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  } else {
    ierr = MPIU_Allreduce(work,norms,n,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  }
  ierr = PetscFree(work);CHKERRQ(ierr);
  if (type == NORM_2) {
    for (i=0; i<n; i++) norms[i] = PetscSqrtReal(norms[i]);
  }
  PetscFunctionReturn(0);
}
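
/*
   Caller sketch (illustrative, not part of this file): the output array must have room
   for all N global column norms on every rank, since the result is formed with an
   allreduce.

     PetscInt  N;
     PetscReal *norms;
     ierr = MatGetSize(A,NULL,&N);CHKERRQ(ierr);
     ierr = PetscMalloc1(N,&norms);CHKERRQ(ierr);
     ierr = MatGetColumnNorms(A,NORM_2,norms);CHKERRQ(ierr);
*/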

PetscErrorCode MatFindOffBlockDiagonalEntries_MPIAIJ(Mat A,IS *is)
{
  Mat_MPIAIJ      *a  = (Mat_MPIAIJ*)A->data;
  IS              sis,gis;
  PetscErrorCode  ierr;
  const PetscInt  *isis,*igis;
  PetscInt        n,*iis,nsis,ngis,rstart,i;

  PetscFunctionBegin;
  ierr = MatFindOffBlockDiagonalEntries(a->A,&sis);CHKERRQ(ierr);
  ierr = MatFindNonzeroRows(a->B,&gis);CHKERRQ(ierr);
  ierr = ISGetSize(gis,&ngis);CHKERRQ(ierr);
  ierr = ISGetSize(sis,&nsis);CHKERRQ(ierr);
  ierr = ISGetIndices(sis,&isis);CHKERRQ(ierr);
  ierr = ISGetIndices(gis,&igis);CHKERRQ(ierr);

  ierr = PetscMalloc1(ngis+nsis,&iis);CHKERRQ(ierr);
  ierr = PetscMemcpy(iis,igis,ngis*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = PetscMemcpy(iis+ngis,isis,nsis*sizeof(PetscInt));CHKERRQ(ierr);
  n    = ngis + nsis;
  ierr = PetscSortRemoveDupsInt(&n,iis);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&rstart,NULL);CHKERRQ(ierr);
  for (i=0; i<n; i++) iis[i] += rstart;
  ierr = ISCreateGeneral(PetscObjectComm((PetscObject)A),n,iis,PETSC_OWN_POINTER,is);CHKERRQ(ierr);

  ierr = ISRestoreIndices(sis,&isis);CHKERRQ(ierr);
  ierr = ISRestoreIndices(gis,&igis);CHKERRQ(ierr);
  ierr = ISDestroy(&sis);CHKERRQ(ierr);
  ierr = ISDestroy(&gis);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
    Distributes a SeqAIJ matrix across a set of processes. Code stolen from
    MatLoad_MPIAIJ(). Horrible lack of reuse. Should be a routine for each matrix type.

    Only for square matrices

    Used by a preconditioner, hence PETSC_EXTERN
*/
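
/*
   A call sketch (hypothetical caller; the names m, M, gmat, and dmat are invented for
   this example): gmat lives entirely on process 0 and every process passes the number
   of rows m it wants to own, here an even split computed with PetscSplitOwnership()
   from the global row count M.

     PetscInt m = PETSC_DECIDE;
     ierr = PetscSplitOwnership(comm,&m,&M);CHKERRQ(ierr);
     ierr = MatDistribute_MPIAIJ(comm,gmat,m,MAT_INITIAL_MATRIX,&dmat);CHKERRQ(ierr);
*/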
PETSC_EXTERN PetscErrorCode MatDistribute_MPIAIJ(MPI_Comm comm,Mat gmat,PetscInt m,MatReuse reuse,Mat *inmat)
{
  PetscMPIInt    rank,size;
  PetscInt       *rowners,*dlens,*olens,i,rstart,rend,j,jj,nz = 0,*gmataj,cnt,row,*ld,bses[2];
  PetscErrorCode ierr;
  Mat            mat;
  Mat_SeqAIJ     *gmata;
  PetscMPIInt    tag;
  MPI_Status     status;
  PetscBool      aij;
  MatScalar      *gmataa,*ao,*ad,*gmataarestore=0;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscObjectTypeCompare((PetscObject)gmat,MATSEQAIJ,&aij);CHKERRQ(ierr);
    if (!aij) SETERRQ1(PetscObjectComm((PetscObject)gmat),PETSC_ERR_SUP,"Currently no support for input matrix of type %s\n",((PetscObject)gmat)->type_name);
  }
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatCreate(comm,&mat);CHKERRQ(ierr);
    ierr = MatSetSizes(mat,m,m,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
    ierr = MatGetBlockSizes(gmat,&bses[0],&bses[1]);CHKERRQ(ierr);
    ierr = MPI_Bcast(bses,2,MPIU_INT,0,comm);CHKERRQ(ierr);
    ierr = MatSetBlockSizes(mat,bses[0],bses[1]);CHKERRQ(ierr);
    ierr = MatSetType(mat,MATAIJ);CHKERRQ(ierr);
    ierr = PetscMalloc1(size+1,&rowners);CHKERRQ(ierr);
    ierr = PetscMalloc2(m,&dlens,m,&olens);CHKERRQ(ierr);
    ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);

    rowners[0] = 0;
    for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
    rstart = rowners[rank];
    rend   = rowners[rank+1];
    ierr   = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
    if (!rank) {
      gmata = (Mat_SeqAIJ*) gmat->data;
      /* send row lengths to all processors */
      for (i=0; i<m; i++) dlens[i] = gmata->ilen[i];
      for (i=1; i<size; i++) {
        ierr = MPI_Send(gmata->ilen + rowners[i],rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
      }
      /* determine the number of diagonal and off-diagonal nonzeros in each row */
      ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
      ierr = PetscCalloc1(m,&ld);CHKERRQ(ierr);
      jj   = 0;
      for (i=0; i<m; i++) {
        for (j=0; j<dlens[i]; j++) {
          if (gmata->j[jj] < rstart) ld[i]++;
          if (gmata->j[jj] < rstart || gmata->j[jj] >= rend) olens[i]++;
          jj++;
        }
      }
      /* send column indices to other processes */
      for (i=1; i<size; i++) {
        nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
        ierr = MPI_Send(&nz,1,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
        ierr = MPI_Send(gmata->j + gmata->i[rowners[i]],nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
      }

      /* send numerical values to other processes */
      for (i=1; i<size; i++) {
        nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
        ierr = MPI_Send(gmata->a + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
      }
      gmataa = gmata->a;
      gmataj = gmata->j;

    } else {
      /* receive row lengths */
      ierr = MPI_Recv(dlens,m,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
      /* receive column indices */
      ierr = MPI_Recv(&nz,1,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
      ierr = PetscMalloc2(nz,&gmataa,nz,&gmataj);CHKERRQ(ierr);
      ierr = MPI_Recv(gmataj,nz,MPIU_INT,0,tag,comm,&status);CHKERRQ(ierr);
      /* determine the number of diagonal and off-diagonal nonzeros in each row */
      ierr = PetscMemzero(olens,m*sizeof(PetscInt));CHKERRQ(ierr);
      ierr = PetscCalloc1(m,&ld);CHKERRQ(ierr);
      jj   = 0;
      for (i=0; i<m; i++) {
        for (j=0; j<dlens[i]; j++) {
          if (gmataj[jj] < rstart) ld[i]++;
          if (gmataj[jj] < rstart || gmataj[jj] >= rend) olens[i]++;
          jj++;
        }
      }
      /* receive numerical values */
      ierr = PetscMemzero(gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr);
      ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
    }
    /* set preallocation */
    for (i=0; i<m; i++) {
      dlens[i] -= olens[i];
    }
    ierr = MatSeqAIJSetPreallocation(mat,0,dlens);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(mat,0,dlens,0,olens);CHKERRQ(ierr);

    for (i=0; i<m; i++) {
      dlens[i] += olens[i];
    }
    cnt = 0;
    for (i=0; i<m; i++) {
      row  = rstart + i;
      ierr = MatSetValues(mat,1,&row,dlens[i],gmataj+cnt,gmataa+cnt,INSERT_VALUES);CHKERRQ(ierr);
      cnt += dlens[i];
    }
    if (rank) {
      ierr = PetscFree2(gmataa,gmataj);CHKERRQ(ierr);
    }
    ierr = PetscFree2(dlens,olens);CHKERRQ(ierr);
    ierr = PetscFree(rowners);CHKERRQ(ierr);

    ((Mat_MPIAIJ*)(mat->data))->ld = ld;

    *inmat = mat;
  } else {   /* column indices are already set; only need to move over numerical values from process 0 */
    Mat_SeqAIJ *Ad = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->A->data;
    Mat_SeqAIJ *Ao = (Mat_SeqAIJ*)((Mat_MPIAIJ*)((*inmat)->data))->B->data;
    mat  = *inmat;
    ierr = PetscObjectGetNewTag((PetscObject)mat,&tag);CHKERRQ(ierr);
    if (!rank) {
      /* send numerical values to other processes */
      gmata  = (Mat_SeqAIJ*) gmat->data;
      ierr   = MatGetOwnershipRanges(mat,(const PetscInt**)&rowners);CHKERRQ(ierr);
      gmataa = gmata->a;
      for (i=1; i<size; i++) {
        nz   = gmata->i[rowners[i+1]]-gmata->i[rowners[i]];
        ierr = MPI_Send(gmataa + gmata->i[rowners[i]],nz,MPIU_SCALAR,i,tag,comm);CHKERRQ(ierr);
      }
      nz = gmata->i[rowners[1]]-gmata->i[rowners[0]];
    } else {
      /* receive numerical values from process 0 */
      nz   = Ad->nz + Ao->nz;
      ierr = PetscMalloc1(nz,&gmataa);CHKERRQ(ierr); gmataarestore = gmataa;
      ierr = MPI_Recv(gmataa,nz,MPIU_SCALAR,0,tag,comm,&status);CHKERRQ(ierr);
    }
    /* transfer numerical values into the diagonal A and off-diagonal B parts of mat */
    ld = ((Mat_MPIAIJ*)(mat->data))->ld;
    ad = Ad->a;
    ao = Ao->a;
    if (mat->rmap->n) {
      i  = 0;
      nz = ld[i];                                   ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
      nz = Ad->i[i+1] - Ad->i[i];                   ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
    }
    for (i=1; i<mat->rmap->n; i++) {
      nz = Ao->i[i] - Ao->i[i-1] - ld[i-1] + ld[i]; ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ao += nz; gmataa += nz;
      nz = Ad->i[i+1] - Ad->i[i];                   ierr = PetscMemcpy(ad,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr); ad += nz; gmataa += nz;
    }
    i--;
    if (mat->rmap->n) {
      nz = Ao->i[i+1] - Ao->i[i] - ld[i];           ierr = PetscMemcpy(ao,gmataa,nz*sizeof(PetscScalar));CHKERRQ(ierr);
    }
    if (rank) {
      ierr = PetscFree(gmataarestore);CHKERRQ(ierr);
    }
  }
  ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  Local utility routine that creates a mapping from the global column
  number to the local number in the off-diagonal part of the local
  storage of the matrix.  When PETSC_USE_CTABLE is defined this is scalable at
  a slightly higher hash table cost; without it it is not scalable (each process
  has an order-N integer array, but access is fast).
*/
PetscErrorCode MatCreateColmap_MPIAIJ_Private(Mat mat)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       n = aij->B->cmap->n,i;

  PetscFunctionBegin;
  if (!aij->garray) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"MPIAIJ Matrix was assembled but is missing garray");
#if defined(PETSC_USE_CTABLE)
  ierr = PetscTableCreate(n,mat->cmap->N+1,&aij->colmap);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    ierr = PetscTableAdd(aij->colmap,aij->garray[i]+1,i+1,INSERT_VALUES);CHKERRQ(ierr);
  }
#else
  ierr = PetscCalloc1(mat->cmap->N+1,&aij->colmap);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N+1)*sizeof(PetscInt));CHKERRQ(ierr);
  for (i=0; i<n; i++) aij->colmap[aij->garray[i]] = i+1;
#endif
  PetscFunctionReturn(0);
}
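
/*
   A lookup sketch (illustrative): colmap stores local index + 1 so that a result of
   zero, i.e. -1 after the decrement, means the global column is absent from the
   off-diagonal block. The names gcol and lcol are invented for this example.

#if defined(PETSC_USE_CTABLE)
     ierr = PetscTableFind(aij->colmap,gcol+1,&lcol);CHKERRQ(ierr);
     lcol--;
#else
     lcol = aij->colmap[gcol] - 1;
#endif
*/

/*
   The two macros below insert a single value into the diagonal (A) or off-diagonal (B)
   local matrix: a binary search narrows the row to at most 5 candidate positions, a
   short linear scan finishes the lookup, and if the column is not already present a new
   nonzero is created by shifting the tail of the row up one slot.
*/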
#define MatSetValues_SeqAIJ_A_Private(row,col,value,addv,orow,ocol)     \
{ \
    if (col <= lastcol1)  low1 = 0;     \
    else                 high1 = nrow1; \
    lastcol1 = col;\
    while (high1-low1 > 5) { \
      t = (low1+high1)/2; \
      if (rp1[t] > col) high1 = t; \
      else              low1  = t; \
    } \
      for (_i=low1; _i<high1; _i++) { \
        if (rp1[_i] > col) break; \
        if (rp1[_i] == col) { \
          if (addv == ADD_VALUES) ap1[_i] += value;   \
          else                    ap1[_i] = value; \
          goto a_noinsert; \
        } \
      }  \
      if (value == 0.0 && ignorezeroentries && row != col) {low1 = 0; high1 = nrow1; goto a_noinsert;} \
      if (nonew == 1) {low1 = 0; high1 = nrow1; goto a_noinsert;}                \
      if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
      MatSeqXAIJReallocateAIJ(A,am,1,nrow1,row,col,rmax1,aa,ai,aj,rp1,ap1,aimax,nonew,MatScalar); \
      N = nrow1++ - 1; a->nz++; high1++; \
      /* shift up all the later entries in this row */ \
      for (ii=N; ii>=_i; ii--) { \
        rp1[ii+1] = rp1[ii]; \
        ap1[ii+1] = ap1[ii]; \
      } \
      rp1[_i] = col;  \
      ap1[_i] = value;  \
      A->nonzerostate++;\
      a_noinsert: ; \
      ailen[row] = nrow1; \
}

#define MatSetValues_SeqAIJ_B_Private(row,col,value,addv,orow,ocol) \
  { \
    if (col <= lastcol2) low2 = 0;                        \
    else high2 = nrow2;                                   \
    lastcol2 = col;                                       \
    while (high2-low2 > 5) {                              \
      t = (low2+high2)/2;                                 \
      if (rp2[t] > col) high2 = t;                        \
      else             low2  = t;                         \
    }                                                     \
    for (_i=low2; _i<high2; _i++) {                       \
      if (rp2[_i] > col) break;                           \
      if (rp2[_i] == col) {                               \
        if (addv == ADD_VALUES) ap2[_i] += value;         \
        else                    ap2[_i] = value;          \
        goto b_noinsert;                                  \
      }                                                   \
    }                                                     \
    if (value == 0.0 && ignorezeroentries) {low2 = 0; high2 = nrow2; goto b_noinsert;} \
    if (nonew == 1) {low2 = 0; high2 = nrow2; goto b_noinsert;}                        \
    if (nonew == -1) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", orow, ocol); \
    MatSeqXAIJReallocateAIJ(B,bm,1,nrow2,row,col,rmax2,ba,bi,bj,rp2,ap2,bimax,nonew,MatScalar); \
    N = nrow2++ - 1; b->nz++; high2++;                    \
    /* shift up all the later entries in this row */      \
    for (ii=N; ii>=_i; ii--) {                            \
      rp2[ii+1] = rp2[ii];                                \
      ap2[ii+1] = ap2[ii];                                \
    }                                                     \
    rp2[_i] = col;                                        \
    ap2[_i] = value;                                      \
    B->nonzerostate++;                                    \
    b_noinsert: ;                                         \
    bilen[row] = nrow2;                                   \
  }

PetscErrorCode MatSetValuesRow_MPIAIJ(Mat A,PetscInt row,const PetscScalar v[])
{
  Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)mat->A->data,*b = (Mat_SeqAIJ*)mat->B->data;
  PetscErrorCode ierr;
  PetscInt       l,*garray = mat->garray,diag;

  PetscFunctionBegin;
  /* code only works for square matrices A */
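  /* v is assumed to hold the entire compressed row in increasing global column order:
     off-diagonal entries left of the diagonal block, then the diagonal block, then the
     off-diagonal entries to its right (this matches the three copies below) */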

  /* find size of row to the left of the diagonal part */
  ierr = MatGetOwnershipRange(A,&diag,0);CHKERRQ(ierr);
  row  = row - diag;
  for (l=0; l<b->i[row+1]-b->i[row]; l++) {
    if (garray[b->j[b->i[row]+l]] > diag) break;
  }
  ierr = PetscMemcpy(b->a+b->i[row],v,l*sizeof(PetscScalar));CHKERRQ(ierr);

  /* diagonal part */
  ierr = PetscMemcpy(a->a+a->i[row],v+l,(a->i[row+1]-a->i[row])*sizeof(PetscScalar));CHKERRQ(ierr);

  /* right of diagonal part */
  ierr = PetscMemcpy(b->a+b->i[row]+l,v+l+a->i[row+1]-a->i[row],(b->i[row+1]-b->i[row]-l)*sizeof(PetscScalar));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatSetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt im[],PetscInt n,const PetscInt in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscScalar    value;
  PetscErrorCode ierr;
  PetscInt       i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
  PetscInt       cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
  PetscBool      roworiented = aij->roworiented;

  /* Some Variables required in the macro */
  Mat        A                 = aij->A;
  Mat_SeqAIJ *a                = (Mat_SeqAIJ*)A->data;
  PetscInt   *aimax            = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
  MatScalar  *aa               = a->a;
  PetscBool  ignorezeroentries = a->ignorezeroentries;
  Mat        B                 = aij->B;
  Mat_SeqAIJ *b                = (Mat_SeqAIJ*)B->data;
  PetscInt   *bimax            = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
  MatScalar  *ba               = b->a;

  PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
  PetscInt  nonew;
  MatScalar *ap1,*ap2;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_DEBUG)
    if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
#endif
    if (im[i] >= rstart && im[i] < rend) {
      row      = im[i] - rstart;
      lastcol1 = -1;
      rp1      = aj + ai[row];
      ap1      = aa + ai[row];
      rmax1    = aimax[row];
      nrow1    = ailen[row];
      low1     = 0;
      high1    = nrow1;
      lastcol2 = -1;
      rp2      = bj + bi[row];
      ap2      = ba + bi[row];
      rmax2    = bimax[row];
      nrow2    = bilen[row];
      low2     = 0;
      high2    = nrow2;

      for (j=0; j<n; j++) {
        if (roworiented) value = v[i*n+j];
        else             value = v[i+j*m];
        if (in[j] >= cstart && in[j] < cend) {
          col   = in[j] - cstart;
          nonew = a->nonew;
          if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
          MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_DEBUG)
        else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
#endif
        else {
          if (mat->was_assembled) {
            if (!aij->colmap) {
              ierr = MatCreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
            }
#if defined(PETSC_USE_CTABLE)
            ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
            col--;
#else
            col = aij->colmap[in[j]] - 1;
#endif
            if (col < 0 && !((Mat_SeqAIJ*)(aij->B->data))->nonew) {
              ierr = MatDisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
              col  = in[j];
              /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
              B     = aij->B;
              b     = (Mat_SeqAIJ*)B->data;
              bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j; ba = b->a;
              rp2   = bj + bi[row];
              ap2   = ba + bi[row];
              rmax2 = bimax[row];
              nrow2 = bilen[row];
              low2  = 0;
              high2 = nrow2;
              bm    = aij->B->rmap->n;
              ba    = b->a;
            } else if (col < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero at global row/column (%D, %D) into matrix", im[i], in[j]);
          } else col = in[j];
          nonew = b->nonew;
          MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
        }
      }
    } else {
      if (mat->nooffprocentries) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Setting off process row %D even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set",im[i]);
      if (!aij->donotstash) {
        mat->assembled = PETSC_FALSE;
        if (roworiented) {
          ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturn(0);
}

PetscErrorCode MatGetValues_MPIAIJ(Mat mat,PetscInt m,const PetscInt idxm[],PetscInt n,const PetscInt idxn[],PetscScalar v[])
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       i,j,rstart = mat->rmap->rstart,rend = mat->rmap->rend;
  PetscInt       cstart = mat->cmap->rstart,cend = mat->cmap->rend,row,col;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (idxm[i] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %D",idxm[i]); */
    if (idxm[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",idxm[i],mat->rmap->N-1);
    if (idxm[i] >= rstart && idxm[i] < rend) {
      row = idxm[i] - rstart;
      for (j=0; j<n; j++) {
        if (idxn[j] < 0) continue; /* SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %D",idxn[j]); */
        if (idxn[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",idxn[j],mat->cmap->N-1);
        if (idxn[j] >= cstart && idxn[j] < cend) {
          col  = idxn[j] - cstart;
          ierr = MatGetValues(aij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
        } else {
          if (!aij->colmap) {
            ierr = MatCreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
          }
#if defined(PETSC_USE_CTABLE)
          ierr = PetscTableFind(aij->colmap,idxn[j]+1,&col);CHKERRQ(ierr);
          col--;
#else
          col = aij->colmap[idxn[j]] - 1;
#endif
          if ((col < 0) || (aij->garray[col] != idxn[j])) *(v+i*n+j) = 0.0;
          else {
            ierr = MatGetValues(aij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
          }
        }
      }
    } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only local values currently supported");
  }
  PetscFunctionReturn(0);
}

extern PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat,Vec,Vec);

PetscErrorCode MatAssemblyBegin_MPIAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;
  PetscInt       nstash,reallocs;

  PetscFunctionBegin;
  if (aij->donotstash || mat->nooffprocentries) PetscFunctionReturn(0);

  ierr = MatStashScatterBegin_Private(mat,&mat->stash,mat->rmap->range);CHKERRQ(ierr);
  ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
  ierr = PetscInfo2(aij->A,"Stash has %D entries, uses %D mallocs.\n",nstash,reallocs);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatAssemblyEnd_MPIAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ     *a   = (Mat_SeqAIJ*)aij->A->data;
  PetscErrorCode ierr;
  PetscMPIInt    n;
  PetscInt       i,j,rstart,ncols,flg;
  PetscInt       *row,*col;
  PetscBool      other_disassembled;
  PetscScalar    *val;

  /* do not use 'b = (Mat_SeqAIJ*)aij->B->data' as B can be reset in disassembly */

  PetscFunctionBegin;
  if (!aij->donotstash && !mat->nooffprocentries) {
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      if (!flg) break;

      for (i=0; i<n; ) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) {
          if (row[j] != rstart) break;
        }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        /* Now assemble all these values with a single function call */
        ierr = MatSetValues_MPIAIJ(mat,1,row+i,ncols,col+i,val+i,mat->insertmode);CHKERRQ(ierr);

        i = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(aij->A,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(aij->A,mode);CHKERRQ(ierr);

  /* determine if any process has disassembled; if so we must
     also disassemble ourselves, in order that we may reassemble. */
  /*
     if the nonzero structure of submatrix B cannot change then we know that
     no process disassembled, thus we can skip this stuff
  */
  if (!((Mat_SeqAIJ*)aij->B->data)->nonew) {
    ierr = MPIU_Allreduce(&mat->was_assembled,&other_disassembled,1,MPIU_BOOL,MPI_PROD,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    if (mat->was_assembled && !other_disassembled) {
      ierr = MatDisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
    }
  }
  if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatSetUpMultiply_MPIAIJ(mat);CHKERRQ(ierr);
  }
  ierr = MatSetOption(aij->B,MAT_USE_INODES,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(aij->B,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(aij->B,mode);CHKERRQ(ierr);

  ierr = PetscFree2(aij->rowvalues,aij->rowindices);CHKERRQ(ierr);

  aij->rowvalues = 0;

  ierr = VecDestroy(&aij->diag);CHKERRQ(ierr);
  if (a->inode.size) mat->ops->multdiagonalblock = MatMultDiagonalBlock_MPIAIJ;

  /* if no new nonzero locations are allowed in the matrix then only set the matrix state the first time through */
  if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
    PetscObjectState state = aij->A->nonzerostate + aij->B->nonzerostate;
    ierr = MPIU_Allreduce(&state,&mat->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
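
/*
   The stash machinery above is what lets any rank set entries it does not own; a typical
   caller (a sketch, with grow, gcol, and val invented for this example) is simply

     ierr = MatSetValues(mat,1,&grow,1,&gcol,&val,ADD_VALUES);CHKERRQ(ierr);
     ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   and the stashed off-process values are routed to their owners during assembly.
*/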

PetscErrorCode MatZeroEntries_MPIAIJ(Mat A)
{
  Mat_MPIAIJ     *l = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
  ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatZeroRows_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
{
  Mat_MPIAIJ    *mat    = (Mat_MPIAIJ *) A->data;
  PetscInt      *lrows;
  PetscInt       r, len;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* get locally owned rows */
  ierr = MatZeroRowsMapLocal_Private(A,N,rows,&len,&lrows);CHKERRQ(ierr);
  /* fix right hand side if needed */
  if (x && b) {
    const PetscScalar *xx;
    PetscScalar       *bb;

    ierr = VecGetArrayRead(x, &xx);CHKERRQ(ierr);
    ierr = VecGetArray(b, &bb);CHKERRQ(ierr);
    for (r = 0; r < len; ++r) bb[lrows[r]] = diag*xx[lrows[r]];
    ierr = VecRestoreArrayRead(x, &xx);CHKERRQ(ierr);
    ierr = VecRestoreArray(b, &bb);CHKERRQ(ierr);
  }
  /* Must zero mat->B before mat->A because the (diag) case below may put values into mat->B */
  ierr = MatZeroRows(mat->B, len, lrows, 0.0, NULL, NULL);CHKERRQ(ierr);
  if (A->congruentlayouts == -1) { /* first time we compare rows and cols layouts */
    PetscBool cong;
    ierr = PetscLayoutCompare(A->rmap,A->cmap,&cong);CHKERRQ(ierr);
    if (cong) A->congruentlayouts = 1;
    else      A->congruentlayouts = 0;
  }
  if ((diag != 0.0) && A->congruentlayouts) {
    ierr = MatZeroRows(mat->A, len, lrows, diag, NULL, NULL);CHKERRQ(ierr);
  } else if (diag != 0.0) {
    ierr = MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);CHKERRQ(ierr);
    if (((Mat_SeqAIJ *) mat->A->data)->nonew) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_SUP, "MatZeroRows() on rectangular matrices cannot be used with the Mat options\nMAT_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
    for (r = 0; r < len; ++r) {
      const PetscInt row = lrows[r] + A->rmap->rstart;
      ierr = MatSetValues(A, 1, &row, 1, &row, &diag, INSERT_VALUES);CHKERRQ(ierr);
    }
    ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  } else {
    ierr = MatZeroRows(mat->A, len, lrows, 0.0, NULL, NULL);CHKERRQ(ierr);
  }
  ierr = PetscFree(lrows);CHKERRQ(ierr);

  /* only change matrix nonzero state if pattern was allowed to be changed */
  if (!((Mat_SeqAIJ*)(mat->A->data))->keepnonzeropattern) {
    PetscObjectState state = mat->A->nonzerostate + mat->B->nonzerostate;
    ierr = MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
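
/*
   Typical use (a sketch, not from this file): impose Dirichlet conditions on the global
   rows listed in bcrows while keeping the right hand side consistent; nbc, bcrows, u
   (the vector of boundary values), and b are names invented for this example.

     ierr = MatZeroRows(A,nbc,bcrows,1.0,u,b);CHKERRQ(ierr);
*/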

PetscErrorCode MatZeroRowsColumns_MPIAIJ(Mat A,PetscInt N,const PetscInt rows[],PetscScalar diag,Vec x,Vec b)
{
  Mat_MPIAIJ        *l = (Mat_MPIAIJ*)A->data;
  PetscErrorCode    ierr;
  PetscMPIInt       n = A->rmap->n;
  PetscInt          i,j,r,m,p = 0,len = 0;
  PetscInt          *lrows,*owners = A->rmap->range;
  PetscSFNode       *rrows;
  PetscSF           sf;
  const PetscScalar *xx;
  PetscScalar       *bb,*mask;
  Vec               xmask,lmask;
  Mat_SeqAIJ        *aij = (Mat_SeqAIJ*)l->B->data;
  const PetscInt    *aj, *ii,*ridx;
  PetscScalar       *aa;

  PetscFunctionBegin;
  /* Create SF where leaves are input rows and roots are owned rows */
  ierr = PetscMalloc1(n, &lrows);CHKERRQ(ierr);
  for (r = 0; r < n; ++r) lrows[r] = -1;
  ierr = PetscMalloc1(N, &rrows);CHKERRQ(ierr);
  for (r = 0; r < N; ++r) {
    const PetscInt idx   = rows[r];
    if (idx < 0 || A->rmap->N <= idx) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row %D out of range [0,%D)",idx,A->rmap->N);
    if (idx < owners[p] || owners[p+1] <= idx) { /* short-circuit the search if the last p owns this row too */
      ierr = PetscLayoutFindOwner(A->rmap,idx,&p);CHKERRQ(ierr);
    }
    rrows[r].rank  = p;
    rrows[r].index = rows[r] - owners[p];
  }
  ierr = PetscSFCreate(PetscObjectComm((PetscObject) A), &sf);CHKERRQ(ierr);
  ierr = PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, rrows, PETSC_OWN_POINTER);CHKERRQ(ierr);
  /* Collect flags for rows to be zeroed */
  ierr = PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);CHKERRQ(ierr);
  ierr = PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *) rows, lrows, MPI_LOR);CHKERRQ(ierr);
  ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
  /* Compress and put in row numbers */
  for (r = 0; r < n; ++r) if (lrows[r] >= 0) lrows[len++] = r;
  /* zero diagonal part of matrix */
  ierr = MatZeroRowsColumns(l->A,len,lrows,diag,x,b);CHKERRQ(ierr);
  /* handle off-diagonal part of matrix */
  ierr = MatCreateVecs(A,&xmask,NULL);CHKERRQ(ierr);
  ierr = VecDuplicate(l->lvec,&lmask);CHKERRQ(ierr);
  ierr = VecGetArray(xmask,&bb);CHKERRQ(ierr);
  for (i=0; i<len; i++) bb[lrows[i]] = 1;
  ierr = VecRestoreArray(xmask,&bb);CHKERRQ(ierr);
  ierr = VecScatterBegin(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecScatterEnd(l->Mvctx,xmask,lmask,ADD_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = VecDestroy(&xmask);CHKERRQ(ierr);
  if (x) {
    ierr = VecScatterBegin(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecScatterEnd(l->Mvctx,x,l->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
    ierr = VecGetArrayRead(l->lvec,&xx);CHKERRQ(ierr);
    ierr = VecGetArray(b,&bb);CHKERRQ(ierr);
  }
  ierr = VecGetArray(lmask,&mask);CHKERRQ(ierr);
  /* remove zeroed rows of off-diagonal matrix */
  ii = aij->i;
  for (i=0; i<len; i++) {
    ierr = PetscMemzero(aij->a + ii[lrows[i]],(ii[lrows[i]+1] - ii[lrows[i]])*sizeof(PetscScalar));CHKERRQ(ierr);
  }
  /* loop over all elements of off process part of matrix zeroing removed columns */
  if (aij->compressedrow.use) {
    m    = aij->compressedrow.nrows;
    ii   = aij->compressedrow.i;
    ridx = aij->compressedrow.rindex;
    for (i=0; i<m; i++) {
      n  = ii[i+1] - ii[i];
      aj = aij->j + ii[i];
      aa = aij->a + ii[i];

      for (j=0; j<n; j++) {
        if (PetscAbsScalar(mask[*aj])) {
          if (b) bb[*ridx] -= *aa*xx[*aj];
          *aa = 0.0;
        }
        aa++;
        aj++;
      }
      ridx++;
    }
  } else { /* do not use compressed row format */
    m = l->B->rmap->n;
    for (i=0; i<m; i++) {
      n  = ii[i+1] - ii[i];
      aj = aij->j + ii[i];
      aa = aij->a + ii[i];
      for (j=0; j<n; j++) {
        if (PetscAbsScalar(mask[*aj])) {
          if (b) bb[i] -= *aa*xx[*aj];
          *aa = 0.0;
        }
        aa++;
        aj++;
      }
    }
  }
  if (x) {
    ierr = VecRestoreArray(b,&bb);CHKERRQ(ierr);
    ierr = VecRestoreArrayRead(l->lvec,&xx);CHKERRQ(ierr);
  }
  ierr = VecRestoreArray(lmask,&mask);CHKERRQ(ierr);
  ierr = VecDestroy(&lmask);CHKERRQ(ierr);
  ierr = PetscFree(lrows);CHKERRQ(ierr);

  /* only change matrix nonzero state if pattern was allowed to be changed */
  if (!((Mat_SeqAIJ*)(l->A->data))->keepnonzeropattern) {
    PetscObjectState state = l->A->nonzerostate + l->B->nonzerostate;
    ierr = MPIU_Allreduce(&state,&A->nonzerostate,1,MPIU_INT64,MPI_SUM,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

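/*
   In the products below the multiply is split as y = A_d*x_local + A_o*x_ghost: the
   scatter filling lvec with the needed off-process entries of x is started first, the
   local (diagonal-block) multiply runs while those messages are in flight, and the
   off-diagonal contribution is added once the scatter completes.
*/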
PetscErrorCode MatMult_MPIAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;
  PetscInt       nt;

  PetscFunctionBegin;
  ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
  if (nt != A->cmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Incompatible partition of A (%D) and xx (%D)",A->cmap->n,nt);
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMultDiagonalBlock_MPIAIJ(Mat A,Vec bb,Vec xx)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatMultDiagonalBlock(a->A,bb,xx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMultAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMultTranspose_MPIAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;
  PetscBool      merged;

  PetscFunctionBegin;
  ierr = VecScatterGetMerged(a->Mvctx,&merged);CHKERRQ(ierr);
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  if (!merged) {
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* receive remote parts: note this assumes the values are not actually added into yy until the next line */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  } else {
    /* do local part */
    ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
    /* send it on its way */
    ierr = VecScatterBegin(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
    /* values actually were received in the Begin() but we still need to call this no-op End() */
    ierr = VecScatterEnd(a->Mvctx,a->lvec,yy,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

PetscErrorCode  MatIsTranspose_MPIAIJ(Mat Amat,Mat Bmat,PetscReal tol,PetscBool *f)
{
  MPI_Comm       comm;
  Mat_MPIAIJ     *Aij = (Mat_MPIAIJ*) Amat->data, *Bij;
  Mat            Adia = Aij->A, Bdia, Aoff,Boff,*Aoffs,*Boffs;
  IS             Me,Notme;
  PetscErrorCode ierr;
  PetscInt       M,N,first,last,*notme,i;
  PetscMPIInt    size;

  PetscFunctionBegin;
  /* Easy test: symmetric diagonal block */
  Bij  = (Mat_MPIAIJ*) Bmat->data; Bdia = Bij->A;
  ierr = MatIsTranspose(Adia,Bdia,tol,f);CHKERRQ(ierr);
  if (!*f) PetscFunctionReturn(0);
  ierr = PetscObjectGetComm((PetscObject)Amat,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) PetscFunctionReturn(0);

  /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
  ierr = MatGetSize(Amat,&M,&N);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(Amat,&first,&last);CHKERRQ(ierr);
  ierr = PetscMalloc1(N-last+first,&notme);CHKERRQ(ierr);
  for (i=0; i<first; i++) notme[i] = i;
  for (i=last; i<M; i++) notme[i-last+first] = i;
  ierr = ISCreateGeneral(MPI_COMM_SELF,N-last+first,notme,PETSC_COPY_VALUES,&Notme);CHKERRQ(ierr);
  ierr = ISCreateStride(MPI_COMM_SELF,last-first,first,1,&Me);CHKERRQ(ierr);
  ierr = MatCreateSubMatrices(Amat,1,&Me,&Notme,MAT_INITIAL_MATRIX,&Aoffs);CHKERRQ(ierr);
  Aoff = Aoffs[0];
  ierr = MatCreateSubMatrices(Bmat,1,&Notme,&Me,MAT_INITIAL_MATRIX,&Boffs);CHKERRQ(ierr);
  Boff = Boffs[0];
  ierr = MatIsTranspose(Aoff,Boff,tol,f);CHKERRQ(ierr);
  ierr = MatDestroyMatrices(1,&Aoffs);CHKERRQ(ierr);
  ierr = MatDestroyMatrices(1,&Boffs);CHKERRQ(ierr);
  ierr = ISDestroy(&Me);CHKERRQ(ierr);
  ierr = ISDestroy(&Notme);CHKERRQ(ierr);
  ierr = PetscFree(notme);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatMultTransposeAdd_MPIAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* do nondiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  /* send it on its way */
  ierr = VecScatterBegin(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  /* do local part */
  ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  /* receive remote parts */
  ierr = VecScatterEnd(a->Mvctx,a->lvec,zz,ADD_VALUES,SCATTER_REVERSE);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*
  This only works correctly for square matrices where the subblock A->A is the
  diagonal block
*/
PetscErrorCode MatGetDiagonal_MPIAIJ(Mat A,Vec v)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;

  PetscFunctionBegin;
  if (A->rmap->N != A->cmap->N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
  if (A->rmap->rstart != A->cmap->rstart || A->rmap->rend != A->cmap->rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"row partition must equal col partition");
  ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatScale_MPIAIJ(Mat A,PetscScalar aa)
{
  Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatScale(a->A,aa);CHKERRQ(ierr);
  ierr = MatScale(a->B,aa);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatDestroy_MPIAIJ(Mat mat)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  PetscErrorCode ierr;

  PetscFunctionBegin;
#if defined(PETSC_USE_LOG)
  PetscLogObjectState((PetscObject)mat,"Rows=%D, Cols=%D",mat->rmap->N,mat->cmap->N);
#endif
  ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
  ierr = VecDestroy(&aij->diag);CHKERRQ(ierr);
  ierr = MatDestroy(&aij->A);CHKERRQ(ierr);
  ierr = MatDestroy(&aij->B);CHKERRQ(ierr);
#if defined(PETSC_USE_CTABLE)
  ierr = PetscTableDestroy(&aij->colmap);CHKERRQ(ierr);
#else
  ierr = PetscFree(aij->colmap);CHKERRQ(ierr);
#endif
  ierr = PetscFree(aij->garray);CHKERRQ(ierr);
  ierr = VecDestroy(&aij->lvec);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&aij->Mvctx);CHKERRQ(ierr);
  ierr = PetscFree2(aij->rowvalues,aij->rowindices);CHKERRQ(ierr);
  ierr = PetscFree(aij->ld);CHKERRQ(ierr);
  ierr = PetscFree(mat->data);CHKERRQ(ierr);

  ierr = PetscObjectChangeTypeName((PetscObject)mat,0);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatStoreValues_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatRetrieveValues_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatIsTranspose_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocation_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMPIAIJSetPreallocationCSR_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatDiagonalScaleLocal_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_mpisbaij_C",NULL);CHKERRQ(ierr);
#if defined(PETSC_HAVE_ELEMENTAL)
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_elemental_C",NULL);CHKERRQ(ierr);
#endif
#if defined(PETSC_HAVE_HYPRE)
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatConvert_mpiaij_hypre_C",NULL);CHKERRQ(ierr);
  ierr = PetscObjectComposeFunction((PetscObject)mat,"MatMatMatMult_transpose_mpiaij_mpiaij_C",NULL);CHKERRQ(ierr);
#endif
  PetscFunctionReturn(0);
}

PetscErrorCode MatView_MPIAIJ_Binary(Mat mat,PetscViewer viewer)
{
  Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ     *A   = (Mat_SeqAIJ*)aij->A->data;
  Mat_SeqAIJ     *B   = (Mat_SeqAIJ*)aij->B->data;
  PetscErrorCode ierr;
  PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
  int            fd;
  PetscInt       nz,header[4],*row_lengths,*range=0,rlen,i;
  PetscInt       nzmax,*column_indices,j,k,col,*garray = aij->garray,cnt,cstart = mat->cmap->rstart,rnz = 0;
  PetscScalar    *column_values;
  PetscInt       message_count,flowcontrolcount;
  FILE           *file;

  PetscFunctionBegin;
  ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
  ierr = MPI_Comm_size(PetscObjectComm((PetscObject)mat),&size);CHKERRQ(ierr);
  nz   = A->nz + B->nz;
  ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
  if (!rank) {
    header[0] = MAT_FILE_CLASSID;
    header[1] = mat->rmap->N;
    header[2] = mat->cmap->N;

    ierr = MPI_Reduce(&nz,&header[3],1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscBinaryWrite(fd,header,4,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    /* get largest number of rows any processor has */
    rlen  = mat->rmap->n;
    range = mat->rmap->range;
    for (i=1; i<size; i++) rlen = PetscMax(rlen,range[i+1] - range[i]);
  } else {
    ierr = MPI_Reduce(&nz,0,1,MPIU_INT,MPI_SUM,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    rlen = mat->rmap->n;
  }

  /* load up the local row counts */
  ierr = PetscMalloc1(rlen+1,&row_lengths);CHKERRQ(ierr);
  for (i=0; i<mat->rmap->n; i++) row_lengths[i] = A->i[i+1] - A->i[i] + B->i[i+1] - B->i[i];

  /* store the row lengths to the file */
  ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
  if (!rank) {
    ierr = PetscBinaryWrite(fd,row_lengths,mat->rmap->n,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    for (i=1; i<size; i++) {
      ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
      rlen = range[i+1] - range[i];
      ierr = MPIULong_Recv(row_lengths,rlen,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
      ierr = PetscBinaryWrite(fd,row_lengths,rlen,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
    }
    ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
  } else {
    ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
    ierr = MPIULong_Send(row_lengths,mat->rmap->n,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
    ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
  }
  ierr = PetscFree(row_lengths);CHKERRQ(ierr);

  /* load up the local column indices */
  nzmax = nz; /* process 0 needs enough space for the largest nonzero count on any process */
  ierr  = MPI_Reduce(&nz,&nzmax,1,MPIU_INT,MPI_MAX,0,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
  ierr  = PetscMalloc1(nzmax+1,&column_indices);CHKERRQ(ierr);
  cnt   = 0;
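  /* merge each row so the indices come out in increasing global order: first the
     off-diagonal columns that precede the diagonal block, then the diagonal block,
     then the remaining off-diagonal columns */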
1192   for (i=0; i<mat->rmap->n; i++) {
1193     for (j=B->i[i]; j<B->i[i+1]; j++) {
1194       if ((col = garray[B->j[j]]) > cstart) break;
1195       column_indices[cnt++] = col;
1196     }
1197     for (k=A->i[i]; k<A->i[i+1]; k++) column_indices[cnt++] = A->j[k] + cstart;
1198     for (; j<B->i[i+1]; j++) column_indices[cnt++] = garray[B->j[j]];
1199   }
1200   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1201 
1202   /* store the column indices to the file */
1203   ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
1204   if (!rank) {
1205     MPI_Status status;
1206     ierr = PetscBinaryWrite(fd,column_indices,nz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
1207     for (i=1; i<size; i++) {
1208       ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
1209       ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
1210       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: nz = %D nzmax = %D",nz,nzmax);
1211       ierr = MPIULong_Recv(column_indices,rnz,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1212       ierr = PetscBinaryWrite(fd,column_indices,rnz,PETSC_INT,PETSC_TRUE);CHKERRQ(ierr);
1213     }
1214     ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
1215   } else {
1216     ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
1217     ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1218     ierr = MPIULong_Send(column_indices,nz,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1219     ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
1220   }
1221   ierr = PetscFree(column_indices);CHKERRQ(ierr);
1222 
1223   /* load up the local column values */
1224   ierr = PetscMalloc1(nzmax+1,&column_values);CHKERRQ(ierr);
1225   cnt  = 0;
1226   for (i=0; i<mat->rmap->n; i++) {
1227     for (j=B->i[i]; j<B->i[i+1]; j++) {
1228       if (garray[B->j[j]] > cstart) break;
1229       column_values[cnt++] = B->a[j];
1230     }
1231     for (k=A->i[i]; k<A->i[i+1]; k++) column_values[cnt++] = A->a[k];
1232     for (; j<B->i[i+1]; j++) column_values[cnt++] = B->a[j];
1233   }
1234   if (cnt != A->nz + B->nz) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Internal PETSc error: cnt = %D nz = %D",cnt,A->nz+B->nz);
1235 
1236   /* store the column values to the file */
1237   ierr = PetscViewerFlowControlStart(viewer,&message_count,&flowcontrolcount);CHKERRQ(ierr);
1238   if (!rank) {
1239     MPI_Status status;
1240     ierr = PetscBinaryWrite(fd,column_values,nz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
1241     for (i=1; i<size; i++) {
1242       ierr = PetscViewerFlowControlStepMaster(viewer,i,&message_count,flowcontrolcount);CHKERRQ(ierr);
1243       ierr = MPI_Recv(&rnz,1,MPIU_INT,i,tag,PetscObjectComm((PetscObject)mat),&status);CHKERRQ(ierr);
1244       if (rnz > nzmax) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_LIB,"Internal PETSc error: nz = %D nzmax = %D",nz,nzmax);
1245       ierr = MPIULong_Recv(column_values,rnz,MPIU_SCALAR,i,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1246       ierr = PetscBinaryWrite(fd,column_values,rnz,PETSC_SCALAR,PETSC_TRUE);CHKERRQ(ierr);
1247     }
1248     ierr = PetscViewerFlowControlEndMaster(viewer,&message_count);CHKERRQ(ierr);
1249   } else {
1250     ierr = PetscViewerFlowControlStepWorker(viewer,rank,&message_count);CHKERRQ(ierr);
1251     ierr = MPI_Send(&nz,1,MPIU_INT,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1252     ierr = MPIULong_Send(column_values,nz,MPIU_SCALAR,0,tag,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1253     ierr = PetscViewerFlowControlEndWorker(viewer,&message_count);CHKERRQ(ierr);
1254   }
1255   ierr = PetscFree(column_values);CHKERRQ(ierr);
1256 
1257   ierr = PetscViewerBinaryGetInfoPointer(viewer,&file);CHKERRQ(ierr);
1258   if (file) fprintf(file,"-matload_block_size %d\n",(int)PetscAbs(mat->rmap->bs));
1259   PetscFunctionReturn(0);
1260 }
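
/*
  A minimal usage sketch for the binary pathway above ("mat.dat" is a hypothetical
  file name):

     PetscViewer viewer;
     ierr = PetscViewerBinaryOpen(PETSC_COMM_WORLD,"mat.dat",FILE_MODE_WRITE,&viewer);CHKERRQ(ierr);
     ierr = MatView(mat,viewer);CHKERRQ(ierr);
     ierr = PetscViewerDestroy(&viewer);CHKERRQ(ierr);

  The "-matload_block_size" entry written to the info file above serves as a hint
  that MatLoad() can pick up when reading the matrix back in.
*/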
1261 
1262 #include <petscdraw.h>
1263 PetscErrorCode MatView_MPIAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1264 {
1265   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)mat->data;
1266   PetscErrorCode    ierr;
1267   PetscMPIInt       rank = aij->rank,size = aij->size;
1268   PetscBool         isdraw,iascii,isbinary;
1269   PetscViewer       sviewer;
1270   PetscViewerFormat format;
1271 
1272   PetscFunctionBegin;
1273   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
1274   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
1275   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
1276   if (iascii) {
1277     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
1278     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1279       MatInfo   info;
1280       PetscInt  *inodes;
1281 
1282       ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)mat),&rank);CHKERRQ(ierr);
1283       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
1284       ierr = MatInodeGetInodeSizes(aij->A,NULL,&inodes,NULL);CHKERRQ(ierr);
1285       ierr = PetscViewerASCIIPushSynchronized(viewer);CHKERRQ(ierr);
1286       if (!inodes) {
1287         ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, not using I-node routines\n",
1288                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
1289       } else {
1290         ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %D nz %D nz alloced %D mem %D, using I-node routines\n",
1291                                                   rank,mat->rmap->n,(PetscInt)info.nz_used,(PetscInt)info.nz_allocated,(PetscInt)info.memory);CHKERRQ(ierr);
1292       }
1293       ierr = MatGetInfo(aij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
1294       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
1295       ierr = MatGetInfo(aij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
1296       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %D \n",rank,(PetscInt)info.nz_used);CHKERRQ(ierr);
1297       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
1298       ierr = PetscViewerASCIIPopSynchronized(viewer);CHKERRQ(ierr);
1299       ierr = PetscViewerASCIIPrintf(viewer,"Information on VecScatter used in matrix-vector product: \n");CHKERRQ(ierr);
1300       ierr = VecScatterView(aij->Mvctx,viewer);CHKERRQ(ierr);
1301       PetscFunctionReturn(0);
1302     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1303       PetscInt inodecount,inodelimit,*inodes;
1304       ierr = MatInodeGetInodeSizes(aij->A,&inodecount,&inodes,&inodelimit);CHKERRQ(ierr);
1305       if (inodes) {
1306         ierr = PetscViewerASCIIPrintf(viewer,"using I-node (on process 0) routines: found %D nodes, limit used is %D\n",inodecount,inodelimit);CHKERRQ(ierr);
1307       } else {
1308         ierr = PetscViewerASCIIPrintf(viewer,"not using I-node (on process 0) routines\n");CHKERRQ(ierr);
1309       }
1310       PetscFunctionReturn(0);
1311     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1312       PetscFunctionReturn(0);
1313     }
1314   } else if (isbinary) {
1315     if (size == 1) {
1316       ierr = PetscObjectSetName((PetscObject)aij->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1317       ierr = MatView(aij->A,viewer);CHKERRQ(ierr);
1318     } else {
1319       ierr = MatView_MPIAIJ_Binary(mat,viewer);CHKERRQ(ierr);
1320     }
1321     PetscFunctionReturn(0);
1322   } else if (isdraw) {
1323     PetscDraw draw;
1324     PetscBool isnull;
1325     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
1326     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr);
1327     if (isnull) PetscFunctionReturn(0);
1328   }
1329 
1330   {
1331     /* assemble the entire matrix onto first processor. */
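    /* Rank 0 takes the full M x N matrix as its local block while every other rank
       gets a 0 x 0 local block, so the MatSetValues() calls below funnel all
       entries to the first process. */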
1332     Mat        A;
1333     Mat_SeqAIJ *Aloc;
1334     PetscInt   M = mat->rmap->N,N = mat->cmap->N,m,*ai,*aj,row,*cols,i,*ct;
1335     MatScalar  *a;
1336 
1337     ierr = MatCreate(PetscObjectComm((PetscObject)mat),&A);CHKERRQ(ierr);
1338     if (!rank) {
1339       ierr = MatSetSizes(A,M,N,M,N);CHKERRQ(ierr);
1340     } else {
1341       ierr = MatSetSizes(A,0,0,M,N);CHKERRQ(ierr);
1342     }
1343     /* This is just a temporary matrix, so explicitly using MATMPIAIJ is probably best */
1344     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
1345     ierr = MatMPIAIJSetPreallocation(A,0,NULL,0,NULL);CHKERRQ(ierr);
1346     ierr = MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
1347     ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)A);CHKERRQ(ierr);
1348 
1349     /* copy over the A part */
1350     Aloc = (Mat_SeqAIJ*)aij->A->data;
1351     m    = aij->A->rmap->n; ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1352     row  = mat->rmap->rstart;
1353     for (i=0; i<ai[m]; i++) aj[i] += mat->cmap->rstart;
1354     for (i=0; i<m; i++) {
1355       ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],aj,a,INSERT_VALUES);CHKERRQ(ierr);
1356       row++;
1357       a += ai[i+1]-ai[i]; aj += ai[i+1]-ai[i];
1358     }
1359     aj = Aloc->j;
1360     for (i=0; i<ai[m]; i++) aj[i] -= mat->cmap->rstart;
1361 
1362     /* copy over the B part */
1363     Aloc = (Mat_SeqAIJ*)aij->B->data;
1364     m    = aij->B->rmap->n;  ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1365     row  = mat->rmap->rstart;
1366     ierr = PetscMalloc1(ai[m]+1,&cols);CHKERRQ(ierr);
1367     ct   = cols;
1368     for (i=0; i<ai[m]; i++) cols[i] = aij->garray[aj[i]];
1369     for (i=0; i<m; i++) {
1370       ierr = MatSetValues(A,1,&row,ai[i+1]-ai[i],cols,a,INSERT_VALUES);CHKERRQ(ierr);
1371       row++;
1372       a += ai[i+1]-ai[i]; cols += ai[i+1]-ai[i];
1373     }
1374     ierr = PetscFree(ct);CHKERRQ(ierr);
1375     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1376     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1377     /*
1378        Everyone has to participate in viewing the matrix since the graphics waits
1379        are synchronized across all processes that share the PetscDraw object
1380     */
1381     ierr = PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
1382     if (!rank) {
1383       ierr = PetscObjectSetName((PetscObject)((Mat_MPIAIJ*)(A->data))->A,((PetscObject)mat)->name);CHKERRQ(ierr);
1384       ierr = MatView_SeqAIJ(((Mat_MPIAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
1385     }
1386     ierr = PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
1387     ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
1388     ierr = MatDestroy(&A);CHKERRQ(ierr);
1389   }
1390   PetscFunctionReturn(0);
1391 }
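
/*
  A minimal sketch of reaching the branches above through standard PETSc runtime
  viewer options (the file name is hypothetical):

     -mat_view                  ASCII, default format
     -mat_view ::ascii_info     the PETSC_VIEWER_ASCII_INFO branch
     -mat_view draw             the PetscDraw branch
     -mat_view binary:mat.dat   the binary branch
*/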
1392 
1393 PetscErrorCode MatView_MPIAIJ(Mat mat,PetscViewer viewer)
1394 {
1395   PetscErrorCode ierr;
1396   PetscBool      iascii,isdraw,issocket,isbinary;
1397 
1398   PetscFunctionBegin;
1399   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
1400   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERDRAW,&isdraw);CHKERRQ(ierr);
1401   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERBINARY,&isbinary);CHKERRQ(ierr);
1402   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERSOCKET,&issocket);CHKERRQ(ierr);
1403   if (iascii || isdraw || isbinary || issocket) {
1404     ierr = MatView_MPIAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
1405   }
1406   PetscFunctionReturn(0);
1407 }
1408 
1409 PetscErrorCode MatSOR_MPIAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,PetscInt its,PetscInt lits,Vec xx)
1410 {
1411   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1412   PetscErrorCode ierr;
1413   Vec            bb1 = 0;
1414   PetscBool      hasop;
1415 
1416   PetscFunctionBegin;
1417   if (flag == SOR_APPLY_UPPER) {
1418     ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
1419     PetscFunctionReturn(0);
1420   }
1421 
1422   if (its > 1 || !(flag & SOR_ZERO_INITIAL_GUESS) || flag & SOR_EISENSTAT) {
1423     ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
1424   }
1425 
1426   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1427     if (flag & SOR_ZERO_INITIAL_GUESS) {
1428       ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
1429       its--;
1430     }
1431 
1432     while (its--) {
1433       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1434       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1435 
1436       /* update rhs: bb1 = bb - B*x */
1437       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1438       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1439 
1440       /* local sweep */
1441       ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
1442     }
1443   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1444     if (flag & SOR_ZERO_INITIAL_GUESS) {
1445       ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
1446       its--;
1447     }
1448     while (its--) {
1449       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1450       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1451 
1452       /* update rhs: bb1 = bb - B*x */
1453       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1454       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1455 
1456       /* local sweep */
1457       ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_FORWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
1458     }
1459   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1460     if (flag & SOR_ZERO_INITIAL_GUESS) {
1461       ierr = (*mat->A->ops->sor)(mat->A,bb,omega,flag,fshift,lits,1,xx);CHKERRQ(ierr);
1462       its--;
1463     }
1464     while (its--) {
1465       ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1466       ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1467 
1468       /* update rhs: bb1 = bb - B*x */
1469       ierr = VecScale(mat->lvec,-1.0);CHKERRQ(ierr);
1470       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb,bb1);CHKERRQ(ierr);
1471 
1472       /* local sweep */
1473       ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,SOR_BACKWARD_SWEEP,fshift,lits,1,xx);CHKERRQ(ierr);
1474     }
1475   } else if (flag & SOR_EISENSTAT) {
1476     Vec xx1;
1477 
1478     ierr = VecDuplicate(bb,&xx1);CHKERRQ(ierr);
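    /* Eisenstat's trick as realized below: one backward sweep produces xx, its
       ghost values are scattered into lvec, and the rhs for the complementary
       forward sweep is assembled as bb1 = bb + ((omega-2)/omega)*D*xx + B*lvec;
       the sweep result xx1 is then added back into xx. */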
1479     ierr = (*mat->A->ops->sor)(mat->A,bb,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_BACKWARD_SWEEP),fshift,lits,1,xx);CHKERRQ(ierr);
1480 
1481     ierr = VecScatterBegin(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1482     ierr = VecScatterEnd(mat->Mvctx,xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1483     if (!mat->diag) {
1484       ierr = MatCreateVecs(matin,&mat->diag,NULL);CHKERRQ(ierr);
1485       ierr = MatGetDiagonal(matin,mat->diag);CHKERRQ(ierr);
1486     }
1487     ierr = MatHasOperation(matin,MATOP_MULT_DIAGONAL_BLOCK,&hasop);CHKERRQ(ierr);
1488     if (hasop) {
1489       ierr = MatMultDiagonalBlock(matin,xx,bb1);CHKERRQ(ierr);
1490     } else {
1491       ierr = VecPointwiseMult(bb1,mat->diag,xx);CHKERRQ(ierr);
1492     }
1493     ierr = VecAYPX(bb1,(omega-2.0)/omega,bb);CHKERRQ(ierr);
1494 
1495     ierr = MatMultAdd(mat->B,mat->lvec,bb1,bb1);CHKERRQ(ierr);
1496 
1497     /* local sweep */
1498     ierr = (*mat->A->ops->sor)(mat->A,bb1,omega,(MatSORType)(SOR_ZERO_INITIAL_GUESS | SOR_LOCAL_FORWARD_SWEEP),fshift,lits,1,xx1);CHKERRQ(ierr);
1499     ierr = VecAXPY(xx,1.0,xx1);CHKERRQ(ierr);
1500     ierr = VecDestroy(&xx1);CHKERRQ(ierr);
1501   } else SETERRQ(PetscObjectComm((PetscObject)matin),PETSC_ERR_SUP,"Parallel SOR not supported");
1502 
1503   ierr = VecDestroy(&bb1);CHKERRQ(ierr);
1504 
1505   matin->factorerrortype = mat->A->factorerrortype;
1506   PetscFunctionReturn(0);
1507 }
1508 
1509 PetscErrorCode MatPermute_MPIAIJ(Mat A,IS rowp,IS colp,Mat *B)
1510 {
1511   Mat            aA,aB,Aperm;
1512   const PetscInt *rwant,*cwant,*gcols,*ai,*bi,*aj,*bj;
1513   PetscScalar    *aa,*ba;
1514   PetscInt       i,j,m,n,ng,anz,bnz,*dnnz,*onnz,*tdnnz,*tonnz,*rdest,*cdest,*work,*gcdest;
1515   PetscSF        rowsf,sf;
1516   IS             parcolp = NULL;
1517   PetscBool      done;
1518   PetscErrorCode ierr;
1519 
1520   PetscFunctionBegin;
1521   ierr = MatGetLocalSize(A,&m,&n);CHKERRQ(ierr);
1522   ierr = ISGetIndices(rowp,&rwant);CHKERRQ(ierr);
1523   ierr = ISGetIndices(colp,&cwant);CHKERRQ(ierr);
1524   ierr = PetscMalloc3(PetscMax(m,n),&work,m,&rdest,n,&cdest);CHKERRQ(ierr);
1525 
1526   /* Invert row permutation to find out where my rows should go */
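  /* A small concrete case: with two rows and rowp = {1,0} (a swap), the reduction
     below leaves rdest = {1,0}, i.e. rdest[i] holds the new global index of
     locally owned row i. */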
1527   ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&rowsf);CHKERRQ(ierr);
1528   ierr = PetscSFSetGraphLayout(rowsf,A->rmap,A->rmap->n,NULL,PETSC_OWN_POINTER,rwant);CHKERRQ(ierr);
1529   ierr = PetscSFSetFromOptions(rowsf);CHKERRQ(ierr);
1530   for (i=0; i<m; i++) work[i] = A->rmap->rstart + i;
1531   ierr = PetscSFReduceBegin(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);CHKERRQ(ierr);
1532   ierr = PetscSFReduceEnd(rowsf,MPIU_INT,work,rdest,MPIU_REPLACE);CHKERRQ(ierr);
1533 
1534   /* Invert column permutation to find out where my columns should go */
1535   ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);CHKERRQ(ierr);
1536   ierr = PetscSFSetGraphLayout(sf,A->cmap,A->cmap->n,NULL,PETSC_OWN_POINTER,cwant);CHKERRQ(ierr);
1537   ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
1538   for (i=0; i<n; i++) work[i] = A->cmap->rstart + i;
1539   ierr = PetscSFReduceBegin(sf,MPIU_INT,work,cdest,MPIU_REPLACE);CHKERRQ(ierr);
1540   ierr = PetscSFReduceEnd(sf,MPIU_INT,work,cdest,MPIU_REPLACE);CHKERRQ(ierr);
1541   ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
1542 
1543   ierr = ISRestoreIndices(rowp,&rwant);CHKERRQ(ierr);
1544   ierr = ISRestoreIndices(colp,&cwant);CHKERRQ(ierr);
1545   ierr = MatMPIAIJGetSeqAIJ(A,&aA,&aB,&gcols);CHKERRQ(ierr);
1546 
1547   /* Find out where my gcols should go */
1548   ierr = MatGetSize(aB,NULL,&ng);CHKERRQ(ierr);
1549   ierr = PetscMalloc1(ng,&gcdest);CHKERRQ(ierr);
1550   ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);CHKERRQ(ierr);
1551   ierr = PetscSFSetGraphLayout(sf,A->cmap,ng,NULL,PETSC_OWN_POINTER,gcols);CHKERRQ(ierr);
1552   ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
1553   ierr = PetscSFBcastBegin(sf,MPIU_INT,cdest,gcdest);CHKERRQ(ierr);
1554   ierr = PetscSFBcastEnd(sf,MPIU_INT,cdest,gcdest);CHKERRQ(ierr);
1555   ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
1556 
1557   ierr = PetscCalloc4(m,&dnnz,m,&onnz,m,&tdnnz,m,&tonnz);CHKERRQ(ierr);
1558   ierr = MatGetRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);CHKERRQ(ierr);
1559   ierr = MatGetRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);CHKERRQ(ierr);
1560   for (i=0; i<m; i++) {
1561     PetscInt row = rdest[i],rowner;
1562     ierr = PetscLayoutFindOwner(A->rmap,row,&rowner);CHKERRQ(ierr);
1563     for (j=ai[i]; j<ai[i+1]; j++) {
1564       PetscInt cowner,col = cdest[aj[j]];
1565       ierr = PetscLayoutFindOwner(A->cmap,col,&cowner);CHKERRQ(ierr); /* Could build an index for the columns to eliminate this search */
1566       if (rowner == cowner) dnnz[i]++;
1567       else onnz[i]++;
1568     }
1569     for (j=bi[i]; j<bi[i+1]; j++) {
1570       PetscInt cowner,col = gcdest[bj[j]];
1571       ierr = PetscLayoutFindOwner(A->cmap,col,&cowner);CHKERRQ(ierr);
1572       if (rowner == cowner) dnnz[i]++;
1573       else onnz[i]++;
1574     }
1575   }
1576   ierr = PetscSFBcastBegin(rowsf,MPIU_INT,dnnz,tdnnz);CHKERRQ(ierr);
1577   ierr = PetscSFBcastEnd(rowsf,MPIU_INT,dnnz,tdnnz);CHKERRQ(ierr);
1578   ierr = PetscSFBcastBegin(rowsf,MPIU_INT,onnz,tonnz);CHKERRQ(ierr);
1579   ierr = PetscSFBcastEnd(rowsf,MPIU_INT,onnz,tonnz);CHKERRQ(ierr);
1580   ierr = PetscSFDestroy(&rowsf);CHKERRQ(ierr);
1581 
1582   ierr = MatCreateAIJ(PetscObjectComm((PetscObject)A),A->rmap->n,A->cmap->n,A->rmap->N,A->cmap->N,0,tdnnz,0,tonnz,&Aperm);CHKERRQ(ierr);
1583   ierr = MatSeqAIJGetArray(aA,&aa);CHKERRQ(ierr);
1584   ierr = MatSeqAIJGetArray(aB,&ba);CHKERRQ(ierr);
1585   for (i=0; i<m; i++) {
1586     PetscInt *acols = dnnz,*bcols = onnz; /* Repurpose now-unneeded arrays */
1587     PetscInt j0,rowlen;
1588     rowlen = ai[i+1] - ai[i];
1589     for (j0=j=0; j<rowlen; j0=j) { /* rowlen can exceed m, the size of the repurposed work arrays, so insert in batches of at most m entries */
1590       for ( ; j<PetscMin(rowlen,j0+m); j++) acols[j-j0] = cdest[aj[ai[i]+j]];
1591       ierr = MatSetValues(Aperm,1,&rdest[i],j-j0,acols,aa+ai[i]+j0,INSERT_VALUES);CHKERRQ(ierr);
1592     }
1593     rowlen = bi[i+1] - bi[i];
1594     for (j0=j=0; j<rowlen; j0=j) {
1595       for ( ; j<PetscMin(rowlen,j0+m); j++) bcols[j-j0] = gcdest[bj[bi[i]+j]];
1596       ierr = MatSetValues(Aperm,1,&rdest[i],j-j0,bcols,ba+bi[i]+j0,INSERT_VALUES);CHKERRQ(ierr);
1597     }
1598   }
1599   ierr = MatAssemblyBegin(Aperm,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1600   ierr = MatAssemblyEnd(Aperm,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1601   ierr = MatRestoreRowIJ(aA,0,PETSC_FALSE,PETSC_FALSE,&anz,&ai,&aj,&done);CHKERRQ(ierr);
1602   ierr = MatRestoreRowIJ(aB,0,PETSC_FALSE,PETSC_FALSE,&bnz,&bi,&bj,&done);CHKERRQ(ierr);
1603   ierr = MatSeqAIJRestoreArray(aA,&aa);CHKERRQ(ierr);
1604   ierr = MatSeqAIJRestoreArray(aB,&ba);CHKERRQ(ierr);
1605   ierr = PetscFree4(dnnz,onnz,tdnnz,tonnz);CHKERRQ(ierr);
1606   ierr = PetscFree3(work,rdest,cdest);CHKERRQ(ierr);
1607   ierr = PetscFree(gcdest);CHKERRQ(ierr);
1608   if (parcolp) {ierr = ISDestroy(&colp);CHKERRQ(ierr);}
1609   *B = Aperm;
1610   PetscFunctionReturn(0);
1611 }
1612 
1613 PetscErrorCode  MatGetGhosts_MPIAIJ(Mat mat,PetscInt *nghosts,const PetscInt *ghosts[])
1614 {
1615   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1616   PetscErrorCode ierr;
1617 
1618   PetscFunctionBegin;
1619   ierr = MatGetSize(aij->B,NULL,nghosts);CHKERRQ(ierr);
1620   if (ghosts) *ghosts = aij->garray;
1621   PetscFunctionReturn(0);
1622 }
1623 
1624 PetscErrorCode MatGetInfo_MPIAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1625 {
1626   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1627   Mat            A    = mat->A,B = mat->B;
1628   PetscErrorCode ierr;
1629   PetscReal      isend[5],irecv[5];
1630 
1631   PetscFunctionBegin;
1632   info->block_size = 1.0;
1633   ierr             = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1634 
1635   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1636   isend[3] = info->memory;  isend[4] = info->mallocs;
1637 
1638   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1639 
1640   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1641   isend[3] += info->memory;  isend[4] += info->mallocs;
1642   if (flag == MAT_LOCAL) {
1643     info->nz_used      = isend[0];
1644     info->nz_allocated = isend[1];
1645     info->nz_unneeded  = isend[2];
1646     info->memory       = isend[3];
1647     info->mallocs      = isend[4];
1648   } else if (flag == MAT_GLOBAL_MAX) {
1649     ierr = MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr);
1650 
1651     info->nz_used      = irecv[0];
1652     info->nz_allocated = irecv[1];
1653     info->nz_unneeded  = irecv[2];
1654     info->memory       = irecv[3];
1655     info->mallocs      = irecv[4];
1656   } else if (flag == MAT_GLOBAL_SUM) {
1657     ierr = MPIU_Allreduce(isend,irecv,5,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)matin));CHKERRQ(ierr);
1658 
1659     info->nz_used      = irecv[0];
1660     info->nz_allocated = irecv[1];
1661     info->nz_unneeded  = irecv[2];
1662     info->memory       = irecv[3];
1663     info->mallocs      = irecv[4];
1664   }
1665   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1666   info->fill_ratio_needed = 0;
1667   info->factor_mallocs    = 0;
1668   PetscFunctionReturn(0);
1669 }
1670 
1671 PetscErrorCode MatSetOption_MPIAIJ(Mat A,MatOption op,PetscBool flg)
1672 {
1673   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
1674   PetscErrorCode ierr;
1675 
1676   PetscFunctionBegin;
1677   switch (op) {
1678   case MAT_NEW_NONZERO_LOCATIONS:
1679   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1680   case MAT_UNUSED_NONZERO_LOCATION_ERR:
1681   case MAT_KEEP_NONZERO_PATTERN:
1682   case MAT_NEW_NONZERO_LOCATION_ERR:
1683   case MAT_USE_INODES:
1684   case MAT_IGNORE_ZERO_ENTRIES:
1685     MatCheckPreallocated(A,1);
1686     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1687     ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
1688     break;
1689   case MAT_ROW_ORIENTED:
1690     MatCheckPreallocated(A,1);
1691     a->roworiented = flg;
1692 
1693     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1694     ierr = MatSetOption(a->B,op,flg);CHKERRQ(ierr);
1695     break;
1696   case MAT_NEW_DIAGONALS:
1697     ierr = PetscInfo1(A,"Option %s ignored\n",MatOptions[op]);CHKERRQ(ierr);
1698     break;
1699   case MAT_IGNORE_OFF_PROC_ENTRIES:
1700     a->donotstash = flg;
1701     break;
1702   case MAT_SPD:
1703     A->spd_set = PETSC_TRUE;
1704     A->spd     = flg;
1705     if (flg) {
1706       A->symmetric                  = PETSC_TRUE;
1707       A->structurally_symmetric     = PETSC_TRUE;
1708       A->symmetric_set              = PETSC_TRUE;
1709       A->structurally_symmetric_set = PETSC_TRUE;
1710     }
1711     break;
1712   case MAT_SYMMETRIC:
1713     MatCheckPreallocated(A,1);
1714     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1715     break;
1716   case MAT_STRUCTURALLY_SYMMETRIC:
1717     MatCheckPreallocated(A,1);
1718     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1719     break;
1720   case MAT_HERMITIAN:
1721     MatCheckPreallocated(A,1);
1722     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1723     break;
1724   case MAT_SYMMETRY_ETERNAL:
1725     MatCheckPreallocated(A,1);
1726     ierr = MatSetOption(a->A,op,flg);CHKERRQ(ierr);
1727     break;
1728   case MAT_SUBMAT_SINGLEIS:
1729     A->submat_singleis = flg;
1730     break;
1731   default:
1732     SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"unknown option %d",op);
1733   }
1734   PetscFunctionReturn(0);
1735 }
1736 
1737 PetscErrorCode MatGetRow_MPIAIJ(Mat matin,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1738 {
1739   Mat_MPIAIJ     *mat = (Mat_MPIAIJ*)matin->data;
1740   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
1741   PetscErrorCode ierr;
1742   PetscInt       i,*cworkA,*cworkB,**pcA,**pcB,cstart = matin->cmap->rstart;
1743   PetscInt       nztot,nzA,nzB,lrow,rstart = matin->rmap->rstart,rend = matin->rmap->rend;
1744   PetscInt       *cmap,*idx_p;
1745 
1746   PetscFunctionBegin;
1747   if (mat->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Already active");
1748   mat->getrowactive = PETSC_TRUE;
1749 
1750   if (!mat->rowvalues && (idx || v)) {
1751     /*
1752         allocate enough space to hold information from the longest row.
1753     */
1754     Mat_SeqAIJ *Aa = (Mat_SeqAIJ*)mat->A->data,*Ba = (Mat_SeqAIJ*)mat->B->data;
1755     PetscInt   max = 1,tmp;
1756     for (i=0; i<matin->rmap->n; i++) {
1757       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1758       if (max < tmp) max = tmp;
1759     }
1760     ierr = PetscMalloc2(max,&mat->rowvalues,max,&mat->rowindices);CHKERRQ(ierr);
1761   }
1762 
1763   if (row < rstart || row >= rend) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Only local rows");
1764   lrow = row - rstart;
1765 
1766   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1767   if (!v)   {pvA = 0; pvB = 0;}
1768   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1769   ierr  = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1770   ierr  = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1771   nztot = nzA + nzB;
1772 
1773   cmap = mat->garray;
1774   if (v  || idx) {
1775     if (nztot) {
1776       /* Sort by increasing column numbers, assuming A and B already sorted */
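      /* The merged row is emitted in three runs: B entries whose global column lies
         left of the diagonal block (cmap[col] < cstart), then the A entries shifted
         by cstart, then the remaining B entries; imark records where B was split. */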
1777       PetscInt imark = -1;
1778       if (v) {
1779         *v = v_p = mat->rowvalues;
1780         for (i=0; i<nzB; i++) {
1781           if (cmap[cworkB[i]] < cstart) v_p[i] = vworkB[i];
1782           else break;
1783         }
1784         imark = i;
1785         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1786         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1787       }
1788       if (idx) {
1789         *idx = idx_p = mat->rowindices;
1790         if (imark > -1) {
1791           for (i=0; i<imark; i++) {
1792             idx_p[i] = cmap[cworkB[i]];
1793           }
1794         } else {
1795           for (i=0; i<nzB; i++) {
1796             if (cmap[cworkB[i]] < cstart) idx_p[i] = cmap[cworkB[i]];
1797             else break;
1798           }
1799           imark = i;
1800         }
1801         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart + cworkA[i];
1802         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]];
1803       }
1804     } else {
1805       if (idx) *idx = 0;
1806       if (v)   *v   = 0;
1807     }
1808   }
1809   *nz  = nztot;
1810   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1811   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1812   PetscFunctionReturn(0);
1813 }
1814 
1815 PetscErrorCode MatRestoreRow_MPIAIJ(Mat mat,PetscInt row,PetscInt *nz,PetscInt **idx,PetscScalar **v)
1816 {
1817   Mat_MPIAIJ *aij = (Mat_MPIAIJ*)mat->data;
1818 
1819   PetscFunctionBegin;
1820   if (!aij->getrowactive) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"MatGetRow() must be called first");
1821   aij->getrowactive = PETSC_FALSE;
1822   PetscFunctionReturn(0);
1823 }
1824 
1825 PetscErrorCode MatNorm_MPIAIJ(Mat mat,NormType type,PetscReal *norm)
1826 {
1827   Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)mat->data;
1828   Mat_SeqAIJ     *amat = (Mat_SeqAIJ*)aij->A->data,*bmat = (Mat_SeqAIJ*)aij->B->data;
1829   PetscErrorCode ierr;
1830   PetscInt       i,j,cstart = mat->cmap->rstart;
1831   PetscReal      sum = 0.0;
1832   MatScalar      *v;
1833 
1834   PetscFunctionBegin;
1835   if (aij->size == 1) {
1836     ierr =  MatNorm(aij->A,type,norm);CHKERRQ(ierr);
1837   } else {
1838     if (type == NORM_FROBENIUS) {
1839       v = amat->a;
1840       for (i=0; i<amat->nz; i++) {
1841         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1842       }
1843       v = bmat->a;
1844       for (i=0; i<bmat->nz; i++) {
1845         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
1846       }
1847       ierr  = MPIU_Allreduce(&sum,norm,1,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1848       *norm = PetscSqrtReal(*norm);
1849       ierr = PetscLogFlops(2*amat->nz+2*bmat->nz);CHKERRQ(ierr);
1850     } else if (type == NORM_1) { /* max column norm */
1851       PetscReal *tmp,*tmp2;
1852       PetscInt  *jj,*garray = aij->garray;
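      /* Accumulate |a_ij| into a dense array indexed by global column, Allreduce
         the per-column sums across ranks, then take the maximum: the 1-norm is
         max_j sum_i |a_ij|. */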
1853       ierr  = PetscCalloc1(mat->cmap->N+1,&tmp);CHKERRQ(ierr);
1854       ierr  = PetscMalloc1(mat->cmap->N+1,&tmp2);CHKERRQ(ierr);
1855       *norm = 0.0;
1856       v     = amat->a; jj = amat->j;
1857       for (j=0; j<amat->nz; j++) {
1858         tmp[cstart + *jj++] += PetscAbsScalar(*v);  v++;
1859       }
1860       v = bmat->a; jj = bmat->j;
1861       for (j=0; j<bmat->nz; j++) {
1862         tmp[garray[*jj++]] += PetscAbsScalar(*v); v++;
1863       }
1864       ierr = MPIU_Allreduce(tmp,tmp2,mat->cmap->N,MPIU_REAL,MPIU_SUM,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1865       for (j=0; j<mat->cmap->N; j++) {
1866         if (tmp2[j] > *norm) *norm = tmp2[j];
1867       }
1868       ierr = PetscFree(tmp);CHKERRQ(ierr);
1869       ierr = PetscFree(tmp2);CHKERRQ(ierr);
1870       ierr = PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));CHKERRQ(ierr);
1871     } else if (type == NORM_INFINITY) { /* max row norm */
1872       PetscReal ntemp = 0.0;
1873       for (j=0; j<aij->A->rmap->n; j++) {
1874         v   = amat->a + amat->i[j];
1875         sum = 0.0;
1876         for (i=0; i<amat->i[j+1]-amat->i[j]; i++) {
1877           sum += PetscAbsScalar(*v); v++;
1878         }
1879         v = bmat->a + bmat->i[j];
1880         for (i=0; i<bmat->i[j+1]-bmat->i[j]; i++) {
1881           sum += PetscAbsScalar(*v); v++;
1882         }
1883         if (sum > ntemp) ntemp = sum;
1884       }
1885       ierr = MPIU_Allreduce(&ntemp,norm,1,MPIU_REAL,MPIU_MAX,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
1886       ierr = PetscLogFlops(PetscMax(amat->nz+bmat->nz-1,0));CHKERRQ(ierr);
1887     } else SETERRQ(PetscObjectComm((PetscObject)mat),PETSC_ERR_SUP,"No support for two norm");
1888   }
1889   PetscFunctionReturn(0);
1890 }
1891 
1892 PetscErrorCode MatTranspose_MPIAIJ(Mat A,MatReuse reuse,Mat *matout)
1893 {
1894   Mat_MPIAIJ     *a   = (Mat_MPIAIJ*)A->data;
1895   Mat_SeqAIJ     *Aloc=(Mat_SeqAIJ*)a->A->data,*Bloc=(Mat_SeqAIJ*)a->B->data;
1896   PetscErrorCode ierr;
1897   PetscInt       M      = A->rmap->N,N = A->cmap->N,ma,na,mb,nb,*ai,*aj,*bi,*bj,row,*cols,*cols_tmp,i;
1898   PetscInt       cstart = A->cmap->rstart,ncol;
1899   Mat            B;
1900   MatScalar      *array;
1901 
1902   PetscFunctionBegin;
1903   if (reuse == MAT_INPLACE_MATRIX && M != N) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1904 
1905   ma = A->rmap->n; na = A->cmap->n; mb = a->B->rmap->n; nb = a->B->cmap->n;
1906   ai = Aloc->i; aj = Aloc->j;
1907   bi = Bloc->i; bj = Bloc->j;
1908   if (reuse == MAT_INITIAL_MATRIX || *matout == A) {
1909     PetscInt             *d_nnz,*g_nnz,*o_nnz;
1910     PetscSFNode          *oloc;
1911     PETSC_UNUSED PetscSF sf;
1912 
1913     ierr = PetscMalloc4(na,&d_nnz,na,&o_nnz,nb,&g_nnz,nb,&oloc);CHKERRQ(ierr);
1914     /* compute d_nnz for preallocation */
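    /* Each occurrence of local column aj[i] contributes one nonzero to row aj[i]
       of the transpose, so a histogram of the column indices gives the
       diagonal-block row lengths of the result. */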
1915     ierr = PetscMemzero(d_nnz,na*sizeof(PetscInt));CHKERRQ(ierr);
1916     for (i=0; i<ai[ma]; i++) {
1917       d_nnz[aj[i]]++;
1918       aj[i] += cstart; /* global col index to be used by MatSetValues() */
1919     }
1920     /* compute local off-diagonal contributions */
1921     ierr = PetscMemzero(g_nnz,nb*sizeof(PetscInt));CHKERRQ(ierr);
1922     for (i=0; i<bi[ma]; i++) g_nnz[bj[i]]++;
1923     /* map those to global */
1924     ierr = PetscSFCreate(PetscObjectComm((PetscObject)A),&sf);CHKERRQ(ierr);
1925     ierr = PetscSFSetGraphLayout(sf,A->cmap,nb,NULL,PETSC_USE_POINTER,a->garray);CHKERRQ(ierr);
1926     ierr = PetscSFSetFromOptions(sf);CHKERRQ(ierr);
1927     ierr = PetscMemzero(o_nnz,na*sizeof(PetscInt));CHKERRQ(ierr);
1928     ierr = PetscSFReduceBegin(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);CHKERRQ(ierr);
1929     ierr = PetscSFReduceEnd(sf,MPIU_INT,g_nnz,o_nnz,MPIU_SUM);CHKERRQ(ierr);
1930     ierr = PetscSFDestroy(&sf);CHKERRQ(ierr);
1931 
1932     ierr = MatCreate(PetscObjectComm((PetscObject)A),&B);CHKERRQ(ierr);
1933     ierr = MatSetSizes(B,A->cmap->n,A->rmap->n,N,M);CHKERRQ(ierr);
1934     ierr = MatSetBlockSizes(B,PetscAbs(A->cmap->bs),PetscAbs(A->rmap->bs));CHKERRQ(ierr);
1935     ierr = MatSetType(B,((PetscObject)A)->type_name);CHKERRQ(ierr);
1936     ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
1937     ierr = PetscFree4(d_nnz,o_nnz,g_nnz,oloc);CHKERRQ(ierr);
1938   } else {
1939     B    = *matout;
1940     ierr = MatSetOption(B,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
1941     for (i=0; i<ai[ma]; i++) aj[i] += cstart; /* global col index to be used by MatSetValues() */
1942   }
1943 
1944   /* copy over the A part */
1945   array = Aloc->a;
1946   row   = A->rmap->rstart;
1947   for (i=0; i<ma; i++) {
1948     ncol = ai[i+1]-ai[i];
1949     ierr = MatSetValues(B,ncol,aj,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
1950     row++;
1951     array += ncol; aj += ncol;
1952   }
1953   aj = Aloc->j;
1954   for (i=0; i<ai[ma]; i++) aj[i] -= cstart; /* restore the local col indices */
1955 
1956   /* copy over the B part */
1957   ierr  = PetscCalloc1(bi[mb],&cols);CHKERRQ(ierr);
1958   array = Bloc->a;
1959   row   = A->rmap->rstart;
1960   for (i=0; i<bi[mb]; i++) cols[i] = a->garray[bj[i]];
1961   cols_tmp = cols;
1962   for (i=0; i<mb; i++) {
1963     ncol = bi[i+1]-bi[i];
1964     ierr = MatSetValues(B,ncol,cols_tmp,1,&row,array,INSERT_VALUES);CHKERRQ(ierr);
1965     row++;
1966     array += ncol; cols_tmp += ncol;
1967   }
1968   ierr = PetscFree(cols);CHKERRQ(ierr);
1969 
1970   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1971   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1972   if (reuse == MAT_INITIAL_MATRIX || reuse == MAT_REUSE_MATRIX) {
1973     *matout = B;
1974   } else {
1975     ierr = MatHeaderMerge(A,&B);CHKERRQ(ierr);
1976   }
1977   PetscFunctionReturn(0);
1978 }
1979 
1980 PetscErrorCode MatDiagonalScale_MPIAIJ(Mat mat,Vec ll,Vec rr)
1981 {
1982   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
1983   Mat            a    = aij->A,b = aij->B;
1984   PetscErrorCode ierr;
1985   PetscInt       s1,s2,s3;
1986 
1987   PetscFunctionBegin;
1988   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1989   if (rr) {
1990     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1991     if (s1!=s3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1992     /* Overlap communication with computation. */
1993     ierr = VecScatterBegin(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
1994   }
1995   if (ll) {
1996     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1997     if (s1!=s2) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1998     ierr = (*b->ops->diagonalscale)(b,ll,0);CHKERRQ(ierr);
1999   }
2000   /* scale the diagonal block */
2001   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
2002 
2003   if (rr) {
2004     /* Do a scatter end and then right scale the off-diagonal block */
2005     ierr = VecScatterEnd(aij->Mvctx,rr,aij->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
2006     ierr = (*b->ops->diagonalscale)(b,0,aij->lvec);CHKERRQ(ierr);
2007   }
2008   PetscFunctionReturn(0);
2009 }
2010 
2011 PetscErrorCode MatSetUnfactored_MPIAIJ(Mat A)
2012 {
2013   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2014   PetscErrorCode ierr;
2015 
2016   PetscFunctionBegin;
2017   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
2018   PetscFunctionReturn(0);
2019 }
2020 
2021 PetscErrorCode MatEqual_MPIAIJ(Mat A,Mat B,PetscBool  *flag)
2022 {
2023   Mat_MPIAIJ     *matB = (Mat_MPIAIJ*)B->data,*matA = (Mat_MPIAIJ*)A->data;
2024   Mat            a,b,c,d;
2025   PetscBool      flg;
2026   PetscErrorCode ierr;
2027 
2028   PetscFunctionBegin;
2029   a = matA->A; b = matA->B;
2030   c = matB->A; d = matB->B;
2031 
2032   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
2033   if (flg) {
2034     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
2035   }
2036   ierr = MPIU_Allreduce(&flg,flag,1,MPIU_BOOL,MPI_LAND,PetscObjectComm((PetscObject)A));CHKERRQ(ierr);
2037   PetscFunctionReturn(0);
2038 }
2039 
2040 PetscErrorCode MatCopy_MPIAIJ(Mat A,Mat B,MatStructure str)
2041 {
2042   PetscErrorCode ierr;
2043   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2044   Mat_MPIAIJ     *b = (Mat_MPIAIJ*)B->data;
2045 
2046   PetscFunctionBegin;
2047   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
2048   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
2049     /* Because of the column compression in the off-process part a->B,
2050        the number of columns in a->B and b->B may differ, so we cannot call
2051        MatCopy() directly on the two parts. If need be, a copy more efficient
2052        than MatCopy_Basic() could be provided by first uncompressing the a->B
2053        matrices and then copying the submatrices. */
2054     ierr = MatCopy_Basic(A,B,str);CHKERRQ(ierr);
2055   } else {
2056     ierr = MatCopy(a->A,b->A,str);CHKERRQ(ierr);
2057     ierr = MatCopy(a->B,b->B,str);CHKERRQ(ierr);
2058   }
2059   PetscFunctionReturn(0);
2060 }
2061 
2062 PetscErrorCode MatSetUp_MPIAIJ(Mat A)
2063 {
2064   PetscErrorCode ierr;
2065 
2066   PetscFunctionBegin;
2067   ierr =  MatMPIAIJSetPreallocation(A,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
2068   PetscFunctionReturn(0);
2069 }
2070 
2071 /*
2072    Computes the number of nonzeros per row needed for preallocation when X and Y
2073    have different nonzero structure.
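
   For example, if row i of X has global columns {0,3,7} and the same row of Y has
   {3,5}, the merged pattern is {0,3,5,7} and nnz[i] = 4: the two-pointer sweep
   below counts the shared column 3 only once.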
2074 */
2075 PetscErrorCode MatAXPYGetPreallocation_MPIX_private(PetscInt m,const PetscInt *xi,const PetscInt *xj,const PetscInt *xltog,const PetscInt *yi,const PetscInt *yj,const PetscInt *yltog,PetscInt *nnz)
2076 {
2077   PetscInt       i,j,k,nzx,nzy;
2078 
2079   PetscFunctionBegin;
2080   /* Set the number of nonzeros in the new matrix */
2081   for (i=0; i<m; i++) {
2082     const PetscInt *xjj = xj+xi[i],*yjj = yj+yi[i];
2083     nzx = xi[i+1] - xi[i];
2084     nzy = yi[i+1] - yi[i];
2085     nnz[i] = 0;
2086     for (j=0,k=0; j<nzx; j++) {                   /* Point in X */
2087       for (; k<nzy && yltog[yjj[k]]<xltog[xjj[j]]; k++) nnz[i]++; /* Catch up to X */
2088       if (k<nzy && yltog[yjj[k]]==xltog[xjj[j]]) k++;             /* Skip duplicate */
2089       nnz[i]++;
2090     }
2091     for (; k<nzy; k++) nnz[i]++;
2092   }
2093   PetscFunctionReturn(0);
2094 }
2095 
2096 /* This is the same as MatAXPYGetPreallocation_SeqAIJ, except that the local-to-global map is provided */
2097 static PetscErrorCode MatAXPYGetPreallocation_MPIAIJ(Mat Y,const PetscInt *yltog,Mat X,const PetscInt *xltog,PetscInt *nnz)
2098 {
2099   PetscErrorCode ierr;
2100   PetscInt       m = Y->rmap->N;
2101   Mat_SeqAIJ     *x = (Mat_SeqAIJ*)X->data;
2102   Mat_SeqAIJ     *y = (Mat_SeqAIJ*)Y->data;
2103 
2104   PetscFunctionBegin;
2105   ierr = MatAXPYGetPreallocation_MPIX_private(m,x->i,x->j,xltog,y->i,y->j,yltog,nnz);CHKERRQ(ierr);
2106   PetscFunctionReturn(0);
2107 }
2108 
2109 PetscErrorCode MatAXPY_MPIAIJ(Mat Y,PetscScalar a,Mat X,MatStructure str)
2110 {
2111   PetscErrorCode ierr;
2112   Mat_MPIAIJ     *xx = (Mat_MPIAIJ*)X->data,*yy = (Mat_MPIAIJ*)Y->data;
2113   PetscBLASInt   bnz,one=1;
2114   Mat_SeqAIJ     *x,*y;
2115 
2116   PetscFunctionBegin;
2117   if (str == SAME_NONZERO_PATTERN) {
2118     PetscScalar alpha = a;
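    /* With identical nonzero patterns the stored values line up entry-for-entry,
       so Y += a*X reduces to a flat BLAS axpy over the nz values of the diagonal
       (A) and off-diagonal (B) blocks separately. */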
2119     x    = (Mat_SeqAIJ*)xx->A->data;
2120     ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
2121     y    = (Mat_SeqAIJ*)yy->A->data;
2122     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2123     x    = (Mat_SeqAIJ*)xx->B->data;
2124     y    = (Mat_SeqAIJ*)yy->B->data;
2125     ierr = PetscBLASIntCast(x->nz,&bnz);CHKERRQ(ierr);
2126     PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&bnz,&alpha,x->a,&one,y->a,&one));
2127     ierr = PetscObjectStateIncrease((PetscObject)Y);CHKERRQ(ierr);
2128   } else if (str == SUBSET_NONZERO_PATTERN) { /* nonzeros of X is a subset of Y's */
2129     ierr = MatAXPY_Basic(Y,a,X,str);CHKERRQ(ierr);
2130   } else {
2131     Mat      B;
2132     PetscInt *nnz_d,*nnz_o;
2133     ierr = PetscMalloc1(yy->A->rmap->N,&nnz_d);CHKERRQ(ierr);
2134     ierr = PetscMalloc1(yy->B->rmap->N,&nnz_o);CHKERRQ(ierr);
2135     ierr = MatCreate(PetscObjectComm((PetscObject)Y),&B);CHKERRQ(ierr);
2136     ierr = PetscObjectSetName((PetscObject)B,((PetscObject)Y)->name);CHKERRQ(ierr);
2137     ierr = MatSetSizes(B,Y->rmap->n,Y->cmap->n,Y->rmap->N,Y->cmap->N);CHKERRQ(ierr);
2138     ierr = MatSetBlockSizesFromMats(B,Y,Y);CHKERRQ(ierr);
2139     ierr = MatSetType(B,MATMPIAIJ);CHKERRQ(ierr);
2140     ierr = MatAXPYGetPreallocation_SeqAIJ(yy->A,xx->A,nnz_d);CHKERRQ(ierr);
2141     ierr = MatAXPYGetPreallocation_MPIAIJ(yy->B,yy->garray,xx->B,xx->garray,nnz_o);CHKERRQ(ierr);
2142     ierr = MatMPIAIJSetPreallocation(B,0,nnz_d,0,nnz_o);CHKERRQ(ierr);
2143     ierr = MatAXPY_BasicWithPreallocation(B,Y,a,X,str);CHKERRQ(ierr);
2144     ierr = MatHeaderReplace(Y,&B);CHKERRQ(ierr);
2145     ierr = PetscFree(nnz_d);CHKERRQ(ierr);
2146     ierr = PetscFree(nnz_o);CHKERRQ(ierr);
2147   }
2148   PetscFunctionReturn(0);
2149 }
2150 
2151 extern PetscErrorCode  MatConjugate_SeqAIJ(Mat);
2152 
2153 PetscErrorCode  MatConjugate_MPIAIJ(Mat mat)
2154 {
2155 #if defined(PETSC_USE_COMPLEX)
2156   PetscErrorCode ierr;
2157   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2158 
2159   PetscFunctionBegin;
2160   ierr = MatConjugate_SeqAIJ(aij->A);CHKERRQ(ierr);
2161   ierr = MatConjugate_SeqAIJ(aij->B);CHKERRQ(ierr);
2162 #else
2163   PetscFunctionBegin;
2164 #endif
2165   PetscFunctionReturn(0);
2166 }
2167 
2168 PetscErrorCode MatRealPart_MPIAIJ(Mat A)
2169 {
2170   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2171   PetscErrorCode ierr;
2172 
2173   PetscFunctionBegin;
2174   ierr = MatRealPart(a->A);CHKERRQ(ierr);
2175   ierr = MatRealPart(a->B);CHKERRQ(ierr);
2176   PetscFunctionReturn(0);
2177 }
2178 
2179 PetscErrorCode MatImaginaryPart_MPIAIJ(Mat A)
2180 {
2181   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2182   PetscErrorCode ierr;
2183 
2184   PetscFunctionBegin;
2185   ierr = MatImaginaryPart(a->A);CHKERRQ(ierr);
2186   ierr = MatImaginaryPart(a->B);CHKERRQ(ierr);
2187   PetscFunctionReturn(0);
2188 }
2189 
2190 PetscErrorCode MatGetRowMaxAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2191 {
2192   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2193   PetscErrorCode ierr;
2194   PetscInt       i,*idxb = 0;
2195   PetscScalar    *va,*vb;
2196   Vec            vtmp;
2197 
2198   PetscFunctionBegin;
2199   ierr = MatGetRowMaxAbs(a->A,v,idx);CHKERRQ(ierr);
2200   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2201   if (idx) {
2202     for (i=0; i<A->rmap->n; i++) {
2203       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2204     }
2205   }
2206 
2207   ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
2208   if (idx) {
2209     ierr = PetscMalloc1(A->rmap->n,&idxb);CHKERRQ(ierr);
2210   }
2211   ierr = MatGetRowMaxAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
2212   ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);
2213 
2214   for (i=0; i<A->rmap->n; i++) {
2215     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) {
2216       va[i] = vb[i];
2217       if (idx) idx[i] = a->garray[idxb[i]];
2218     }
2219   }
2220 
2221   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2222   ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
2223   ierr = PetscFree(idxb);CHKERRQ(ierr);
2224   ierr = VecDestroy(&vtmp);CHKERRQ(ierr);
2225   PetscFunctionReturn(0);
2226 }
2227 
2228 PetscErrorCode MatGetRowMinAbs_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2229 {
2230   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2231   PetscErrorCode ierr;
2232   PetscInt       i,*idxb = 0;
2233   PetscScalar    *va,*vb;
2234   Vec            vtmp;
2235 
2236   PetscFunctionBegin;
2237   ierr = MatGetRowMinAbs(a->A,v,idx);CHKERRQ(ierr);
2238   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2239   if (idx) {
2240     for (i=0; i<A->rmap->n; i++) {
2241       if (PetscAbsScalar(va[i])) idx[i] += A->cmap->rstart;
2242     }
2243   }
2244 
2245   ierr = VecCreateSeq(PETSC_COMM_SELF,A->rmap->n,&vtmp);CHKERRQ(ierr);
2246   if (idx) {
2247     ierr = PetscMalloc1(A->rmap->n,&idxb);CHKERRQ(ierr);
2248   }
2249   ierr = MatGetRowMinAbs(a->B,vtmp,idxb);CHKERRQ(ierr);
2250   ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);
2251 
2252   for (i=0; i<A->rmap->n; i++) {
2253     if (PetscAbsScalar(va[i]) > PetscAbsScalar(vb[i])) {
2254       va[i] = vb[i];
2255       if (idx) idx[i] = a->garray[idxb[i]];
2256     }
2257   }
2258 
2259   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2260   ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
2261   ierr = PetscFree(idxb);CHKERRQ(ierr);
2262   ierr = VecDestroy(&vtmp);CHKERRQ(ierr);
2263   PetscFunctionReturn(0);
2264 }
2265 
2266 PetscErrorCode MatGetRowMin_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2267 {
2268   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2269   PetscInt       n      = A->rmap->n;
2270   PetscInt       cstart = A->cmap->rstart;
2271   PetscInt       *cmap  = mat->garray;
2272   PetscInt       *diagIdx, *offdiagIdx;
2273   Vec            diagV, offdiagV;
2274   PetscScalar    *a, *diagA, *offdiagA;
2275   PetscInt       r;
2276   PetscErrorCode ierr;
2277 
2278   PetscFunctionBegin;
2279   ierr = PetscMalloc2(n,&diagIdx,n,&offdiagIdx);CHKERRQ(ierr);
2280   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
2281   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
2282   ierr = MatGetRowMin(mat->A, diagV,    diagIdx);CHKERRQ(ierr);
2283   ierr = MatGetRowMin(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
2284   ierr = VecGetArray(v,        &a);CHKERRQ(ierr);
2285   ierr = VecGetArray(diagV,    &diagA);CHKERRQ(ierr);
2286   ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2287   for (r = 0; r < n; ++r) {
2288     if (PetscAbsScalar(diagA[r]) <= PetscAbsScalar(offdiagA[r])) {
2289       a[r]   = diagA[r];
2290       idx[r] = cstart + diagIdx[r];
2291     } else {
2292       a[r]   = offdiagA[r];
2293       idx[r] = cmap[offdiagIdx[r]];
2294     }
2295   }
2296   ierr = VecRestoreArray(v,        &a);CHKERRQ(ierr);
2297   ierr = VecRestoreArray(diagV,    &diagA);CHKERRQ(ierr);
2298   ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2299   ierr = VecDestroy(&diagV);CHKERRQ(ierr);
2300   ierr = VecDestroy(&offdiagV);CHKERRQ(ierr);
2301   ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
2302   PetscFunctionReturn(0);
2303 }
2304 
2305 PetscErrorCode MatGetRowMax_MPIAIJ(Mat A, Vec v, PetscInt idx[])
2306 {
2307   Mat_MPIAIJ     *mat   = (Mat_MPIAIJ*) A->data;
2308   PetscInt       n      = A->rmap->n;
2309   PetscInt       cstart = A->cmap->rstart;
2310   PetscInt       *cmap  = mat->garray;
2311   PetscInt       *diagIdx, *offdiagIdx;
2312   Vec            diagV, offdiagV;
2313   PetscScalar    *a, *diagA, *offdiagA;
2314   PetscInt       r;
2315   PetscErrorCode ierr;
2316 
2317   PetscFunctionBegin;
2318   ierr = PetscMalloc2(n,&diagIdx,n,&offdiagIdx);CHKERRQ(ierr);
2319   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &diagV);CHKERRQ(ierr);
2320   ierr = VecCreateSeq(PETSC_COMM_SELF, n, &offdiagV);CHKERRQ(ierr);
2321   ierr = MatGetRowMax(mat->A, diagV,    diagIdx);CHKERRQ(ierr);
2322   ierr = MatGetRowMax(mat->B, offdiagV, offdiagIdx);CHKERRQ(ierr);
2323   ierr = VecGetArray(v,        &a);CHKERRQ(ierr);
2324   ierr = VecGetArray(diagV,    &diagA);CHKERRQ(ierr);
2325   ierr = VecGetArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2326   for (r = 0; r < n; ++r) {
2327     if (PetscAbsScalar(diagA[r]) >= PetscAbsScalar(offdiagA[r])) {
2328       a[r]   = diagA[r];
2329       idx[r] = cstart + diagIdx[r];
2330     } else {
2331       a[r]   = offdiagA[r];
2332       idx[r] = cmap[offdiagIdx[r]];
2333     }
2334   }
2335   ierr = VecRestoreArray(v,        &a);CHKERRQ(ierr);
2336   ierr = VecRestoreArray(diagV,    &diagA);CHKERRQ(ierr);
2337   ierr = VecRestoreArray(offdiagV, &offdiagA);CHKERRQ(ierr);
2338   ierr = VecDestroy(&diagV);CHKERRQ(ierr);
2339   ierr = VecDestroy(&offdiagV);CHKERRQ(ierr);
2340   ierr = PetscFree2(diagIdx, offdiagIdx);CHKERRQ(ierr);
2341   PetscFunctionReturn(0);
2342 }
2343 
2344 PetscErrorCode MatGetSeqNonzeroStructure_MPIAIJ(Mat mat,Mat *newmat)
2345 {
2346   PetscErrorCode ierr;
2347   Mat            *dummy;
2348 
2349   PetscFunctionBegin;
2350   ierr    = MatCreateSubMatrix_MPIAIJ_All(mat,MAT_DO_NOT_GET_VALUES,MAT_INITIAL_MATRIX,&dummy);CHKERRQ(ierr);
2351   *newmat = *dummy;
2352   ierr    = PetscFree(dummy);CHKERRQ(ierr);
2353   PetscFunctionReturn(0);
2354 }
2355 
2356 PetscErrorCode  MatInvertBlockDiagonal_MPIAIJ(Mat A,const PetscScalar **values)
2357 {
2358   Mat_MPIAIJ     *a = (Mat_MPIAIJ*) A->data;
2359   PetscErrorCode ierr;
2360 
2361   PetscFunctionBegin;
2362   ierr = MatInvertBlockDiagonal(a->A,values);CHKERRQ(ierr);
2363   A->factorerrortype = a->A->factorerrortype;
2364   PetscFunctionReturn(0);
2365 }
2366 
2367 static PetscErrorCode  MatSetRandom_MPIAIJ(Mat x,PetscRandom rctx)
2368 {
2369   PetscErrorCode ierr;
2370   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)x->data;
2371 
2372   PetscFunctionBegin;
2373   ierr = MatSetRandom(aij->A,rctx);CHKERRQ(ierr);
2374   ierr = MatSetRandom(aij->B,rctx);CHKERRQ(ierr);
2375   ierr = MatAssemblyBegin(x,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2376   ierr = MatAssemblyEnd(x,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2377   PetscFunctionReturn(0);
2378 }
2379 
2380 PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ(Mat A,PetscBool sc)
2381 {
2382   PetscFunctionBegin;
2383   if (sc) A->ops->increaseoverlap = MatIncreaseOverlap_MPIAIJ_Scalable;
2384   else A->ops->increaseoverlap    = MatIncreaseOverlap_MPIAIJ;
2385   PetscFunctionReturn(0);
2386 }
2387 
2388 /*@
2389    MatMPIAIJSetUseScalableIncreaseOverlap - Set whether the matrix uses a scalable algorithm to compute the overlap
2390 
2391    Collective on Mat
2392 
2393    Input Parameters:
2394 +    A - the matrix
2395 -    sc - PETSC_TRUE indicates use the scalable algorithm (default is not to use the scalable algorithm)
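
   Options Database Keys:
.  -mat_increase_overlap_scalable - use the scalable algorithm to compute the overlap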
2396 
2397  Level: advanced
2398 
2399 @*/
2400 PetscErrorCode MatMPIAIJSetUseScalableIncreaseOverlap(Mat A,PetscBool sc)
2401 {
2402   PetscErrorCode       ierr;
2403 
2404   PetscFunctionBegin;
2405   ierr = PetscTryMethod(A,"MatMPIAIJSetUseScalableIncreaseOverlap_C",(Mat,PetscBool),(A,sc));CHKERRQ(ierr);
2406   PetscFunctionReturn(0);
2407 }
2408 
2409 PetscErrorCode MatSetFromOptions_MPIAIJ(PetscOptionItems *PetscOptionsObject,Mat A)
2410 {
2411   PetscErrorCode       ierr;
2412   PetscBool            sc = PETSC_FALSE,flg;
2413 
2414   PetscFunctionBegin;
2415   ierr = PetscOptionsHead(PetscOptionsObject,"MPIAIJ options");CHKERRQ(ierr);
2416   if (A->ops->increaseoverlap == MatIncreaseOverlap_MPIAIJ_Scalable) sc = PETSC_TRUE;
2417   ierr = PetscOptionsBool("-mat_increase_overlap_scalable","Use a scalable algorithm to compute the overlap","MatIncreaseOverlap",sc,&sc,&flg);CHKERRQ(ierr);
2418   if (flg) {
2419     ierr = MatMPIAIJSetUseScalableIncreaseOverlap(A,sc);CHKERRQ(ierr);
2420   }
2421   ierr = PetscOptionsTail();CHKERRQ(ierr);
2423   PetscFunctionReturn(0);
2424 }
2425 
2426 PetscErrorCode MatShift_MPIAIJ(Mat Y,PetscScalar a)
2427 {
2428   PetscErrorCode ierr;
2429   Mat_MPIAIJ     *maij = (Mat_MPIAIJ*)Y->data;
2430   Mat_SeqAIJ     *aij = (Mat_SeqAIJ*)maij->A->data;
2431 
2432   PetscFunctionBegin;
2433   if (!Y->preallocated) {
2434     ierr = MatMPIAIJSetPreallocation(Y,1,NULL,0,NULL);CHKERRQ(ierr);
2435   } else if (!aij->nz) {
2436     PetscInt nonew = aij->nonew;
2437     ierr = MatSeqAIJSetPreallocation(maij->A,1,NULL);CHKERRQ(ierr);
2438     aij->nonew = nonew;
2439   }
2440   ierr = MatShift_Basic(Y,a);CHKERRQ(ierr);
2441   PetscFunctionReturn(0);
2442 }
2443 
2444 PetscErrorCode MatMissingDiagonal_MPIAIJ(Mat A,PetscBool  *missing,PetscInt *d)
2445 {
2446   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
2447   PetscErrorCode ierr;
2448 
2449   PetscFunctionBegin;
2450   if (A->rmap->n != A->cmap->n) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Only works for square matrices");
2451   ierr = MatMissingDiagonal(a->A,missing,d);CHKERRQ(ierr);
2452   if (d) {
2453     PetscInt rstart;
2454     ierr = MatGetOwnershipRange(A,&rstart,NULL);CHKERRQ(ierr);
2455     *d += rstart;
2456   }
2458   PetscFunctionReturn(0);
2459 }
2460 
2462 /* -------------------------------------------------------------------*/
2463 static struct _MatOps MatOps_Values = {MatSetValues_MPIAIJ,
2464                                        MatGetRow_MPIAIJ,
2465                                        MatRestoreRow_MPIAIJ,
2466                                        MatMult_MPIAIJ,
2467                                 /* 4*/ MatMultAdd_MPIAIJ,
2468                                        MatMultTranspose_MPIAIJ,
2469                                        MatMultTransposeAdd_MPIAIJ,
2470                                        0,
2471                                        0,
2472                                        0,
2473                                 /*10*/ 0,
2474                                        0,
2475                                        0,
2476                                        MatSOR_MPIAIJ,
2477                                        MatTranspose_MPIAIJ,
2478                                 /*15*/ MatGetInfo_MPIAIJ,
2479                                        MatEqual_MPIAIJ,
2480                                        MatGetDiagonal_MPIAIJ,
2481                                        MatDiagonalScale_MPIAIJ,
2482                                        MatNorm_MPIAIJ,
2483                                 /*20*/ MatAssemblyBegin_MPIAIJ,
2484                                        MatAssemblyEnd_MPIAIJ,
2485                                        MatSetOption_MPIAIJ,
2486                                        MatZeroEntries_MPIAIJ,
2487                                 /*24*/ MatZeroRows_MPIAIJ,
2488                                        0,
2489                                        0,
2490                                        0,
2491                                        0,
2492                                 /*29*/ MatSetUp_MPIAIJ,
2493                                        0,
2494                                        0,
2495                                        MatGetDiagonalBlock_MPIAIJ,
2496                                        0,
2497                                 /*34*/ MatDuplicate_MPIAIJ,
2498                                        0,
2499                                        0,
2500                                        0,
2501                                        0,
2502                                 /*39*/ MatAXPY_MPIAIJ,
2503                                        MatCreateSubMatrices_MPIAIJ,
2504                                        MatIncreaseOverlap_MPIAIJ,
2505                                        MatGetValues_MPIAIJ,
2506                                        MatCopy_MPIAIJ,
2507                                 /*44*/ MatGetRowMax_MPIAIJ,
2508                                        MatScale_MPIAIJ,
2509                                        MatShift_MPIAIJ,
2510                                        MatDiagonalSet_MPIAIJ,
2511                                        MatZeroRowsColumns_MPIAIJ,
2512                                 /*49*/ MatSetRandom_MPIAIJ,
2513                                        0,
2514                                        0,
2515                                        0,
2516                                        0,
2517                                 /*54*/ MatFDColoringCreate_MPIXAIJ,
2518                                        0,
2519                                        MatSetUnfactored_MPIAIJ,
2520                                        MatPermute_MPIAIJ,
2521                                        0,
2522                                 /*59*/ MatCreateSubMatrix_MPIAIJ,
2523                                        MatDestroy_MPIAIJ,
2524                                        MatView_MPIAIJ,
2525                                        0,
2526                                        MatMatMatMult_MPIAIJ_MPIAIJ_MPIAIJ,
2527                                 /*64*/ MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ,
2528                                        MatMatMatMultNumeric_MPIAIJ_MPIAIJ_MPIAIJ,
2529                                        0,
2530                                        0,
2531                                        0,
2532                                 /*69*/ MatGetRowMaxAbs_MPIAIJ,
2533                                        MatGetRowMinAbs_MPIAIJ,
2534                                        0,
2535                                        0,
2536                                        0,
2537                                        0,
2538                                 /*75*/ MatFDColoringApply_AIJ,
2539                                        MatSetFromOptions_MPIAIJ,
2540                                        0,
2541                                        0,
2542                                        MatFindZeroDiagonals_MPIAIJ,
2543                                 /*80*/ 0,
2544                                        0,
2545                                        0,
2546                                 /*83*/ MatLoad_MPIAIJ,
2547                                        0,
2548                                        0,
2549                                        0,
2550                                        0,
2551                                        0,
2552                                 /*89*/ MatMatMult_MPIAIJ_MPIAIJ,
2553                                        MatMatMultSymbolic_MPIAIJ_MPIAIJ,
2554                                        MatMatMultNumeric_MPIAIJ_MPIAIJ,
2555                                        MatPtAP_MPIAIJ_MPIAIJ,
2556                                        MatPtAPSymbolic_MPIAIJ_MPIAIJ,
2557                                 /*94*/ MatPtAPNumeric_MPIAIJ_MPIAIJ,
2558                                        0,
2559                                        0,
2560                                        0,
2561                                        0,
2562                                 /*99*/ 0,
2563                                        0,
2564                                        0,
2565                                        MatConjugate_MPIAIJ,
2566                                        0,
2567                                 /*104*/MatSetValuesRow_MPIAIJ,
2568                                        MatRealPart_MPIAIJ,
2569                                        MatImaginaryPart_MPIAIJ,
2570                                        0,
2571                                        0,
2572                                 /*109*/0,
2573                                        0,
2574                                        MatGetRowMin_MPIAIJ,
2575                                        0,
2576                                        MatMissingDiagonal_MPIAIJ,
2577                                 /*114*/MatGetSeqNonzeroStructure_MPIAIJ,
2578                                        0,
2579                                        MatGetGhosts_MPIAIJ,
2580                                        0,
2581                                        0,
2582                                 /*119*/0,
2583                                        0,
2584                                        0,
2585                                        0,
2586                                        MatGetMultiProcBlock_MPIAIJ,
2587                                 /*124*/MatFindNonzeroRows_MPIAIJ,
2588                                        MatGetColumnNorms_MPIAIJ,
2589                                        MatInvertBlockDiagonal_MPIAIJ,
2590                                        0,
2591                                        MatCreateSubMatricesMPI_MPIAIJ,
2592                                 /*129*/0,
2593                                        MatTransposeMatMult_MPIAIJ_MPIAIJ,
2594                                        MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ,
2595                                        MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ,
2596                                        0,
2597                                 /*134*/0,
2598                                        0,
2599                                        0,
2600                                        0,
2601                                        0,
2602                                 /*139*/MatSetBlockSizes_MPIAIJ,
2603                                        0,
2604                                        0,
2605                                        MatFDColoringSetUp_MPIXAIJ,
2606                                        MatFindOffBlockDiagonalEntries_MPIAIJ,
2607                                 /*144*/MatCreateMPIMatConcatenateSeqMat_MPIAIJ
2608 };
2609 
2610 /* ----------------------------------------------------------------------------------------*/
2611 
2612 PetscErrorCode  MatStoreValues_MPIAIJ(Mat mat)
2613 {
2614   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2615   PetscErrorCode ierr;
2616 
2617   PetscFunctionBegin;
2618   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
2619   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
2620   PetscFunctionReturn(0);
2621 }
2622 
2623 PetscErrorCode  MatRetrieveValues_MPIAIJ(Mat mat)
2624 {
2625   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
2626   PetscErrorCode ierr;
2627 
2628   PetscFunctionBegin;
2629   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
2630   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
2631   PetscFunctionReturn(0);
2632 }
2633 
2634 PetscErrorCode  MatMPIAIJSetPreallocation_MPIAIJ(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
2635 {
2636   Mat_MPIAIJ     *b;
2637   PetscErrorCode ierr;
2638 
2639   PetscFunctionBegin;
2640   ierr = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
2641   ierr = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
2642   b = (Mat_MPIAIJ*)B->data;
2643 
2644 #if defined(PETSC_USE_CTABLE)
2645   ierr = PetscTableDestroy(&b->colmap);CHKERRQ(ierr);
2646 #else
2647   ierr = PetscFree(b->colmap);CHKERRQ(ierr);
2648 #endif
2649   ierr = PetscFree(b->garray);CHKERRQ(ierr);
2650   ierr = VecDestroy(&b->lvec);CHKERRQ(ierr);
2651   ierr = VecScatterDestroy(&b->Mvctx);CHKERRQ(ierr);
2652 
2653   /* Because B's sizes may have changed, we simply destroy it and create a new one each time */
2654   ierr = MatDestroy(&b->B);CHKERRQ(ierr);
2655   ierr = MatCreate(PETSC_COMM_SELF,&b->B);CHKERRQ(ierr);
2656   ierr = MatSetSizes(b->B,B->rmap->n,B->cmap->N,B->rmap->n,B->cmap->N);CHKERRQ(ierr);
2657   ierr = MatSetBlockSizesFromMats(b->B,B,B);CHKERRQ(ierr);
2658   ierr = MatSetType(b->B,MATSEQAIJ);CHKERRQ(ierr);
2659   ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->B);CHKERRQ(ierr);
2660 
2661   if (!B->preallocated) {
2662     ierr = MatCreate(PETSC_COMM_SELF,&b->A);CHKERRQ(ierr);
2663     ierr = MatSetSizes(b->A,B->rmap->n,B->cmap->n,B->rmap->n,B->cmap->n);CHKERRQ(ierr);
2664     ierr = MatSetBlockSizesFromMats(b->A,B,B);CHKERRQ(ierr);
2665     ierr = MatSetType(b->A,MATSEQAIJ);CHKERRQ(ierr);
2666     ierr = PetscLogObjectParent((PetscObject)B,(PetscObject)b->A);CHKERRQ(ierr);
2667   }
2668 
2669   ierr = MatSeqAIJSetPreallocation(b->A,d_nz,d_nnz);CHKERRQ(ierr);
2670   ierr = MatSeqAIJSetPreallocation(b->B,o_nz,o_nnz);CHKERRQ(ierr);
2671   B->preallocated  = PETSC_TRUE;
2672   B->was_assembled = PETSC_FALSE;
2673   B->assembled     = PETSC_FALSE;
2674   PetscFunctionReturn(0);
2675 }
2676 
2677 PetscErrorCode MatDuplicate_MPIAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2678 {
2679   Mat            mat;
2680   Mat_MPIAIJ     *a,*oldmat = (Mat_MPIAIJ*)matin->data;
2681   PetscErrorCode ierr;
2682 
2683   PetscFunctionBegin;
2684   *newmat = 0;
2685   ierr    = MatCreate(PetscObjectComm((PetscObject)matin),&mat);CHKERRQ(ierr);
2686   ierr    = MatSetSizes(mat,matin->rmap->n,matin->cmap->n,matin->rmap->N,matin->cmap->N);CHKERRQ(ierr);
2687   ierr    = MatSetBlockSizesFromMats(mat,matin,matin);CHKERRQ(ierr);
2688   ierr    = MatSetType(mat,((PetscObject)matin)->type_name);CHKERRQ(ierr);
2689   ierr    = PetscMemcpy(mat->ops,matin->ops,sizeof(struct _MatOps));CHKERRQ(ierr);
2690   a       = (Mat_MPIAIJ*)mat->data;
2691 
2692   mat->factortype   = matin->factortype;
2693   mat->assembled    = PETSC_TRUE;
2694   mat->insertmode   = NOT_SET_VALUES;
2695   mat->preallocated = PETSC_TRUE;
2696 
2697   a->size         = oldmat->size;
2698   a->rank         = oldmat->rank;
2699   a->donotstash   = oldmat->donotstash;
2700   a->roworiented  = oldmat->roworiented;
2701   a->rowindices   = 0;
2702   a->rowvalues    = 0;
2703   a->getrowactive = PETSC_FALSE;
2704 
2705   ierr = PetscLayoutReference(matin->rmap,&mat->rmap);CHKERRQ(ierr);
2706   ierr = PetscLayoutReference(matin->cmap,&mat->cmap);CHKERRQ(ierr);
2707 
2708   if (oldmat->colmap) {
2709 #if defined(PETSC_USE_CTABLE)
2710     ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
2711 #else
2712     ierr = PetscMalloc1(mat->cmap->N,&a->colmap);CHKERRQ(ierr);
2713     ierr = PetscLogObjectMemory((PetscObject)mat,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
2714     ierr = PetscMemcpy(a->colmap,oldmat->colmap,(mat->cmap->N)*sizeof(PetscInt));CHKERRQ(ierr);
2715 #endif
2716   } else a->colmap = 0;
2717   if (oldmat->garray) {
2718     PetscInt len;
2719     len  = oldmat->B->cmap->n;
2720     ierr = PetscMalloc1(len+1,&a->garray);CHKERRQ(ierr);
2721     ierr = PetscLogObjectMemory((PetscObject)mat,len*sizeof(PetscInt));CHKERRQ(ierr);
2722     if (len) { ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(PetscInt));CHKERRQ(ierr); }
2723   } else a->garray = 0;
2724 
2725   ierr    = VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
2726   ierr    = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->lvec);CHKERRQ(ierr);
2727   ierr    = VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
2728   ierr    = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->Mvctx);CHKERRQ(ierr);
2729   ierr    = MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
2730   ierr    = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->A);CHKERRQ(ierr);
2731   ierr    = MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
2732   ierr    = PetscLogObjectParent((PetscObject)mat,(PetscObject)a->B);CHKERRQ(ierr);
2733   ierr    = PetscFunctionListDuplicate(((PetscObject)matin)->qlist,&((PetscObject)mat)->qlist);CHKERRQ(ierr);
2734   *newmat = mat;
2735   PetscFunctionReturn(0);
2736 }
2737 
2738 
2739 
2740 PetscErrorCode MatLoad_MPIAIJ(Mat newMat, PetscViewer viewer)
2741 {
2742   PetscScalar    *vals,*svals;
2743   MPI_Comm       comm;
2744   PetscErrorCode ierr;
2745   PetscMPIInt    rank,size,tag = ((PetscObject)viewer)->tag;
2746   PetscInt       i,nz,j,rstart,rend,mmax,maxnz = 0;
2747   PetscInt       header[4],*rowlengths = 0,M,N,m,*cols;
2748   PetscInt       *ourlens = NULL,*procsnz = NULL,*offlens = NULL,jj,*mycols,*smycols;
2749   PetscInt       cend,cstart,n,*rowners;
2750   int            fd;
2751   PetscInt       bs = newMat->rmap->bs;
2752 
2753   PetscFunctionBegin;
2754   /* force binary viewer to load .info file if it has not yet done so */
2755   ierr = PetscViewerSetUp(viewer);CHKERRQ(ierr);
2756   ierr = PetscObjectGetComm((PetscObject)viewer,&comm);CHKERRQ(ierr);
2757   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2758   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2759   ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
2760   if (!rank) {
2761     ierr = PetscBinaryRead(fd,(char*)header,4,PETSC_INT);CHKERRQ(ierr);
2762     if (header[0] != MAT_FILE_CLASSID) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Not a matrix object in file");
2763     if (header[3] < 0) SETERRQ(PetscObjectComm((PetscObject)newMat),PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format on disk, cannot load as MATMPIAIJ");
2764   }
2765 
2766   ierr = PetscOptionsBegin(comm,NULL,"Options for loading MATMPIAIJ matrix","Mat");CHKERRQ(ierr);
2767   ierr = PetscOptionsInt("-matload_block_size","Set the blocksize used to store the matrix","MatLoad",bs,&bs,NULL);CHKERRQ(ierr);
2768   ierr = PetscOptionsEnd();CHKERRQ(ierr);
2769   if (bs < 0) bs = 1;
2770 
2771   ierr = MPI_Bcast(header+1,3,MPIU_INT,0,comm);CHKERRQ(ierr);
2772   M    = header[1]; N = header[2];
2773 
2774   /* If global sizes are set, check that they are consistent with those given in the file */
2775   if (newMat->rmap->N >= 0 && newMat->rmap->N != M) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of rows:Matrix in file has (%D) and input matrix has (%D)",newMat->rmap->N,M);
2776   if (newMat->cmap->N >=0 && newMat->cmap->N != N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED,"Inconsistent # of cols:Matrix in file has (%D) and input matrix has (%D)",newMat->cmap->N,N);
2777 
2778   /* determine ownership of all (block) rows */
2779   if (M%bs) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_FILE_UNEXPECTED, "Inconsistent # of rows (%D) and block size (%D)",M,bs);
2780   if (newMat->rmap->n < 0) m = bs*((M/bs)/size + (((M/bs) % size) > rank));    /* PETSC_DECIDE */
2781   else m = newMat->rmap->n; /* Set by user */
2782 
2783   ierr = PetscMalloc1(size+1,&rowners);CHKERRQ(ierr);
2784   ierr = MPI_Allgather(&m,1,MPIU_INT,rowners+1,1,MPIU_INT,comm);CHKERRQ(ierr);
2785 
2786   /* First process needs enough room for process with most rows */
2787   if (!rank) {
2788     mmax = rowners[1];
2789     for (i=2; i<=size; i++) {
2790       mmax = PetscMax(mmax, rowners[i]);
2791     }
2792   } else mmax = -1;             /* unused, but compilers complain */
2793 
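     /* turn the gathered per-process row counts into a prefix sum, so rowners[i] is the first global row owned by process i */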
2794   rowners[0] = 0;
2795   for (i=2; i<=size; i++) {
2796     rowners[i] += rowners[i-1];
2797   }
2798   rstart = rowners[rank];
2799   rend   = rowners[rank+1];
2800 
2801   /* distribute row lengths to all processors */
2802   ierr = PetscMalloc2(m,&ourlens,m,&offlens);CHKERRQ(ierr);
2803   if (!rank) {
2804     ierr = PetscBinaryRead(fd,ourlens,m,PETSC_INT);CHKERRQ(ierr);
2805     ierr = PetscMalloc1(mmax,&rowlengths);CHKERRQ(ierr);
2806     ierr = PetscCalloc1(size,&procsnz);CHKERRQ(ierr);
2807     for (j=0; j<m; j++) {
2808       procsnz[0] += ourlens[j];
2809     }
2810     for (i=1; i<size; i++) {
2811       ierr = PetscBinaryRead(fd,rowlengths,rowners[i+1]-rowners[i],PETSC_INT);CHKERRQ(ierr);
2812       /* calculate the number of nonzeros on each processor */
2813       for (j=0; j<rowners[i+1]-rowners[i]; j++) {
2814         procsnz[i] += rowlengths[j];
2815       }
2816       ierr = MPIULong_Send(rowlengths,rowners[i+1]-rowners[i],MPIU_INT,i,tag,comm);CHKERRQ(ierr);
2817     }
2818     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2819   } else {
2820     ierr = MPIULong_Recv(ourlens,m,MPIU_INT,0,tag,comm);CHKERRQ(ierr);
2821   }
2822 
2823   if (!rank) {
2824     /* determine max buffer needed and allocate it */
2825     maxnz = 0;
2826     for (i=0; i<size; i++) {
2827       maxnz = PetscMax(maxnz,procsnz[i]);
2828     }
2829     ierr = PetscMalloc1(maxnz,&cols);CHKERRQ(ierr);
2830 
2831     /* read in my part of the matrix column indices  */
2832     nz   = procsnz[0];
2833     ierr = PetscMalloc1(nz,&mycols);CHKERRQ(ierr);
2834     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2835 
2836     /* read in everyone else's and ship it off */
2837     for (i=1; i<size; i++) {
2838       nz   = procsnz[i];
2839       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2840       ierr = MPIULong_Send(cols,nz,MPIU_INT,i,tag,comm);CHKERRQ(ierr);
2841     }
2842     ierr = PetscFree(cols);CHKERRQ(ierr);
2843   } else {
2844     /* determine buffer space needed for message */
2845     nz = 0;
2846     for (i=0; i<m; i++) {
2847       nz += ourlens[i];
2848     }
2849     ierr = PetscMalloc1(nz,&mycols);CHKERRQ(ierr);
2850 
2851     /* receive message of column indices */
2852     ierr = MPIULong_Recv(mycols,nz,MPIU_INT,0,tag,comm);CHKERRQ(ierr);
2853   }
2854 
2855   /* determine column ownership if matrix is not square */
2856   if (N != M) {
2857     if (newMat->cmap->n < 0) n = N/size + ((N % size) > rank);
2858     else n = newMat->cmap->n;
2859     ierr   = MPI_Scan(&n,&cend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
2860     cstart = cend - n;
2861   } else {
2862     cstart = rstart;
2863     cend   = rend;
2864     n      = cend - cstart;
2865   }
2866 
2867   /* loop over local rows, determining the number of off-diagonal entries */
2868   ierr = PetscMemzero(offlens,m*sizeof(PetscInt));CHKERRQ(ierr);
2869   jj   = 0;
2870   for (i=0; i<m; i++) {
2871     for (j=0; j<ourlens[i]; j++) {
2872       if (mycols[jj] < cstart || mycols[jj] >= cend) offlens[i]++;
2873       jj++;
2874     }
2875   }
2876 
2877   for (i=0; i<m; i++) {
2878     ourlens[i] -= offlens[i];
2879   }
2880   ierr = MatSetSizes(newMat,m,n,M,N);CHKERRQ(ierr);
2881 
2882   if (bs > 1) {ierr = MatSetBlockSize(newMat,bs);CHKERRQ(ierr);}
2883 
2884   ierr = MatMPIAIJSetPreallocation(newMat,0,ourlens,0,offlens);CHKERRQ(ierr);
2885 
2886   for (i=0; i<m; i++) {
2887     ourlens[i] += offlens[i];
2888   }
2889 
2890   if (!rank) {
2891     ierr = PetscMalloc1(maxnz+1,&vals);CHKERRQ(ierr);
2892 
2893     /* read in my part of the matrix numerical values  */
2894     nz   = procsnz[0];
2895     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2896 
2897     /* insert into matrix */
2898     jj      = rstart;
2899     smycols = mycols;
2900     svals   = vals;
2901     for (i=0; i<m; i++) {
2902       ierr     = MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
2903       smycols += ourlens[i];
2904       svals   += ourlens[i];
2905       jj++;
2906     }
2907 
2908     /* read in other processors and ship out */
2909     for (i=1; i<size; i++) {
2910       nz   = procsnz[i];
2911       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2912       ierr = MPIULong_Send(vals,nz,MPIU_SCALAR,i,((PetscObject)newMat)->tag,comm);CHKERRQ(ierr);
2913     }
2914     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2915   } else {
2916     /* receive numeric values */
2917     ierr = PetscMalloc1(nz+1,&vals);CHKERRQ(ierr);
2918 
2919     /* receive message of values */
2920     ierr = MPIULong_Recv(vals,nz,MPIU_SCALAR,0,((PetscObject)newMat)->tag,comm);CHKERRQ(ierr);
2921 
2922     /* insert into matrix */
2923     jj      = rstart;
2924     smycols = mycols;
2925     svals   = vals;
2926     for (i=0; i<m; i++) {
2927       ierr     = MatSetValues_MPIAIJ(newMat,1,&jj,ourlens[i],smycols,svals,INSERT_VALUES);CHKERRQ(ierr);
2928       smycols += ourlens[i];
2929       svals   += ourlens[i];
2930       jj++;
2931     }
2932   }
2933   ierr = PetscFree2(ourlens,offlens);CHKERRQ(ierr);
2934   ierr = PetscFree(vals);CHKERRQ(ierr);
2935   ierr = PetscFree(mycols);CHKERRQ(ierr);
2936   ierr = PetscFree(rowners);CHKERRQ(ierr);
2937   ierr = MatAssemblyBegin(newMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2938   ierr = MatAssemblyEnd(newMat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2939   PetscFunctionReturn(0);
2940 }
2941 
2942 /* Not scalable because of ISAllGather() unless getting all columns. */
2943 PetscErrorCode ISGetSeqIS_Private(Mat mat,IS iscol,IS *isseq)
2944 {
2945   PetscErrorCode ierr;
2946   IS             iscol_local;
2947   PetscBool      isstride;
2948   PetscMPIInt    lisstride=0,gisstride;
2949 
2950   PetscFunctionBegin;
2951   /* check if we are grabbing all the columns */
2952   ierr = PetscObjectTypeCompare((PetscObject)iscol,ISSTRIDE,&isstride);CHKERRQ(ierr);
2953 
2954   if (isstride) {
2955     PetscInt  start,len,mstart,mlen;
2956     ierr = ISStrideGetInfo(iscol,&start,NULL);CHKERRQ(ierr);
2957     ierr = ISGetLocalSize(iscol,&len);CHKERRQ(ierr);
2958     ierr = MatGetOwnershipRangeColumn(mat,&mstart,&mlen);CHKERRQ(ierr);
2959     if (mstart == start && mlen-mstart == len) lisstride = 1;
2960   }
2961 
2962   ierr = MPIU_Allreduce(&lisstride,&gisstride,1,MPI_INT,MPI_MIN,PetscObjectComm((PetscObject)mat));CHKERRQ(ierr);
2963   if (gisstride) {
2964     PetscInt N;
2965     ierr = MatGetSize(mat,NULL,&N);CHKERRQ(ierr);
2966     ierr = ISCreateStride(PetscObjectComm((PetscObject)mat),N,0,1,&iscol_local);CHKERRQ(ierr);
2967     ierr = ISSetIdentity(iscol_local);CHKERRQ(ierr);
2968     ierr = PetscInfo(mat,"Optimizing for obtaining all columns of the matrix; skipping ISAllGather()\n");CHKERRQ(ierr);
2969   } else {
2970     PetscInt cbs;
2971     ierr = ISGetBlockSize(iscol,&cbs);CHKERRQ(ierr);
2972     ierr = ISAllGather(iscol,&iscol_local);CHKERRQ(ierr);
2973     ierr = ISSetBlockSize(iscol_local,cbs);CHKERRQ(ierr);
2974   }
2975 
2976   *isseq = iscol_local;
2977   PetscFunctionReturn(0);
2978 }
2979 
2980 extern PetscErrorCode MatCreateSubMatrix_MPIAIJ_Private_SameDist(Mat,IS,IS,PetscInt,MatReuse,Mat*);
2981 
2982 /* isrow has same processor distribution as mat, avoid iscol_local which uses O(mat->cmap->N) ctable */
2983 PetscErrorCode MatCreateSubMatrix_MPIAIJ_SameDist(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
2984 {
2985   PetscErrorCode ierr;
2986   IS             iscol_local;
2987   PetscInt       csize;
2988   MPI_Comm       comm;
2989   PetscMPIInt    rank;
2990   PetscBool      sorted,sameDist = PETSC_FALSE,tsameDist;
2991   PetscInt       n,i,j,cstart,cend,N;
2992 
2993   PetscFunctionBegin;
2994   ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
2995   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2997 
2998   /* If iscol has same processor distribution as mat, then use a scalable routine */
2999   ierr = ISSorted(iscol,&sorted);CHKERRQ(ierr); /* the merge below requires iscol to be locally sorted */
3000   if (!sorted) { ierr = PetscInfo1(mat,"iscol is not locally sorted on rank %d\n",rank);CHKERRQ(ierr); }
3001 
3002   ierr = ISGetLocalSize(iscol,&n);CHKERRQ(ierr);
3003   ierr = ISGetSize(iscol,&N);CHKERRQ(ierr);
       ierr = MatGetOwnershipRangeColumn(mat,&cstart,&cend);CHKERRQ(ierr); /* hoisted so cstart/cend are always initialized */
3004   if (!n) {
3005     sameDist = PETSC_TRUE;
3006   } else {
3007     ierr = ISGetMinMax(iscol,&i,&j);CHKERRQ(ierr); /* the test concerns the column index set, so query iscol, not isrow */
3009     if (i >= cstart && j < cend) sameDist = PETSC_TRUE;
3010   }
3011   ierr = MPIU_Allreduce(&sameDist,&tsameDist,1,MPIU_BOOL,MPI_LAND,comm);CHKERRQ(ierr);
3012   ierr = PetscInfo2(mat,"MatCreateSubMatrix_MPIAIJ_SameDist: reuse %d, columns have same distribution %d\n",(int)call,(int)tsameDist);CHKERRQ(ierr);
3013 
3014   if (tsameDist) { /* get iscol_sub without calling ISAllGather(); use the globally reduced flag so all ranks take the same (collective) branch */
3015     Vec            x;
3016     const PetscInt *is_idx;
3017     PetscScalar    *xarray;
         Mat_MPIAIJ     *a      = (Mat_MPIAIJ*)mat->data;
         Mat            B       = a->B;
         Vec            lvec    = a->lvec;
         PetscInt       Bn      = B->cmap->N;
         PetscInt       *garray = a->garray,*idx,*cmap1,count;
         IS             iscol_sub,iscmap;
3018 
3019     /* (1) iscol is a sub-vector of mat, pad it with '-1.' to form a full vector x */
3020     ierr = MatCreateVecs(mat,&x,NULL);CHKERRQ(ierr);
3021     ierr = VecSet(x,-1.0);CHKERRQ(ierr);
3022     ierr = ISGetIndices(iscol,&is_idx);CHKERRQ(ierr);
3023     ierr = VecGetArray(x,&xarray);CHKERRQ(ierr);
3024     for (i=0; i<n; i++) {
3026       xarray[is_idx[i]-cstart] = (PetscScalar)is_idx[i];
3027     }
3028     ierr = VecRestoreArray(x,&xarray);CHKERRQ(ierr);
3029     ierr = ISRestoreIndices(iscol,&is_idx);CHKERRQ(ierr);
3031 
3032     /* (2) scatter x using aij->Mvctx to get off-process portion of x (see MatMult_MPIAIJ) */
3038 
3039     ierr = VecScatterBegin(a->Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
3040     ierr = VecScatterEnd(a->Mvctx,x,lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
3041 
3043     if (lvec->map->n != Bn) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Local size of lvec %D does not match Bn %D",lvec->map->n,Bn);
3048 
3049     /* create scalable iscol_sub (a subset of iscol) */
3052     ierr = PetscMalloc2(n+Bn,&idx,n+Bn,&cmap1);CHKERRQ(ierr);
3054     count = 0;
3055 
3056     /* A part */
3057     ierr = ISGetIndices(iscol,&is_idx);CHKERRQ(ierr);
3058     j = cstart;
3059     for (i=0; i<n; i++) {
3060       if (j >= cend) break;
3061       if (is_idx[i] == j) {
3062         idx[count]  = j;
3063         cmap1[count] = i; /* column index in submat */
3064         count++; j++;
3065       } else if (is_idx[i] > j) {
3066         while (is_idx[i] > j && j < cend-1) j++;
3067         if (is_idx[i] == j) {
3068           idx[count]  = j;
3069           cmap1[count] = i; /* column index in submat */
3070           count++; j++;
3071         }
3072       }
3073     }
3074     ierr = ISRestoreIndices(iscol,&is_idx);CHKERRQ(ierr);
3075 
3076     /* B part */
3077     ierr = VecGetArray(lvec,&xarray);CHKERRQ(ierr);
3078     j = 0;
3079     for (i=0; i<Bn; i++) {
3080       if (j >= Bn) break;
3081       if ((PetscInt)xarray[i] == garray[j]) {
3082         idx[count]  = garray[j];
3083         cmap1[count] = i;  /* column index in submat */
3084         count++; j++;
3085       } else if ((PetscInt)xarray[i] > garray[j]) {
3086         while ((PetscInt)xarray[i] > garray[j] && j < Bn-1) j++;
3087         if ((PetscInt)xarray[i] == garray[j]) {
3088           idx[count]  = garray[j];
3089           cmap1[count] = i; /* column index in submat */
3090           count++; j++;
3091         }
3092       }
3093     }
3094     ierr = VecRestoreArray(lvec,&xarray);CHKERRQ(ierr);
3095     ierr = PetscInfo3(mat,"Column submap has %D entries out of local IS size %D and global size %D\n",count,n,N);CHKERRQ(ierr);
3096 
3098     ierr = PetscSortInt(count,idx);CHKERRQ(ierr);
3099     ierr = PetscSortInt(count,cmap1);CHKERRQ(ierr);
3100 
3101     ierr = ISCreateGeneral(PETSC_COMM_SELF,count,idx,PETSC_COPY_VALUES,&iscol_sub);CHKERRQ(ierr);
3102     ierr = ISCreateGeneral(PETSC_COMM_SELF,count,cmap1,PETSC_COPY_VALUES,&iscmap);CHKERRQ(ierr);
3108 
3109     ierr = ISDestroy(&iscol_sub);CHKERRQ(ierr);
3110     ierr = ISDestroy(&iscmap);CHKERRQ(ierr);
3111     ierr = PetscFree2(idx,cmap1);CHKERRQ(ierr);
3112     ierr = VecDestroy(&x);CHKERRQ(ierr);
3113   }
3114 
3115   /* ----------------------------------------------- */
3116 
3117   /* Get nonscalable iscol_local -- will replace it with iscol_sub! */
3118   if (call == MAT_REUSE_MATRIX) {
3119     ierr = PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);CHKERRQ(ierr);
3120     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3121   } else {
3122     ierr = ISGetSeqIS_Private(mat,iscol,&iscol_local);CHKERRQ(ierr);
3124     ierr = ISSorted(iscol_local,&sorted);CHKERRQ(ierr);
3125     if (!sorted) { ierr = PetscInfo1(mat,"iscol_local is not sorted on rank %d\n",rank);CHKERRQ(ierr); }
3126   }
3127 
3128   ierr = ISGetLocalSize(iscol,&csize);CHKERRQ(ierr);
3129   ierr = MatCreateSubMatrix_MPIAIJ_Private_SameDist(mat,isrow,iscol_local,csize,call,newmat);CHKERRQ(ierr);
3130 
3131   if (call == MAT_INITIAL_MATRIX) {
3132     ierr = PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);CHKERRQ(ierr);
3133     ierr = ISDestroy(&iscol_local);CHKERRQ(ierr);
3134   }
3135   PetscFunctionReturn(0);
3136 }
3137 
3138 PetscErrorCode MatCreateSubMatrix_MPIAIJ(Mat mat,IS isrow,IS iscol,MatReuse call,Mat *newmat)
3139 {
3140   PetscErrorCode ierr;
3141   IS             iscol_local;
3142   PetscInt       csize;
3143   PetscInt       n,i,j,rstart,rend;
3144   PetscBool      sameDist=PETSC_FALSE,tsameDist;
3145   MPI_Comm       comm;
3146 
3147   PetscFunctionBegin;
3148   /* If isrow has same processor distribution as mat, then use a scalable routine */
3149   if (call == MAT_REUSE_MATRIX) {
3150     ierr = PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_local);CHKERRQ(ierr);
3151     tsameDist = iscol_local ? PETSC_TRUE : PETSC_FALSE; /* otherwise tsameDist would be used uninitialized below */
3152   } else {
3153     ierr = ISGetLocalSize(isrow,&n);CHKERRQ(ierr);
3154     if (!n) {
3155       sameDist = PETSC_TRUE;
3156     } else {
3157       ierr = ISGetMinMax(isrow,&i,&j);CHKERRQ(ierr);
3158       ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
3159       if (i >= rstart && j < rend) sameDist = PETSC_TRUE;
3160     }
3161     ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
3162     ierr = MPIU_Allreduce(&sameDist,&tsameDist,1,MPIU_BOOL,MPI_LAND,comm);CHKERRQ(ierr);
3163   }
3164 
3165   if (tsameDist) {
3166     ierr = MatCreateSubMatrix_MPIAIJ_SameDist(mat,isrow,iscol,call,newmat);CHKERRQ(ierr);
3167     PetscFunctionReturn(0);
3168   }
3169 
3170   /* General case                            */
3171   /* --------------------------------------- */
3172   if (call == MAT_REUSE_MATRIX) {
3173     ierr = PetscObjectQuery((PetscObject)*newmat,"ISAllGather",(PetscObject*)&iscol_local);CHKERRQ(ierr);
3174     if (!iscol_local) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3175   } else {
3176     ierr = ISGetSeqIS_Private(mat,iscol,&iscol_local);CHKERRQ(ierr);
3177   }
3178 
3179   ierr = ISGetLocalSize(iscol,&csize);CHKERRQ(ierr);
3180   ierr = MatCreateSubMatrix_MPIAIJ_Private(mat,isrow,iscol_local,csize,call,newmat);CHKERRQ(ierr);
3181 
3182   if (call == MAT_INITIAL_MATRIX) {
3183     ierr = PetscObjectCompose((PetscObject)*newmat,"ISAllGather",(PetscObject)iscol_local);CHKERRQ(ierr);
3184     ierr = ISDestroy(&iscol_local);CHKERRQ(ierr);
3185   }
3186   PetscFunctionReturn(0);
3187 }
3188 
3189 extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,Mat*);
3190 extern PetscErrorCode MatCreateSubMatrices_MPIAIJ_SingleIS_Local(Mat,PetscInt,const IS[],const IS[],MatReuse,PetscBool,Mat*);
3191 
3192 PetscErrorCode MatCreateSubMatrix_MPIAIJ_Private_SameDist(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3193 {
3194   PetscErrorCode ierr;
3195   PetscInt       i,m,n,rstart,row,rend,nz,j,bs,cbs;
3196   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3197   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)mat->data;
3198   Mat            M,Msub,B=a->B;
3199   MatScalar      *aa;
3200   Mat_SeqAIJ     *aij;
3201   PetscInt       *garray = a->garray,*colsub;
3202   PetscInt       count,Bn=B->cmap->N,cstart=mat->cmap->rstart,cend=mat->cmap->rend;
3203   IS             iscol_sub,iscmap;
3204   const PetscInt *is_idx,*cmap;
       MPI_Comm       comm;
       PetscMPIInt    rank;
3205 
3206   PetscFunctionBegin;
3209   ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
3210   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3212   ierr = ISGetLocalSize(iscol,&n);CHKERRQ(ierr); /* iscol should be sequential */
3213 
3214   if (call == MAT_INITIAL_MATRIX) {
3215     /* create scalable iscol_sub (a subset of iscol) */
3216     PetscInt  *idx,*cmap1;
         PetscBool sorted;
3217 
3219     ierr = ISSorted(iscol,&sorted);CHKERRQ(ierr);
3220     if (!sorted) { ierr = PetscInfo(mat,"iscol is not sorted\n");CHKERRQ(ierr); }
3221 
3222     ierr = PetscMalloc2(n,&idx,n,&cmap1);CHKERRQ(ierr);
3223     ierr = ISGetIndices(iscol,&is_idx);CHKERRQ(ierr);
3224     count = 0;
3225 
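     /* merge the sorted indices of iscol against the owned column range [cstart,cend) (A part) and
        against the sorted off-process columns in garray (B part); for each match, record the global
        column in idx[] and its position within iscol in cmap1[] */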
3226     /* A part */
3227     j = cstart;
3228     for (i=0; i<n; i++) {
3229       if (j >= cend) break;
3230       if (is_idx[i] == j) {
3231         idx[count]  = j;
3232         cmap1[count] = i; /* column index in submat */
3233         count++; j++;
3234       } else if (is_idx[i] > j) {
3235         while (is_idx[i] > j && j < cend-1) j++;
3236         if (is_idx[i] == j) {
3237           idx[count]  = j;
3238           cmap1[count] = i; /* column index in submat */
3239           count++; j++;
3240         }
3241       }
3242     }
3243 
3244     /* B part */
3245     j = 0;
3246     for (i=0; i<n; i++) {
3247       if (j >= Bn) break;
3248       if (is_idx[i] == garray[j]) {
3249         idx[count]  = garray[j];
3250         cmap1[count] = i;  /* column index in submat */
3251         count++; j++;
3252       } else if (is_idx[i] > garray[j]) {
3253         while (is_idx[i] > garray[j] && j < Bn-1) j++;
3254         if (is_idx[i] == garray[j]) {
3255           idx[count]  = garray[j];
3256           cmap1[count] = i; /* column index in submat */
3257           count++; j++;
3258         }
3259       }
3260     }
3261     ierr = PetscSortInt(count,cmap1);CHKERRQ(ierr);
3262     ierr = ISRestoreIndices(iscol,&is_idx);CHKERRQ(ierr);
3263 
3264     ierr = ISCreateGeneral(PetscObjectComm((PetscObject)iscol),count,idx,PETSC_COPY_VALUES,&iscol_sub);CHKERRQ(ierr);
3265     ierr = ISSort(iscol_sub);CHKERRQ(ierr);
3266 
3267     ierr = ISCreateGeneral(PetscObjectComm((PetscObject)iscol),count,cmap1,PETSC_COPY_VALUES,&iscmap);CHKERRQ(ierr);
3268     ierr = PetscFree2(idx,cmap1);CHKERRQ(ierr);
3269 
3270     ierr = PetscInfo2(mat,"Column submap has %D entries out of local IS size %D\n",count,n);CHKERRQ(ierr);
3275 
3276     ierr = MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_INITIAL_MATRIX,PETSC_FALSE,&Msub);CHKERRQ(ierr);
3277 
3278   } else { /* call ==  MAT_REUSE_MATRIX */
3279     ierr = PetscObjectQuery((PetscObject)*newmat,"SubIScol",(PetscObject*)&iscol_sub);CHKERRQ(ierr);
3280     if (!iscol_sub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"SubIScol passed in was not used before, cannot reuse");
3281     ierr = ISGetLocalSize(iscol_sub,&count);CHKERRQ(ierr);
3282 
3283     ierr = PetscObjectQuery((PetscObject)*newmat,"Subcmap",(PetscObject*)&iscmap);CHKERRQ(ierr);
3284     if (!iscmap) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Subcmap passed in was not used before, cannot reuse");
3285 
3286     ierr = PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Msub);CHKERRQ(ierr);
3287     if (!Msub) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3288 
3289     ierr = MatCreateSubMatrices_MPIAIJ_SingleIS_Local(mat,1,&isrow,&iscol_sub,MAT_REUSE_MATRIX,PETSC_FALSE,&Msub);CHKERRQ(ierr);
3290   }
3291 
3292   aij = (Mat_SeqAIJ*)(Msub)->data;
3293   ii  = aij->i;
3294   ierr = ISGetIndices(iscmap,&cmap);CHKERRQ(ierr);
3295 
3296   /*
3297       m - number of local rows
3298       n - number of columns (same on all processors)
3299       rstart - first row in new global matrix generated
3300   */
3301   ierr = MatGetSize(Msub,&m,NULL);CHKERRQ(ierr);
3302 
3303   if (call == MAT_INITIAL_MATRIX) {
3304     MPI_Comm       comm;
3305     PetscMPIInt    rank,size;
3306 
3307     ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
3308     ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3309     ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3310 
3311     /*
3312         Determine the number of non-zeros in the diagonal and off-diagonal
3313         portions of the matrix in order to do correct preallocation
3314     */
3315 
3316     /* first get start and end of "diagonal" columns */
3317     if (csize == PETSC_DECIDE) {
3318       ierr = ISGetSize(isrow,&mglobal);CHKERRQ(ierr);
3319       if (mglobal == n) { /* square matrix */
3320         nlocal = m;
3321       } else {
3322         nlocal = n/size + ((n % size) > rank);
3323       }
3324     } else {
3325       nlocal = csize;
3326     }
3327     ierr   = MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
3328     rstart = rend - nlocal;
3329     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3330 
3331     /* next, compute all the lengths */
3332     jj    = aij->j;
3333     ierr  = PetscMalloc1(2*m+1,&dlens);CHKERRQ(ierr);
3334     olens = dlens + m;
3335     for (i=0; i<m; i++) {
3336       jend = ii[i+1] - ii[i];
3337       olen = 0;
3338       dlen = 0;
3339       for (j=0; j<jend; j++) {
3340         if (cmap[*jj] < rstart || cmap[*jj] >= rend) olen++;
3341         else dlen++;
3342         jj++;
3343       }
3344       olens[i] = olen;
3345       dlens[i] = dlen;
3346     }
3347     ierr = MatGetBlockSizes(Msub,&bs,&cbs);CHKERRQ(ierr);
3348 
3349     ierr = MatCreate(comm,&M);CHKERRQ(ierr);
3350     ierr = MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);CHKERRQ(ierr);
3351     ierr = MatSetBlockSizes(M,bs,cbs);CHKERRQ(ierr);
3352     ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr);
3353     ierr = MatMPIAIJSetPreallocation(M,0,dlens,0,olens);CHKERRQ(ierr);
3354     ierr = PetscFree(dlens);CHKERRQ(ierr);
3355   } else {
3356     M    = *newmat;
3357     ierr = MatGetLocalSize(M,&i,NULL);CHKERRQ(ierr);
3358     if (i != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3359     ierr = MatZeroEntries(M);CHKERRQ(ierr);
3360     /*
3361          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3362        rather than the slower MatSetValues().
3363     */
3364     M->was_assembled = PETSC_TRUE;
3365     M->assembled     = PETSC_FALSE;
3366   }
3367 
3368   /* set values of Msub to *newmat */
3369   ierr = PetscMalloc1(count,&colsub);CHKERRQ(ierr);
3370   ierr = MatGetOwnershipRange(M,&rstart,NULL);CHKERRQ(ierr);
3371 
3372   jj   = aij->j;
3373   aa   = aij->a;
3374   for (i=0; i<m; i++) {
3375     row = rstart + i;
3376     nz  = ii[i+1] - ii[i];
3377     for (j=0; j<nz; j++) colsub[j] = cmap[jj[j]];
3378     ierr  = MatSetValues_MPIAIJ(M,1,&row,nz,colsub,aa,INSERT_VALUES);CHKERRQ(ierr);
3379     jj += nz; aa += nz;
3380   }
3381   ierr = ISRestoreIndices(iscmap,&cmap);CHKERRQ(ierr);
3382 
3383   ierr    = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3384   ierr    = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3385   *newmat = M;
3386 
3387   ierr = PetscFree(colsub);CHKERRQ(ierr);
3388 
3389   /* save Msub, iscol_sub and iscmap used in processor for next request */
3390   if (call ==  MAT_INITIAL_MATRIX) {
3391     ierr = PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Msub);CHKERRQ(ierr);
3392     ierr = MatDestroy(&Msub);CHKERRQ(ierr);
3393 
3394     ierr = PetscObjectCompose((PetscObject)M,"SubIScol",(PetscObject)iscol_sub);CHKERRQ(ierr);
3395     ierr = ISDestroy(&iscol_sub);CHKERRQ(ierr);
3396 
3397     ierr = PetscObjectCompose((PetscObject)M,"Subcmap",(PetscObject)iscmap);CHKERRQ(ierr);
3398     ierr = ISDestroy(&iscmap);CHKERRQ(ierr);
3399   }
3400   PetscFunctionReturn(0);
3401 }
3402 
3403 /*
3404     Not great since it makes two copies of the submatrix: first a SeqAIJ
3405   locally, and then the end result by concatenating the local matrices.
3406   Writing it directly would be much like MatCreateSubMatrices_MPIAIJ().
3407 
3408   Note: This requires a sequential iscol with all indices.
3409 */
3410 PetscErrorCode MatCreateSubMatrix_MPIAIJ_Private(Mat mat,IS isrow,IS iscol,PetscInt csize,MatReuse call,Mat *newmat)
3411 {
3412   PetscErrorCode ierr;
3413   PetscMPIInt    rank,size;
3414   PetscInt       i,m,n,rstart,row,rend,nz,*cwork,j,bs,cbs;
3415   PetscInt       *ii,*jj,nlocal,*dlens,*olens,dlen,olen,jend,mglobal;
3416   Mat            M,Mreuse;
3417   MatScalar      *aa,*vwork;
3418   MPI_Comm       comm;
3419   Mat_SeqAIJ     *aij;
3420   /* PetscBool   sameDist=PETSC_FALSE,tsameDist; */ /* only needed by the disabled block below */
3421 
3422   PetscFunctionBegin;
3423   ierr = PetscObjectGetComm((PetscObject)mat,&comm);CHKERRQ(ierr);
3424   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
3425   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
3426 #if 0
3427   /* If isrow has same processor distribution as mat, then use a scalable routine */
3428   ierr = ISGetLocalSize(isrow,&n);CHKERRQ(ierr);
3429   if (!n) {
3430     sameDist=PETSC_TRUE;
3431   } else {
3432     ierr = ISGetMinMax(isrow,&i,&j);CHKERRQ(ierr);
3433     ierr = MatGetOwnershipRange(mat,&rstart,&rend);CHKERRQ(ierr);
3434     if (i >= rstart && j < rend) sameDist=PETSC_TRUE;
3435   }
3436   ierr = MPIU_Allreduce(&sameDist,&tsameDist,1,MPIU_BOOL,MPI_LAND,comm);CHKERRQ(ierr);
3437   if (tsameDist) {
3438     ierr = MatCreateSubMatrix_MPIAIJ_Private_SameDist(mat,isrow,iscol,csize,call,newmat);CHKERRQ(ierr);
3439     PetscFunctionReturn(0);
3440   }
3441 #endif
3442 
3443   if (call ==  MAT_REUSE_MATRIX) {
3444     ierr = PetscObjectQuery((PetscObject)*newmat,"SubMatrix",(PetscObject*)&Mreuse);CHKERRQ(ierr);
3445     if (!Mreuse) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Submatrix passed in was not used before, cannot reuse");
3446     ierr = MatCreateSubMatrices_MPIAIJ_Local(mat,1,&isrow,&iscol,MAT_REUSE_MATRIX,&Mreuse);CHKERRQ(ierr);
3447   } else {
3448     ierr = MatCreateSubMatrices_MPIAIJ_Local(mat,1,&isrow,&iscol,MAT_INITIAL_MATRIX,&Mreuse);CHKERRQ(ierr);
3449   }
3450 
3451   /*
3452       m - number of local rows
3453       n - number of columns (same on all processors)
3454       rstart - first row in new global matrix generated
3455   */
3456   ierr = MatGetSize(Mreuse,&m,&n);CHKERRQ(ierr);
3457   ierr = MatGetBlockSizes(Mreuse,&bs,&cbs);CHKERRQ(ierr);
3458   if (call == MAT_INITIAL_MATRIX) {
3459     aij = (Mat_SeqAIJ*)(Mreuse)->data;
3460     ii  = aij->i;
3461     jj  = aij->j;
3462 
3463     /*
3464         Determine the number of non-zeros in the diagonal and off-diagonal
3465         portions of the matrix in order to do correct preallocation
3466     */
3467 
3468     /* first get start and end of "diagonal" columns */
3469     if (csize == PETSC_DECIDE) {
3470       ierr = ISGetSize(isrow,&mglobal);CHKERRQ(ierr);
3471       if (mglobal == n) { /* square matrix */
3472         nlocal = m;
3473       } else {
3474         nlocal = n/size + ((n % size) > rank);
3475       }
3476     } else {
3477       nlocal = csize;
3478     }
3479     ierr   = MPI_Scan(&nlocal,&rend,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
3480     rstart = rend - nlocal;
3481     if (rank == size - 1 && rend != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Local column sizes %D do not add up to total number of columns %D",rend,n);
3482 
3483     /* next, compute all the lengths */
3484     ierr  = PetscMalloc1(2*m+1,&dlens);CHKERRQ(ierr);
3485     olens = dlens + m;
3486     for (i=0; i<m; i++) {
3487       jend = ii[i+1] - ii[i];
3488       olen = 0;
3489       dlen = 0;
3490       for (j=0; j<jend; j++) {
3491         if (*jj < rstart || *jj >= rend) olen++;
3492         else dlen++;
3493         jj++;
3494       }
3495       olens[i] = olen;
3496       dlens[i] = dlen;
3497     }
3498     ierr = MatCreate(comm,&M);CHKERRQ(ierr);
3499     ierr = MatSetSizes(M,m,nlocal,PETSC_DECIDE,n);CHKERRQ(ierr);
3500     ierr = MatSetBlockSizes(M,bs,cbs);CHKERRQ(ierr);
3501     ierr = MatSetType(M,((PetscObject)mat)->type_name);CHKERRQ(ierr);
3502     ierr = MatMPIAIJSetPreallocation(M,0,dlens,0,olens);CHKERRQ(ierr);
3503     ierr = PetscFree(dlens);CHKERRQ(ierr);
3504   } else {
3505     PetscInt ml,nl;
3506 
3507     M    = *newmat;
3508     ierr = MatGetLocalSize(M,&ml,&nl);CHKERRQ(ierr);
3509     if (ml != m) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Previous matrix must be same size/layout as request");
3510     ierr = MatZeroEntries(M);CHKERRQ(ierr);
3511     /*
3512          The next two lines are needed so we may call MatSetValues_MPIAIJ() below directly,
3513        rather than the slower MatSetValues().
3514     */
3515     M->was_assembled = PETSC_TRUE;
3516     M->assembled     = PETSC_FALSE;
3517   }
3518   ierr = MatGetOwnershipRange(M,&rstart,&rend);CHKERRQ(ierr);
3519   aij  = (Mat_SeqAIJ*)(Mreuse)->data;
3520   ii   = aij->i;
3521   jj   = aij->j;
3522   aa   = aij->a;
3523   for (i=0; i<m; i++) {
3524     row   = rstart + i;
3525     nz    = ii[i+1] - ii[i];
3526     cwork = jj;     jj += nz;
3527     vwork = aa;     aa += nz;
3528     ierr  = MatSetValues_MPIAIJ(M,1,&row,nz,cwork,vwork,INSERT_VALUES);CHKERRQ(ierr);
3529   }
3530 
3531   ierr    = MatAssemblyBegin(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3532   ierr    = MatAssemblyEnd(M,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3533   *newmat = M;
3534 
3535   /* save submatrix used in processor for next request */
3536   if (call ==  MAT_INITIAL_MATRIX) {
3537     ierr = PetscObjectCompose((PetscObject)M,"SubMatrix",(PetscObject)Mreuse);CHKERRQ(ierr);
3538     ierr = MatDestroy(&Mreuse);CHKERRQ(ierr);
3539   }
3540   PetscFunctionReturn(0);
3541 }
3542 
3543 PetscErrorCode  MatMPIAIJSetPreallocationCSR_MPIAIJ(Mat B,const PetscInt Ii[],const PetscInt J[],const PetscScalar v[])
3544 {
3545   PetscInt       m,cstart, cend,j,nnz,i,d;
3546   PetscInt       *d_nnz,*o_nnz,nnz_max = 0,rstart,ii;
3547   const PetscInt *JJ;
3548   PetscScalar    *values;
3549   PetscErrorCode ierr;
3550   PetscBool      nooffprocentries;
3551 
3552   PetscFunctionBegin;
3553   if (Ii[0]) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Ii[0] must be 0 it is %D",Ii[0]);
3554 
3555   ierr   = PetscLayoutSetUp(B->rmap);CHKERRQ(ierr);
3556   ierr   = PetscLayoutSetUp(B->cmap);CHKERRQ(ierr);
3557   m      = B->rmap->n;
3558   cstart = B->cmap->rstart;
3559   cend   = B->cmap->rend;
3560   rstart = B->rmap->rstart;
3561 
3562   ierr = PetscMalloc2(m,&d_nnz,m,&o_nnz);CHKERRQ(ierr);
3563 
3564 #if defined(PETSC_USE_DEBUG)
3565   for (i=0; i<m; i++) {
3566     nnz = Ii[i+1]- Ii[i];
3567     JJ  = J + Ii[i];
3568     if (nnz < 0) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Local row %D has a negative %D number of columns",i,nnz);
3569     if (nnz && (JJ[0] < 0)) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D starts with negative column index %D",i,JJ[0]);
3570     if (nnz && (JJ[nnz-1] >= B->cmap->N)) SETERRQ3(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Row %D ends with too large a column index %D (max allowed %D)",i,JJ[nnz-1],B->cmap->N);
3571   }
3572 #endif
3573 
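     /* for each local row, count entries in the diagonal block [cstart,cend) (d_nnz) versus the off-diagonal block (o_nnz) */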
3574   for (i=0; i<m; i++) {
3575     nnz     = Ii[i+1]- Ii[i];
3576     JJ      = J + Ii[i];
3577     nnz_max = PetscMax(nnz_max,nnz);
3578     d       = 0;
3579     for (j=0; j<nnz; j++) {
3580       if (cstart <= JJ[j] && JJ[j] < cend) d++;
3581     }
3582     d_nnz[i] = d;
3583     o_nnz[i] = nnz - d;
3584   }
3585   ierr = MatMPIAIJSetPreallocation(B,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
3586   ierr = PetscFree2(d_nnz,o_nnz);CHKERRQ(ierr);
3587 
3588   if (v) values = (PetscScalar*)v;
3589   else {
3590     ierr = PetscCalloc1(nnz_max+1,&values);CHKERRQ(ierr);
3591   }
3592 
3593   for (i=0; i<m; i++) {
3594     ii   = i + rstart;
3595     nnz  = Ii[i+1]- Ii[i];
3596     ierr = MatSetValues_MPIAIJ(B,1,&ii,nnz,J+Ii[i],values+(v ? Ii[i] : 0),INSERT_VALUES);CHKERRQ(ierr);
3597   }
3598   nooffprocentries    = B->nooffprocentries;
3599   B->nooffprocentries = PETSC_TRUE;
3600   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3601   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
3602   B->nooffprocentries = nooffprocentries;
3603 
3604   if (!v) {
3605     ierr = PetscFree(values);CHKERRQ(ierr);
3606   }
3607   ierr = MatSetOption(B,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
3608   PetscFunctionReturn(0);
3609 }
3610 
3611 /*@
3612    MatMPIAIJSetPreallocationCSR - Allocates memory for a sparse parallel matrix in AIJ format
3613    (the default parallel PETSc format).
3614 
3615    Collective on MPI_Comm
3616 
3617    Input Parameters:
3618 +  B - the matrix
3619 .  i - the indices into j for the start of each local row (starts with zero)
3620 .  j - the column indices for each local row (starts with zero)
3621 -  v - optional values in the matrix
3622 
3623    Level: developer
3624 
3625    Notes:
3626        The i, j, and v arrays ARE copied by this routine into the internal format used by PETSc;
3627      thus you CANNOT change the matrix entries by changing the values of v[] after you have
3628      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3629 
3630        The i and j indices are 0 based, and i indices are indices corresponding to the local j array.
3631 
3632        The format used for the sparse matrix input is equivalent to a
3633     row-major ordering, i.e., for the following matrix, the expected input data is
3634     as shown:
3635 
3636 $        1 0 0
3637 $        2 0 3     P0
3638 $       -------
3639 $        4 5 6     P1
3640 $
3641 $     Process0 [P0]: rows_owned=[0,1]
3642 $        i =  {0,1,3}  [size = nrow+1  = 2+1]
3643 $        j =  {0,0,2}  [size = 3]
3644 $        v =  {1,2,3}  [size = 3]
3645 $
3646 $     Process1 [P1]: rows_owned=[2]
3647 $        i =  {0,3}    [size = nrow+1  = 1+1]
3648 $        j =  {0,1,2}  [size = 3]
3649 $        v =  {4,5,6}  [size = 3]
3650 
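   Example usage (a minimal sketch for process 0 of the example above; B is assumed to already
   exist with type MATMPIAIJ and two local rows, and error handling is abbreviated):

.vb
     PetscInt    i[] = {0,1,3};
     PetscInt    j[] = {0,0,2};
     PetscScalar v[] = {1.0,2.0,3.0};

     ierr = MatMPIAIJSetPreallocationCSR(B,i,j,v);CHKERRQ(ierr);
.ve
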
3651 .keywords: matrix, aij, compressed row, sparse, parallel
3652 
3653 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatCreateAIJ(), MATMPIAIJ,
3654           MatCreateSeqAIJWithArrays(), MatCreateMPIAIJWithSplitArrays()
3655 @*/
3656 PetscErrorCode  MatMPIAIJSetPreallocationCSR(Mat B,const PetscInt i[],const PetscInt j[], const PetscScalar v[])
3657 {
3658   PetscErrorCode ierr;
3659 
3660   PetscFunctionBegin;
3661   ierr = PetscTryMethod(B,"MatMPIAIJSetPreallocationCSR_C",(Mat,const PetscInt[],const PetscInt[],const PetscScalar[]),(B,i,j,v));CHKERRQ(ierr);
3662   PetscFunctionReturn(0);
3663 }
3664 
3665 /*@C
3666    MatMPIAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in AIJ format
3667    (the default parallel PETSc format).  For good matrix assembly performance
3668    the user should preallocate the matrix storage by setting the parameters
3669    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3670    performance can be increased by more than a factor of 50.
3671 
3672    Collective on MPI_Comm
3673 
3674    Input Parameters:
3675 +  B - the matrix
3676 .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3677            (same value is used for all local rows)
3678 .  d_nnz - array containing the number of nonzeros in the various rows of the
3679            DIAGONAL portion of the local submatrix (possibly different for each row)
3680            or NULL (PETSC_NULL_INTEGER in Fortran), if d_nz is used to specify the nonzero structure.
3681            The size of this array is equal to the number of local rows, i.e 'm'.
3682            For matrices that will be factored, you must leave room for (and set)
3683            the diagonal entry even if it is zero.
3684 .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3685            submatrix (same value is used for all local rows).
3686 -  o_nnz - array containing the number of nonzeros in the various rows of the
3687            OFF-DIAGONAL portion of the local submatrix (possibly different for
3688            each row) or NULL (PETSC_NULL_INTEGER in Fortran), if o_nz is used to specify the nonzero
3689            structure. The size of this array is equal to the number
3690            of local rows, i.e 'm'.
3691 
3692    If the *_nnz parameter is given then the *_nz parameter is ignored
3693 
3694    The AIJ format (also called the Yale sparse matrix format or
3695    compressed row storage (CSR)) is fully compatible with standard Fortran 77
3696    storage.  The stored row and column indices begin with zero.
3697    See Users-Manual: ch_mat for details.
3698 
3699    The parallel matrix is partitioned such that the first m0 rows belong to
3700    process 0, the next m1 rows belong to process 1, the next m2 rows belong
3701    to process 2, etc., where m0,m1,m2,... are the input parameter 'm' on each process.
3702 
3703    The DIAGONAL portion of the local submatrix of a processor can be defined
3704    as the submatrix which is obtained by extracting the part corresponding to
3705    the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
3706    first row that belongs to the processor, r2 is the last row belonging to
3707    this processor, and c1-c2 is the range of indices of the local part of a
3708    vector suitable for applying the matrix to.  This is an mxn matrix.  In the
3709    common case of a square matrix, the row and column ranges are the same and
3710    the DIAGONAL part is also square. The remaining portion of the local
3711    submatrix (mxN) constitutes the OFF-DIAGONAL portion.
3714 
3715    You can call MatGetInfo() to get information on how effective the preallocation was;
3716    for example the fields mallocs,nz_allocated,nz_used,nz_unneeded;
3717    You can also run with the option -info and look for messages with the string
3718    malloc in them to see if additional memory allocation was needed.
3719 
3720    Example usage:
3721 
3722    Consider the following 8x8 matrix with 34 non-zero values, that is
3723    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3724    proc1 owns 3 rows, proc2 owns 2 rows. This division can be shown
3725    as follows:
3726 
3727 .vb
3728             1  2  0  |  0  3  0  |  0  4
3729     Proc0   0  5  6  |  7  0  0  |  8  0
3730             9  0 10  | 11  0  0  | 12  0
3731     -------------------------------------
3732            13  0 14  | 15 16 17  |  0  0
3733     Proc1   0 18  0  | 19 20 21  |  0  0
3734             0  0  0  | 22 23  0  | 24  0
3735     -------------------------------------
3736     Proc2  25 26 27  |  0  0 28  | 29  0
3737            30  0  0  | 31 32 33  |  0 34
3738 .ve
3739 
3740    This can be represented as a collection of submatrices as:
3741 
3742 .vb
3743       A B C
3744       D E F
3745       G H I
3746 .ve
3747 
3748    Where the submatrices A,B,C are owned by proc0, D,E,F are
3749    owned by proc1, G,H,I are owned by proc2.
3750 
3751    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3752    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
3753    The 'M','N' parameters are 8,8, and have the same values on all procs.
3754 
3755    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
3756    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
3757    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
3758    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
3759    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
3760    matrix, and [DF] as another SeqAIJ matrix.
3761 
3762    When d_nz, o_nz parameters are specified, d_nz storage elements are
3763    allocated for every row of the local diagonal submatrix, and o_nz
3764    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
3765    One way to choose d_nz and o_nz is to use the maximum number of nonzeros per
3766    local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
3767    In this case, the values of d_nz,o_nz are:
3768 .vb
3769      proc0 : d_nz = 2, o_nz = 2
3770      proc1 : d_nz = 3, o_nz = 2
3771      proc2 : d_nz = 1, o_nz = 4
3772 .ve
3773    We are allocating m*(d_nz+o_nz) storage locations on every process. This
3774    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
3775    for proc2, i.e., we are using 12+15+10=37 storage locations to store
3776    34 values.
3777 
3778    When d_nnz, o_nnz parameters are specified, the storage is specified
3779    for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
3780    In the above case the values for d_nnz,o_nnz are:
3781 .vb
3782      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
3783      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
3784      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
3785 .ve
3786    Here the space allocated is the sum of all the above values, i.e., 34, and
3787    hence the preallocation is exact.
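
   A minimal call sequence using these arrays might look as follows (a sketch only; A, m, comm,
   and the d_nnz/o_nnz arrays are placeholders for the caller's own data):

.vb
     ierr = MatCreate(comm,&A);CHKERRQ(ierr);
     ierr = MatSetSizes(A,m,m,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
     ierr = MatMPIAIJSetPreallocation(A,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
     /* subsequent MatSetValues() calls now incur no mallocs */
.ve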
3788 
3789    Level: intermediate
3790 
3791 .keywords: matrix, aij, compressed row, sparse, parallel
3792 
3793 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatCreateAIJ(), MatMPIAIJSetPreallocationCSR(),
3794           MATMPIAIJ, MatGetInfo(), PetscSplitOwnership()
3795 @*/
3796 PetscErrorCode  MatMPIAIJSetPreallocation(Mat B,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[])
3797 {
3798   PetscErrorCode ierr;
3799 
3800   PetscFunctionBegin;
3801   PetscValidHeaderSpecific(B,MAT_CLASSID,1);
3802   PetscValidType(B,1);
3803   ierr = PetscTryMethod(B,"MatMPIAIJSetPreallocation_C",(Mat,PetscInt,const PetscInt[],PetscInt,const PetscInt[]),(B,d_nz,d_nnz,o_nz,o_nnz));CHKERRQ(ierr);
3804   PetscFunctionReturn(0);
3805 }
3806 
3807 /*@
3808      MatCreateMPIAIJWithArrays - creates an MPI AIJ matrix using arrays that contain the local
3809          rows in standard CSR format.
3810 
3811    Collective on MPI_Comm
3812 
3813    Input Parameters:
3814 +  comm - MPI communicator
3815 .  m - number of local rows (Cannot be PETSC_DECIDE)
3816 .  n - This value should be the same as the local size used in creating the
3817        x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
3818        calculated if N is given) For square matrices n is almost always m.
3819 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3820 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3821 .   i - row indices
3822 .   j - column indices
3823 -   a - matrix values
3824 
3825    Output Parameter:
3826 .   mat - the matrix
3827 
3828    Level: intermediate
3829 
3830    Notes:
3831        The i, j, and a arrays ARE copied by this routine into the internal format used by PETSc;
3832      thus you CANNOT change the matrix entries by changing the values of a[] after you have
3833      called this routine. Use MatCreateMPIAIJWithSplitArrays() to avoid needing to copy the arrays.
3834 
3835        The i and j indices are 0-based, and the i indices are offsets into the local j array.
3836 
3837        The format used for the sparse matrix input is equivalent to a
3838     row-major ordering, i.e. for the following matrix, the input data expected is
3839     as shown below
3840 
3841 $        1 0 0
3842 $        2 0 3     P0
3843 $       -------
3844 $        4 5 6     P1
3845 $
3846 $     Process0 [P0]: rows_owned=[0,1]
3847 $        i =  {0,1,3}  [size = nrow+1  = 2+1]
3848 $        j =  {0,0,2}  [size = 3]
3849 $        v =  {1,2,3}  [size = 3]
3850 $
3851 $     Process1 [P1]: rows_owned=[2]
3852 $        i =  {0,3}    [size = nrow+1  = 1+1]
3853 $        j =  {0,1,2}  [size = 3]
3854 $        v =  {4,5,6}  [size = 3]
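
       As a sketch, the two processes above could assemble this matrix as
     follows (Mat A is assumed declared on each process, and error checking
     is elided):

$     Process0 [P0]:
$        PetscInt    i0[] = {0,1,3},j0[] = {0,0,2};
$        PetscScalar v0[] = {1,2,3};
$        MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,2,PETSC_DECIDE,3,3,i0,j0,v0,&A);
$
$     Process1 [P1]:
$        PetscInt    i1[] = {0,3},j1[] = {0,1,2};
$        PetscScalar v1[] = {4,5,6};
$        MatCreateMPIAIJWithArrays(PETSC_COMM_WORLD,1,PETSC_DECIDE,3,3,i1,j1,v1,&A);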
3855 
3856 .keywords: matrix, aij, compressed row, sparse, parallel
3857 
3858 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
3859           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithSplitArrays()
3860 @*/
3861 PetscErrorCode  MatCreateMPIAIJWithArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,const PetscInt i[],const PetscInt j[],const PetscScalar a[],Mat *mat)
3862 {
3863   PetscErrorCode ierr;
3864 
3865   PetscFunctionBegin;
3866   if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
3867   if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE, or negative");
3868   ierr = MatCreate(comm,mat);CHKERRQ(ierr);
3869   ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
3870   /* ierr = MatSetBlockSizes(M,bs,cbs);CHKERRQ(ierr); */
3871   ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
3872   ierr = MatMPIAIJSetPreallocationCSR(*mat,i,j,a);CHKERRQ(ierr);
3873   PetscFunctionReturn(0);
3874 }
3875 
3876 /*@C
3877    MatCreateAIJ - Creates a sparse parallel matrix in AIJ format
3878    (the default parallel PETSc format).  For good matrix assembly performance
3879    the user should preallocate the matrix storage by setting the parameters
3880    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
3881    performance can be increased by more than a factor of 50.
3882 
3883    Collective on MPI_Comm
3884 
3885    Input Parameters:
3886 +  comm - MPI communicator
3887 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
3888            This value should be the same as the local size used in creating the
3889            y vector for the matrix-vector product y = Ax.
3890 .  n - This value should be the same as the local size used in creating the
3891        x vector for the matrix-vector product y = Ax (or PETSC_DECIDE to have it
3892        calculated if N is given). For square matrices n is almost always m.
3893 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
3894 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
3895 .  d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
3896            (same value is used for all local rows)
3897 .  d_nnz - array containing the number of nonzeros in the various rows of the
3898            DIAGONAL portion of the local submatrix (possibly different for each row)
3899            or NULL, if d_nz is used to specify the nonzero structure.
3900            The size of this array is equal to the number of local rows, i.e 'm'.
3901 .  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
3902            submatrix (same value is used for all local rows).
3903 -  o_nnz - array containing the number of nonzeros in the various rows of the
3904            OFF-DIAGONAL portion of the local submatrix (possibly different for
3905            each row) or NULL, if o_nz is used to specify the nonzero
3906            structure. The size of this array is equal to the number
3907            of local rows, i.e 'm'.
3908 
3909    Output Parameter:
3910 .  A - the matrix
3911 
3912    It is recommended that one use the MatCreate(), MatSetType() and/or MatSetFromOptions(),
3913    MatXXXXSetPreallocation() paradigm instead of calling this routine directly.
3914    [MatXXXXSetPreallocation() is, for example, MatSeqAIJSetPreallocation()]
3915 
3916    Notes:
3917    If the *_nnz parameter is given then the *_nz parameter is ignored
3918 
3919    m,n,M,N parameters specify the size of the matrix, and its partitioning across
3920    processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
3921    storage requirements for this matrix.
3922 
3923    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
3924    processor then it must be used on all processors that share the object for
3925    that argument.
3926 
3927    The user MUST specify either the local or global matrix dimensions
3928    (possibly both).
3929 
3930    The parallel matrix is partitioned across processors such that the
3931    first m0 rows belong to process 0, the next m1 rows belong to
3932    process 1, the next m2 rows belong to process 2, etc., where
3933    m0,m1,m2,... are the values of the input parameter 'm' on each process,
3934    i.e. each processor stores values corresponding to an [m x N] submatrix.
3935 
3936    The columns are logically partitioned with the n0 columns belonging
3937    to the 0th partition, the next n1 columns belonging to the next
3938    partition, etc., where n0,n1,n2,... are the values of the input parameter 'n'.
3939 
3940    The DIAGONAL portion of the local submatrix on any given processor
3941    is the submatrix corresponding to the rows and columns owned by that
3942    processor, i.e. the diagonal submatrix on process 0 is [m0 x n0],
3943    the diagonal submatrix on process 1 is [m1 x n1], etc. The
3944    remaining [m x (N-n)] portion of the local submatrix
3945    constitutes the OFF-DIAGONAL portion. The example below
3946    illustrates this concept.
3947 
3948    For a square global matrix we define each processor's diagonal portion
3949    to be its local rows and the corresponding columns (a square submatrix);
3950    each processor's off-diagonal portion encompasses the remainder of the
3951    local matrix (a rectangular submatrix).
3952 
3953    If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.
3954 
3955    When calling this routine with a single process communicator, a matrix of
3956    type SEQAIJ is returned.  If a matrix of type MATMPIAIJ is desired for this
3957    type of communicator, use the construction mechanism:
3958      MatCreate(...,&A); MatSetType(A,MATMPIAIJ); MatSetSizes(A, m,n,M,N); MatMPIAIJSetPreallocation(A,...);
3959 
3960    By default, this format uses inodes (identical nodes) when possible.
3961    We search for consecutive rows with the same nonzero structure, thereby
3962    reusing matrix information to achieve increased efficiency.
3963 
3964    Options Database Keys:
3965 +  -mat_no_inode  - Do not use inodes
3966 .  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
3967 -  -mat_aij_oneindex - Internally use indexing starting at 1
3968         rather than 0.  Note that when calling MatSetValues(),
3969         the user still MUST index entries starting at 0!
3970 
3971 
3972    Example usage:
3973 
3974    Consider the following 8x8 matrix with 34 non-zero values that is
3975    assembled across 3 processors. Let's assume that proc0 owns 3 rows,
3976    proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
3977    as follows:
3978 
3979 .vb
3980             1  2  0  |  0  3  0  |  0  4
3981     Proc0   0  5  6  |  7  0  0  |  8  0
3982             9  0 10  | 11  0  0  | 12  0
3983     -------------------------------------
3984            13  0 14  | 15 16 17  |  0  0
3985     Proc1   0 18  0  | 19 20 21  |  0  0
3986             0  0  0  | 22 23  0  | 24  0
3987     -------------------------------------
3988     Proc2  25 26 27  |  0  0 28  | 29  0
3989            30  0  0  | 31 32 33  |  0 34
3990 .ve
3991 
3992    This can be represented as a collection of submatrices as:
3993 
3994 .vb
3995       A B C
3996       D E F
3997       G H I
3998 .ve
3999 
4000    where the submatrices A,B,C are owned by proc0, D,E,F are
4001    owned by proc1, and G,H,I are owned by proc2.
4002 
4003    The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4004    The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
4005    The 'M','N' parameters are 8,8, and have the same values on all procs.
4006 
4007    The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
4008    submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
4009    corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
4010    Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
4011    part as SeqAIJ matrices. For example, proc1 will store [E] as a SeqAIJ
4012    matrix, and [DF] as another SeqAIJ matrix.
4013 
4014    When the d_nz, o_nz parameters are specified, d_nz storage locations are
4015    allocated for every row of the local DIAGONAL submatrix, and o_nz
4016    storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
4017    One way to choose d_nz and o_nz is to use the maximum number of nonzeros
4018    per local row in the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
4019    In this case, the values of d_nz,o_nz are:
4020 .vb
4021      proc0 : dnz = 2, o_nz = 2
4022      proc1 : dnz = 3, o_nz = 2
4023      proc2 : dnz = 1, o_nz = 4
4024 .ve
4025    We are allocating m*(d_nz+o_nz) storage locations for every proc. This
4026    translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
4027    for proc2, i.e. we are using 12+15+10=37 storage locations to store
4028    34 values.
4029 
4030    When the d_nnz, o_nnz parameters are specified, the storage is specified
4031    for every row, corresponding to both the DIAGONAL and OFF-DIAGONAL submatrices.
4032    In the above case the values for d_nnz,o_nnz are:
4033 .vb
4034      proc0: d_nnz = [2,2,2] and o_nnz = [2,2,2]
4035      proc1: d_nnz = [3,3,2] and o_nnz = [2,1,1]
4036      proc2: d_nnz = [1,1]   and o_nnz = [4,4]
4037 .ve
4038    Here the space allocated is the sum of all the above values, i.e. 34,
4039    and hence the preallocation is perfect.
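
   As a sketch, proc1 in the example above could create its portion of the
   matrix in a single call (error checking is elided):
.vb
     PetscInt d_nnz[] = {3,3,2},o_nnz[] = {2,1,1};
     Mat      A;
     MatCreateAIJ(PETSC_COMM_WORLD,3,3,8,8,0,d_nnz,0,o_nnz,&A);
.ve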
4040 
4041    Level: intermediate
4042 
4043 .keywords: matrix, aij, compressed row, sparse, parallel
4044 
4045 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
4046           MATMPIAIJ, MatCreateMPIAIJWithArrays()
4047 @*/
4048 PetscErrorCode  MatCreateAIJ(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
4049 {
4050   PetscErrorCode ierr;
4051   PetscMPIInt    size;
4052 
4053   PetscFunctionBegin;
4054   ierr = MatCreate(comm,A);CHKERRQ(ierr);
4055   ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
4056   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4057   if (size > 1) {
4058     ierr = MatSetType(*A,MATMPIAIJ);CHKERRQ(ierr);
4059     ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
4060   } else {
4061     ierr = MatSetType(*A,MATSEQAIJ);CHKERRQ(ierr);
4062     ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
4063   }
4064   PetscFunctionReturn(0);
4065 }
4066 
4067 PetscErrorCode  MatMPIAIJGetSeqAIJ(Mat A,Mat *Ad,Mat *Ao,const PetscInt *colmap[])
4068 {
4069   Mat_MPIAIJ     *a = (Mat_MPIAIJ*)A->data;
4070   PetscBool      flg;
4071   PetscErrorCode ierr;
4072 
4073   PetscFunctionBegin;
4074   ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&flg);CHKERRQ(ierr);
4075   if (!flg) SETERRQ(PetscObjectComm((PetscObject)A),PETSC_ERR_SUP,"This function requires a MATMPIAIJ matrix as input");
4076   if (Ad)     *Ad     = a->A;
4077   if (Ao)     *Ao     = a->B;
4078   if (colmap) *colmap = a->garray;
4079   PetscFunctionReturn(0);
4080 }
4081 
4082 PetscErrorCode MatCreateMPIMatConcatenateSeqMat_MPIAIJ(MPI_Comm comm,Mat inmat,PetscInt n,MatReuse scall,Mat *outmat)
4083 {
4084   PetscErrorCode ierr;
4085   PetscInt       m,N,i,rstart,nnz,Ii;
4086   PetscInt       *indx;
4087   PetscScalar    *values;
4088 
4089   PetscFunctionBegin;
4090   ierr = MatGetSize(inmat,&m,&N);CHKERRQ(ierr);
4091   if (scall == MAT_INITIAL_MATRIX) { /* symbolic phase */
4092     PetscInt       *dnz,*onz,sum,bs,cbs;
4093 
4094     if (n == PETSC_DECIDE) {
4095       ierr = PetscSplitOwnership(comm,&n,&N);CHKERRQ(ierr);
4096     }
4097     /* Check sum(n) = N */
4098     ierr = MPIU_Allreduce(&n,&sum,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
4099     if (sum != N) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_INCOMP,"Sum of local columns != global columns %d",N);
4100 
4101     ierr    = MPI_Scan(&m, &rstart,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
4102     rstart -= m;
4103 
4104     ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr);
4105     for (i=0; i<m; i++) {
4106       ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);CHKERRQ(ierr);
4107       ierr = MatPreallocateSet(i+rstart,nnz,indx,dnz,onz);CHKERRQ(ierr);
4108       ierr = MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,NULL);CHKERRQ(ierr);
4109     }
4110 
4111     ierr = MatCreate(comm,outmat);CHKERRQ(ierr);
4112     ierr = MatSetSizes(*outmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
4113     ierr = MatGetBlockSizes(inmat,&bs,&cbs);CHKERRQ(ierr);
4114     ierr = MatSetBlockSizes(*outmat,bs,cbs);CHKERRQ(ierr);
4115     ierr = MatSetType(*outmat,MATAIJ);CHKERRQ(ierr);
4116     ierr = MatSeqAIJSetPreallocation(*outmat,0,dnz);CHKERRQ(ierr);
4117     ierr = MatMPIAIJSetPreallocation(*outmat,0,dnz,0,onz);CHKERRQ(ierr);
4118     ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
4119   }
4120 
4121   /* numeric phase */
4122   ierr = MatGetOwnershipRange(*outmat,&rstart,NULL);CHKERRQ(ierr);
4123   for (i=0; i<m; i++) {
4124     ierr = MatGetRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr);
4125     Ii   = i + rstart;
4126     ierr = MatSetValues(*outmat,1,&Ii,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr);
4127     ierr = MatRestoreRow_SeqAIJ(inmat,i,&nnz,&indx,&values);CHKERRQ(ierr);
4128   }
4129   ierr = MatAssemblyBegin(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4130   ierr = MatAssemblyEnd(*outmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4131   PetscFunctionReturn(0);
4132 }
4133 
4134 PetscErrorCode MatFileSplit(Mat A,char *outfile)
4135 {
4136   PetscErrorCode    ierr;
4137   PetscMPIInt       rank;
4138   PetscInt          m,N,i,rstart,nnz;
4139   size_t            len;
4140   const PetscInt    *indx;
4141   PetscViewer       out;
4142   char              *name;
4143   Mat               B;
4144   const PetscScalar *values;
4145 
4146   PetscFunctionBegin;
4147   ierr = MatGetLocalSize(A,&m,0);CHKERRQ(ierr);
4148   ierr = MatGetSize(A,0,&N);CHKERRQ(ierr);
4149   /* Should this be the type of the diagonal block of A? */
4150   ierr = MatCreate(PETSC_COMM_SELF,&B);CHKERRQ(ierr);
4151   ierr = MatSetSizes(B,m,N,m,N);CHKERRQ(ierr);
4152   ierr = MatSetBlockSizesFromMats(B,A,A);CHKERRQ(ierr);
4153   ierr = MatSetType(B,MATSEQAIJ);CHKERRQ(ierr);
4154   ierr = MatSeqAIJSetPreallocation(B,0,NULL);CHKERRQ(ierr);
4155   ierr = MatGetOwnershipRange(A,&rstart,0);CHKERRQ(ierr);
4156   for (i=0; i<m; i++) {
4157     ierr = MatGetRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr);
4158     ierr = MatSetValues(B,1,&i,nnz,indx,values,INSERT_VALUES);CHKERRQ(ierr);
4159     ierr = MatRestoreRow(A,i+rstart,&nnz,&indx,&values);CHKERRQ(ierr);
4160   }
4161   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4162   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4163 
4164   ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)A),&rank);CHKERRQ(ierr);
4165   ierr = PetscStrlen(outfile,&len);CHKERRQ(ierr);
4166   ierr = PetscMalloc1(len+16,&name);CHKERRQ(ierr); /* leave room for ".<rank>" with many digits */
4167   ierr = PetscSNPrintf(name,len+16,"%s.%d",outfile,rank);CHKERRQ(ierr);
4168   ierr = PetscViewerBinaryOpen(PETSC_COMM_SELF,name,FILE_MODE_APPEND,&out);CHKERRQ(ierr);
4169   ierr = PetscFree(name);CHKERRQ(ierr);
4170   ierr = MatView(B,out);CHKERRQ(ierr);
4171   ierr = PetscViewerDestroy(&out);CHKERRQ(ierr);
4172   ierr = MatDestroy(&B);CHKERRQ(ierr);
4173   PetscFunctionReturn(0);
4174 }
4175 
4176 PetscErrorCode  MatDestroy_MPIAIJ_SeqsToMPI(Mat A)
4177 {
4178   PetscErrorCode      ierr;
4179   Mat_Merge_SeqsToMPI *merge;
4180   PetscContainer      container;
4181 
4182   PetscFunctionBegin;
4183   ierr = PetscObjectQuery((PetscObject)A,"MatMergeSeqsToMPI",(PetscObject*)&container);CHKERRQ(ierr);
4184   if (container) {
4185     ierr = PetscContainerGetPointer(container,(void**)&merge);CHKERRQ(ierr);
4186     ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
4187     ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
4188     ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
4189     ierr = PetscFree(merge->bi);CHKERRQ(ierr);
4190     ierr = PetscFree(merge->bj);CHKERRQ(ierr);
4191     ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
4192     ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
4193     ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
4194     ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
4195     ierr = PetscFree(merge->coi);CHKERRQ(ierr);
4196     ierr = PetscFree(merge->coj);CHKERRQ(ierr);
4197     ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
4198     ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
4199     ierr = PetscFree(merge);CHKERRQ(ierr);
4200     ierr = PetscObjectCompose((PetscObject)A,"MatMergeSeqsToMPI",0);CHKERRQ(ierr);
4201   }
4202   ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
4203   PetscFunctionReturn(0);
4204 }
4205 
4206 #include <../src/mat/utils/freespace.h>
4207 #include <petscbt.h>
4208 
4209 PetscErrorCode  MatCreateMPIAIJSumSeqAIJNumeric(Mat seqmat,Mat mpimat)
4210 {
4211   PetscErrorCode      ierr;
4212   MPI_Comm            comm;
4213   Mat_SeqAIJ          *a  =(Mat_SeqAIJ*)seqmat->data;
4214   PetscMPIInt         size,rank,taga,*len_s;
4215   PetscInt            N=mpimat->cmap->N,i,j,*owners,*ai=a->i,*aj;
4216   PetscInt            proc,m;
4217   PetscInt            **buf_ri,**buf_rj;
4218   PetscInt            k,anzi,*bj_i,*bi,*bj,arow,bnzi,nextaj;
4219   PetscInt            nrows,**buf_ri_k,**nextrow,**nextai;
4220   MPI_Request         *s_waits,*r_waits;
4221   MPI_Status          *status;
4222   MatScalar           *aa=a->a;
4223   MatScalar           **abuf_r,*ba_i;
4224   Mat_Merge_SeqsToMPI *merge;
4225   PetscContainer      container;
4226 
4227   PetscFunctionBegin;
4228   ierr = PetscObjectGetComm((PetscObject)mpimat,&comm);CHKERRQ(ierr);
4229   ierr = PetscLogEventBegin(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr);
4230 
4231   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4232   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
4233 
4234   ierr = PetscObjectQuery((PetscObject)mpimat,"MatMergeSeqsToMPI",(PetscObject*)&container);CHKERRQ(ierr);
4235   ierr = PetscContainerGetPointer(container,(void**)&merge);CHKERRQ(ierr);
4236 
4237   bi     = merge->bi;
4238   bj     = merge->bj;
4239   buf_ri = merge->buf_ri;
4240   buf_rj = merge->buf_rj;
4241 
4242   ierr   = PetscMalloc1(size,&status);CHKERRQ(ierr);
4243   owners = merge->rowmap->range;
4244   len_s  = merge->len_s;
4245 
4246   /* send and recv matrix values */
4247   /*-----------------------------*/
4248   ierr = PetscObjectGetNewTag((PetscObject)mpimat,&taga);CHKERRQ(ierr);
4249   ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
4250 
4251   ierr = PetscMalloc1(merge->nsend+1,&s_waits);CHKERRQ(ierr);
4252   for (proc=0,k=0; proc<size; proc++) {
4253     if (!len_s[proc]) continue;
4254     i    = owners[proc];
4255     ierr = MPI_Isend(aa+ai[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
4256     k++;
4257   }
4258 
4259   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
4260   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
4261   ierr = PetscFree(status);CHKERRQ(ierr);
4262 
4263   ierr = PetscFree(s_waits);CHKERRQ(ierr);
4264   ierr = PetscFree(r_waits);CHKERRQ(ierr);
4265 
4266   /* insert mat values of mpimat */
4267   /*----------------------------*/
4268   ierr = PetscMalloc1(N,&ba_i);CHKERRQ(ierr);
4269   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);CHKERRQ(ierr);
4270 
4271   for (k=0; k<merge->nrecv; k++) {
4272     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
4273     nrows       = *(buf_ri_k[k]);
4274     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of the k-th received i-structure */
4275     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
4276   }
4277 
4278   /* set values of ba */
4279   m = merge->rowmap->n;
4280   for (i=0; i<m; i++) {
4281     arow = owners[rank] + i;
4282     bj_i = bj+bi[i];  /* col indices of the i-th row of mpimat */
4283     bnzi = bi[i+1] - bi[i];
4284     ierr = PetscMemzero(ba_i,bnzi*sizeof(PetscScalar));CHKERRQ(ierr);
4285 
4286     /* add local non-zero vals of this proc's seqmat into ba */
4287     anzi   = ai[arow+1] - ai[arow];
4288     aj     = a->j + ai[arow];
4289     aa     = a->a + ai[arow];
4290     nextaj = 0;
4291     for (j=0; nextaj<anzi; j++) {
4292       if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4293         ba_i[j] += aa[nextaj++];
4294       }
4295     }
4296 
4297     /* add received vals into ba */
4298     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4299       /* i-th row */
4300       if (i == *nextrow[k]) {
4301         anzi   = *(nextai[k]+1) - *nextai[k];
4302         aj     = buf_rj[k] + *(nextai[k]);
4303         aa     = abuf_r[k] + *(nextai[k]);
4304         nextaj = 0;
4305         for (j=0; nextaj<anzi; j++) {
4306           if (*(bj_i + j) == aj[nextaj]) { /* bcol == acol */
4307             ba_i[j] += aa[nextaj++];
4308           }
4309         }
4310         nextrow[k]++; nextai[k]++;
4311       }
4312     }
4313     ierr = MatSetValues(mpimat,1,&arow,bnzi,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
4314   }
4315   ierr = MatAssemblyBegin(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4316   ierr = MatAssemblyEnd(mpimat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
4317 
4318   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
4319   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
4320   ierr = PetscFree(ba_i);CHKERRQ(ierr);
4321   ierr = PetscFree3(buf_ri_k,nextrow,nextai);CHKERRQ(ierr);
4322   ierr = PetscLogEventEnd(MAT_Seqstompinum,seqmat,0,0,0);CHKERRQ(ierr);
4323   PetscFunctionReturn(0);
4324 }
4325 
4326 PetscErrorCode  MatCreateMPIAIJSumSeqAIJSymbolic(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,Mat *mpimat)
4327 {
4328   PetscErrorCode      ierr;
4329   Mat                 B_mpi;
4330   Mat_SeqAIJ          *a=(Mat_SeqAIJ*)seqmat->data;
4331   PetscMPIInt         size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
4332   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
4333   PetscInt            M=seqmat->rmap->n,N=seqmat->cmap->n,i,*owners,*ai=a->i,*aj=a->j;
4334   PetscInt            len,proc,*dnz,*onz,bs,cbs;
4335   PetscInt            k,anzi,*bi,*bj,*lnk,nlnk,arow,bnzi,nspacedouble=0;
4336   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextai;
4337   MPI_Request         *si_waits,*sj_waits,*ri_waits,*rj_waits;
4338   MPI_Status          *status;
4339   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
4340   PetscBT             lnkbt;
4341   Mat_Merge_SeqsToMPI *merge;
4342   PetscContainer      container;
4343 
4344   PetscFunctionBegin;
4345   ierr = PetscLogEventBegin(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr);
4346 
4347   /* make sure it is a PETSc comm */
4348   ierr = PetscCommDuplicate(comm,&comm,NULL);CHKERRQ(ierr);
4349   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4350   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
4351 
4352   ierr = PetscNew(&merge);CHKERRQ(ierr);
4353   ierr = PetscMalloc1(size,&status);CHKERRQ(ierr);
4354 
4355   /* determine row ownership */
4356   /*---------------------------------------------------------*/
4357   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
4358   ierr = PetscLayoutSetLocalSize(merge->rowmap,m);CHKERRQ(ierr);
4359   ierr = PetscLayoutSetSize(merge->rowmap,M);CHKERRQ(ierr);
4360   ierr = PetscLayoutSetBlockSize(merge->rowmap,1);CHKERRQ(ierr);
4361   ierr = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
4362   ierr = PetscMalloc1(size,&len_si);CHKERRQ(ierr);
4363   ierr = PetscMalloc1(size,&merge->len_s);CHKERRQ(ierr);
4364 
4365   m      = merge->rowmap->n;
4366   owners = merge->rowmap->range;
4367 
4368   /* determine the number of messages to send, their lengths */
4369   /*---------------------------------------------------------*/
4370   len_s = merge->len_s;
4371 
4372   len          = 0; /* length of buf_si[] */
4373   merge->nsend = 0;
4374   for (proc=0; proc<size; proc++) {
4375     len_si[proc] = 0;
4376     if (proc == rank) {
4377       len_s[proc] = 0;
4378     } else {
4379       len_si[proc] = owners[proc+1] - owners[proc] + 1;
4380       len_s[proc]  = ai[owners[proc+1]] - ai[owners[proc]]; /* num of rows to be sent to [proc] */
4381     }
4382     if (len_s[proc]) {
4383       merge->nsend++;
4384       nrows = 0;
4385       for (i=owners[proc]; i<owners[proc+1]; i++) {
4386         if (ai[i+1] > ai[i]) nrows++;
4387       }
4388       len_si[proc] = 2*(nrows+1);
4389       len         += len_si[proc];
4390     }
4391   }
4392 
4393   /* determine the number and length of messages to receive for ij-structure */
4394   /*-------------------------------------------------------------------------*/
4395   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
4396   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
4397 
4398   /* post the Irecv of j-structure */
4399   /*-------------------------------*/
4400   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
4401   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);CHKERRQ(ierr);
4402 
4403   /* post the Isend of j-structure */
4404   /*--------------------------------*/
4405   ierr = PetscMalloc2(merge->nsend,&si_waits,merge->nsend,&sj_waits);CHKERRQ(ierr);
4406 
4407   for (proc=0, k=0; proc<size; proc++) {
4408     if (!len_s[proc]) continue;
4409     i    = owners[proc];
4410     ierr = MPI_Isend(aj+ai[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);CHKERRQ(ierr);
4411     k++;
4412   }
4413 
4414   /* receives and sends of j-structure are complete */
4415   /*------------------------------------------------*/
4416   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,rj_waits,status);CHKERRQ(ierr);}
4417   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,sj_waits,status);CHKERRQ(ierr);}
4418 
4419   /* send and recv i-structure */
4420   /*---------------------------*/
4421   ierr = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
4422   ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);CHKERRQ(ierr);
4423 
4424   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
4425   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
4426   for (proc=0,k=0; proc<size; proc++) {
4427     if (!len_s[proc]) continue;
4428     /* form outgoing message for i-structure:
4429          buf_si[0]:                 nrows to be sent
4430                [1:nrows]:           row index (global)
4431                [nrows+1:2*nrows+1]: i-structure index
4432     */
4433     /*-------------------------------------------*/
4434     nrows       = len_si[proc]/2 - 1;
4435     buf_si_i    = buf_si + nrows+1;
4436     buf_si[0]   = nrows;
4437     buf_si_i[0] = 0;
4438     nrows       = 0;
4439     for (i=owners[proc]; i<owners[proc+1]; i++) {
4440       anzi = ai[i+1] - ai[i];
4441       if (anzi) {
4442         buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
4443         buf_si[nrows+1]   = i-owners[proc]; /* local row index */
4444         nrows++;
4445       }
4446     }
4447     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);CHKERRQ(ierr);
4448     k++;
4449     buf_si += len_si[proc];
4450   }
4451 
4452   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,ri_waits,status);CHKERRQ(ierr);}
4453   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,si_waits,status);CHKERRQ(ierr);}
4454 
4455   ierr = PetscInfo2(seqmat,"nsend: %D, nrecv: %D\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
4456   for (i=0; i<merge->nrecv; i++) {
4457     ierr = PetscInfo3(seqmat,"recv len_ri=%D, len_rj=%D from [%D]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
4458   }
4459 
4460   ierr = PetscFree(len_si);CHKERRQ(ierr);
4461   ierr = PetscFree(len_ri);CHKERRQ(ierr);
4462   ierr = PetscFree(rj_waits);CHKERRQ(ierr);
4463   ierr = PetscFree2(si_waits,sj_waits);CHKERRQ(ierr);
4464   ierr = PetscFree(ri_waits);CHKERRQ(ierr);
4465   ierr = PetscFree(buf_s);CHKERRQ(ierr);
4466   ierr = PetscFree(status);CHKERRQ(ierr);
4467 
4468   /* compute a local seq matrix in each processor */
4469   /*----------------------------------------------*/
4470   /* allocate bi array and free space for accumulating nonzero column info */
4471   ierr  = PetscMalloc1(m+1,&bi);CHKERRQ(ierr);
4472   bi[0] = 0;
4473 
4474   /* create and initialize a linked list */
4475   nlnk = N+1;
4476   ierr = PetscLLCreate(N,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4477 
4478   /* initial FreeSpace size is 2*(num of local nnz(seqmat)) */
4479   len  = ai[owners[rank+1]] - ai[owners[rank]];
4480   ierr = PetscFreeSpaceGet(PetscIntMultTruncate(2,len)+1,&free_space);CHKERRQ(ierr);
4481 
4482   current_space = free_space;
4483 
4484   /* determine symbolic info for each local row */
4485   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextai);CHKERRQ(ierr);
4486 
4487   for (k=0; k<merge->nrecv; k++) {
4488     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
4489     nrows       = *buf_ri_k[k];
4490     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
4491     nextai[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
4492   }
4493 
4494   ierr = MatPreallocateInitialize(comm,m,n,dnz,onz);CHKERRQ(ierr);
4495   len  = 0;
4496   for (i=0; i<m; i++) {
4497     bnzi = 0;
4498     /* add local non-zero cols of this proc's seqmat into lnk */
4499     arow  = owners[rank] + i;
4500     anzi  = ai[arow+1] - ai[arow];
4501     aj    = a->j + ai[arow];
4502     ierr  = PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4503     bnzi += nlnk;
4504     /* add received col data into lnk */
4505     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
4506       if (i == *nextrow[k]) { /* i-th row */
4507         anzi  = *(nextai[k]+1) - *nextai[k];
4508         aj    = buf_rj[k] + *nextai[k];
4509         ierr  = PetscLLAddSorted(anzi,aj,N,nlnk,lnk,lnkbt);CHKERRQ(ierr);
4510         bnzi += nlnk;
4511         nextrow[k]++; nextai[k]++;
4512       }
4513     }
4514     if (len < bnzi) len = bnzi;  /* =max(bnzi) */
4515 
4516     /* if free space is not available, make more free space */
4517     if (current_space->local_remaining<bnzi) {
4518       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(bnzi,current_space->total_array_size),&current_space);CHKERRQ(ierr);
4519       nspacedouble++;
4520     }
4521     /* copy data into free space, then initialize lnk */
4522     ierr = PetscLLClean(N,N,bnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
4523     ierr = MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);CHKERRQ(ierr);
4524 
4525     current_space->array           += bnzi;
4526     current_space->local_used      += bnzi;
4527     current_space->local_remaining -= bnzi;
4528 
4529     bi[i+1] = bi[i] + bnzi;
4530   }
4531 
4532   ierr = PetscFree3(buf_ri_k,nextrow,nextai);CHKERRQ(ierr);
4533 
4534   ierr = PetscMalloc1(bi[m]+1,&bj);CHKERRQ(ierr);
4535   ierr = PetscFreeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
4536   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
4537 
4538   /* create symbolic parallel matrix B_mpi */
4539   /*---------------------------------------*/
4540   ierr = MatGetBlockSizes(seqmat,&bs,&cbs);CHKERRQ(ierr);
4541   ierr = MatCreate(comm,&B_mpi);CHKERRQ(ierr);
4542   if (n==PETSC_DECIDE) {
4543     ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,N);CHKERRQ(ierr);
4544   } else {
4545     ierr = MatSetSizes(B_mpi,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
4546   }
4547   ierr = MatSetBlockSizes(B_mpi,bs,cbs);CHKERRQ(ierr);
4548   ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
4549   ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
4550   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
4551   ierr = MatSetOption(B_mpi,MAT_NEW_NONZERO_ALLOCATION_ERR,PETSC_FALSE);CHKERRQ(ierr);
4552 
4553   /* B_mpi is not ready for use - assembly will be done by MatCreateMPIAIJSumSeqAIJNumeric() */
4554   B_mpi->assembled    = PETSC_FALSE;
4555   B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
4556   merge->bi           = bi;
4557   merge->bj           = bj;
4558   merge->buf_ri       = buf_ri;
4559   merge->buf_rj       = buf_rj;
4560   merge->coi          = NULL;
4561   merge->coj          = NULL;
4562   merge->owners_co    = NULL;
4563 
4564   ierr = PetscCommDestroy(&comm);CHKERRQ(ierr);
4565 
4566   /* attach the supporting struct to B_mpi for reuse */
4567   ierr    = PetscContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
4568   ierr    = PetscContainerSetPointer(container,merge);CHKERRQ(ierr);
4569   ierr    = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
4570   ierr    = PetscContainerDestroy(&container);CHKERRQ(ierr);
4571   *mpimat = B_mpi;
4572 
4573   ierr = PetscLogEventEnd(MAT_Seqstompisym,seqmat,0,0,0);CHKERRQ(ierr);
4574   PetscFunctionReturn(0);
4575 }
4576 
4577 /*@C
4578       MatCreateMPIAIJSumSeqAIJ - Creates a MATMPIAIJ matrix by adding sequential
4579                  matrices from each processor
4580 
4581     Collective on MPI_Comm
4582 
4583    Input Parameters:
4584 +    comm - the communicator the parallel matrix will live on
4585 .    seqmat - the input sequential matrix on each process
4586 .    m - number of local rows (or PETSC_DECIDE)
4587 .    n - number of local columns (or PETSC_DECIDE)
4588 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4589 
4590    Output Parameter:
4591 .    mpimat - the parallel matrix generated
4592 
4593     Level: advanced
4594 
4595    Notes:
4596      The dimensions of the sequential matrix in each processor MUST be the same.
4597      The input seqmat is included in the container "Mat_Merge_SeqsToMPI", and will be
4598      destroyed when mpimat is destroyed. Call PetscObjectQuery() to access seqmat.
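
     A minimal sketch of the intended call sequence (error checking is elided);
     the MAT_REUSE_MATRIX call assumes the nonzero pattern of seqmat is unchanged:
.vb
       Mat mpimat;
       MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_INITIAL_MATRIX,&mpimat);
       MatCreateMPIAIJSumSeqAIJ(comm,seqmat,PETSC_DECIDE,PETSC_DECIDE,MAT_REUSE_MATRIX,&mpimat);
.ve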
4599 @*/
4600 PetscErrorCode  MatCreateMPIAIJSumSeqAIJ(MPI_Comm comm,Mat seqmat,PetscInt m,PetscInt n,MatReuse scall,Mat *mpimat)
4601 {
4602   PetscErrorCode ierr;
4603   PetscMPIInt    size;
4604 
4605   PetscFunctionBegin;
4606   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4607   if (size == 1) {
4608     ierr = PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4609     if (scall == MAT_INITIAL_MATRIX) {
4610       ierr = MatDuplicate(seqmat,MAT_COPY_VALUES,mpimat);CHKERRQ(ierr);
4611     } else {
4612       ierr = MatCopy(seqmat,*mpimat,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
4613     }
4614     ierr = PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4615     PetscFunctionReturn(0);
4616   }
4617   ierr = PetscLogEventBegin(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4618   if (scall == MAT_INITIAL_MATRIX) {
4619     ierr = MatCreateMPIAIJSumSeqAIJSymbolic(comm,seqmat,m,n,mpimat);CHKERRQ(ierr);
4620   }
4621   ierr = MatCreateMPIAIJSumSeqAIJNumeric(seqmat,*mpimat);CHKERRQ(ierr);
4622   ierr = PetscLogEventEnd(MAT_Seqstompi,seqmat,0,0,0);CHKERRQ(ierr);
4623   PetscFunctionReturn(0);
4624 }
4625 
4626 /*@
4627      MatMPIAIJGetLocalMat - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and putting them into a sequential matrix with
4628           mlocal rows and n columns, where mlocal is the local row count obtained with MatGetLocalSize() and n is the global column count obtained
4629           with MatGetSize().
4630 
4631     Not Collective
4632 
4633    Input Parameters:
4634 +    A - the matrix
4635 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4636 
4637    Output Parameter:
4638 .    A_loc - the local sequential matrix generated
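
    A minimal usage sketch (error checking is elided); the MAT_REUSE_MATRIX call
    refreshes the values of A_loc after the entries of A have changed while its
    nonzero pattern stayed the same:
.vb
      Mat A_loc;
      MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc);
      MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc);
      MatDestroy(&A_loc);
.ve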
4639 
4640     Level: developer
4641 
4642 .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMatCondensed()
4643 
4644 @*/
4645 PetscErrorCode  MatMPIAIJGetLocalMat(Mat A,MatReuse scall,Mat *A_loc)
4646 {
4647   PetscErrorCode ierr;
4648   Mat_MPIAIJ     *mpimat=(Mat_MPIAIJ*)A->data;
4649   Mat_SeqAIJ     *mat,*a,*b;
4650   PetscInt       *ai,*aj,*bi,*bj,*cmap=mpimat->garray;
4651   MatScalar      *aa,*ba,*cam;
4652   PetscScalar    *ca;
4653   PetscInt       am=A->rmap->n,i,j,k,cstart=A->cmap->rstart;
4654   PetscInt       *ci,*cj,col,ncols_d,ncols_o,jo;
4655   PetscBool      match;
4656   MPI_Comm       comm;
4657   PetscMPIInt    size;
4658 
4659   PetscFunctionBegin;
4660   ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);CHKERRQ(ierr);
4661   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4662   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
4663   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4664   if (size == 1 && scall == MAT_REUSE_MATRIX) PetscFunctionReturn(0);
4665 
4666   ierr = PetscLogEventBegin(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4667   a = (Mat_SeqAIJ*)(mpimat->A)->data;
4668   b = (Mat_SeqAIJ*)(mpimat->B)->data;
4669   ai = a->i; aj = a->j; bi = b->i; bj = b->j;
4670   aa = a->a; ba = b->a;
4671   if (scall == MAT_INITIAL_MATRIX) {
4672     if (size == 1) {
4673       ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ai,aj,aa,A_loc);CHKERRQ(ierr);
4674       PetscFunctionReturn(0);
4675     }
4676 
4677     ierr  = PetscMalloc1(1+am,&ci);CHKERRQ(ierr);
4678     ci[0] = 0;
4679     for (i=0; i<am; i++) {
4680       ci[i+1] = ci[i] + (ai[i+1] - ai[i]) + (bi[i+1] - bi[i]);
4681     }
4682     ierr = PetscMalloc1(1+ci[am],&cj);CHKERRQ(ierr);
4683     ierr = PetscMalloc1(1+ci[am],&ca);CHKERRQ(ierr);
4684     k    = 0;
4685     for (i=0; i<am; i++) {
4686       ncols_o = bi[i+1] - bi[i];
4687       ncols_d = ai[i+1] - ai[i];
4688       /* off-diagonal portion of A */
4689       for (jo=0; jo<ncols_o; jo++) {
4690         col = cmap[*bj];
4691         if (col >= cstart) break;
4692         cj[k]   = col; bj++;
4693         ca[k++] = *ba++;
4694       }
4695       /* diagonal portion of A */
4696       for (j=0; j<ncols_d; j++) {
4697         cj[k]   = cstart + *aj++;
4698         ca[k++] = *aa++;
4699       }
4700       /* off-diagonal portion of A */
4701       for (j=jo; j<ncols_o; j++) {
4702         cj[k]   = cmap[*bj++];
4703         ca[k++] = *ba++;
4704       }
4705     }
4706     /* put together the new matrix */
4707     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,A->cmap->N,ci,cj,ca,A_loc);CHKERRQ(ierr);
4708     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
4709     /* Since these are PETSc arrays, change flags to free them as necessary. */
4710     mat          = (Mat_SeqAIJ*)(*A_loc)->data;
4711     mat->free_a  = PETSC_TRUE;
4712     mat->free_ij = PETSC_TRUE;
4713     mat->nonew   = 0;
4714   } else if (scall == MAT_REUSE_MATRIX) {
4715     mat=(Mat_SeqAIJ*)(*A_loc)->data;
4716     ci = mat->i; cj = mat->j; cam = mat->a;
4717     for (i=0; i<am; i++) {
4718       /* off-diagonal portion of A */
4719       ncols_o = bi[i+1] - bi[i];
4720       for (jo=0; jo<ncols_o; jo++) {
4721         col = cmap[*bj];
4722         if (col >= cstart) break;
4723         *cam++ = *ba++; bj++;
4724       }
4725       /* diagonal portion of A */
4726       ncols_d = ai[i+1] - ai[i];
4727       for (j=0; j<ncols_d; j++) *cam++ = *aa++;
4728       /* off-diagonal portion of A */
4729       for (j=jo; j<ncols_o; j++) {
4730         *cam++ = *ba++; bj++;
4731       }
4732     }
4733   } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",(int)scall);
4734   ierr = PetscLogEventEnd(MAT_Getlocalmat,A,0,0,0);CHKERRQ(ierr);
4735   PetscFunctionReturn(0);
4736 }
4737 
4738 /*@C
4739      MatMPIAIJGetLocalMatCondensed - Creates a SeqAIJ matrix from a MATMPIAIJ matrix by taking all its local rows and NON-ZERO columns
4740 
4741     Not Collective
4742 
4743    Input Parameters:
4744 +    A - the matrix
4745 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4746 -    row, col - index sets of rows and columns to extract (or NULL)
4747 
4748    Output Parameter:
4749 .    A_loc - the local sequential matrix generated
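
    A minimal usage sketch (error checking is elided); passing NULL for row and
    col selects all local rows and all nonzero columns:
.vb
      Mat A_loc;
      MatMPIAIJGetLocalMatCondensed(A,MAT_INITIAL_MATRIX,NULL,NULL,&A_loc);
      MatDestroy(&A_loc);
.ve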
4750 
4751     Level: developer
4752 
4753 .seealso: MatGetOwnershipRange(), MatMPIAIJGetLocalMat()
4754 
4755 @*/
4756 PetscErrorCode  MatMPIAIJGetLocalMatCondensed(Mat A,MatReuse scall,IS *row,IS *col,Mat *A_loc)
4757 {
4758   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
4759   PetscErrorCode ierr;
4760   PetscInt       i,start,end,ncols,nzA,nzB,*cmap,imark,*idx;
4761   IS             isrowa,iscola;
4762   Mat            *aloc;
4763   PetscBool      match;
4764 
4765   PetscFunctionBegin;
4766   ierr = PetscObjectTypeCompare((PetscObject)A,MATMPIAIJ,&match);CHKERRQ(ierr);
4767   if (!match) SETERRQ(PetscObjectComm((PetscObject)A), PETSC_ERR_SUP,"Requires MATMPIAIJ matrix as input");
4768   ierr = PetscLogEventBegin(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4769   if (!row) {
4770     start = A->rmap->rstart; end = A->rmap->rend;
4771     ierr  = ISCreateStride(PETSC_COMM_SELF,end-start,start,1,&isrowa);CHKERRQ(ierr);
4772   } else {
4773     isrowa = *row;
4774   }
4775   if (!col) {
4776     start = A->cmap->rstart;
4777     cmap  = a->garray;
4778     nzA   = a->A->cmap->n;
4779     nzB   = a->B->cmap->n;
4780     ierr  = PetscMalloc1(nzA+nzB, &idx);CHKERRQ(ierr);
4781     ncols = 0;
4782     for (i=0; i<nzB; i++) {
4783       if (cmap[i] < start) idx[ncols++] = cmap[i];
4784       else break;
4785     }
4786     imark = i;
4787     for (i=0; i<nzA; i++) idx[ncols++] = start + i;
4788     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i];
4789     ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&iscola);CHKERRQ(ierr);
4790   } else {
4791     iscola = *col;
4792   }
4793   if (scall != MAT_INITIAL_MATRIX) {
4794     ierr    = PetscMalloc1(1,&aloc);CHKERRQ(ierr);
4795     aloc[0] = *A_loc;
4796   }
4797   ierr   = MatCreateSubMatrices(A,1,&isrowa,&iscola,scall,&aloc);CHKERRQ(ierr);
4798   *A_loc = aloc[0];
4799   ierr   = PetscFree(aloc);CHKERRQ(ierr);
4800   if (!row) {
4801     ierr = ISDestroy(&isrowa);CHKERRQ(ierr);
4802   }
4803   if (!col) {
4804     ierr = ISDestroy(&iscola);CHKERRQ(ierr);
4805   }
4806   ierr = PetscLogEventEnd(MAT_Getlocalmatcondensed,A,0,0,0);CHKERRQ(ierr);
4807   PetscFunctionReturn(0);
4808 }
4809 
4810 /*@C
4811     MatGetBrowsOfAcols - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero columns of local A
4812 
4813     Collective on Mat
4814 
4815    Input Parameters:
4816 +    A,B - the matrices in mpiaij format
4817 .    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4818 -    rowb, colb - index sets of rows and columns of B to extract (or NULL)
4819 
4820    Output Parameter:
4821 +    rowb, colb - index sets of rows and columns of B to extract
4822 -    B_seq - the sequential matrix generated
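
    A minimal reuse sketch (error checking is elided); the index sets returned
    by the MAT_INITIAL_MATRIX call must be passed back in for MAT_REUSE_MATRIX:
.vb
      IS  rowb,colb;
      Mat B_seq;
      MatGetBrowsOfAcols(A,B,MAT_INITIAL_MATRIX,&rowb,&colb,&B_seq);
      MatGetBrowsOfAcols(A,B,MAT_REUSE_MATRIX,&rowb,&colb,&B_seq);
      ISDestroy(&rowb); ISDestroy(&colb); MatDestroy(&B_seq);
.ve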
4823 
4824     Level: developer
4825 
4826 @*/
4827 PetscErrorCode  MatGetBrowsOfAcols(Mat A,Mat B,MatReuse scall,IS *rowb,IS *colb,Mat *B_seq)
4828 {
4829   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
4830   PetscErrorCode ierr;
4831   PetscInt       *idx,i,start,ncols,nzA,nzB,*cmap,imark;
4832   IS             isrowb,iscolb;
4833   Mat            *bseq=NULL;
4834 
4835   PetscFunctionBegin;
4836   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
4837     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4838   }
4839   ierr = PetscLogEventBegin(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4840 
4841   if (scall == MAT_INITIAL_MATRIX) {
4842     start = A->cmap->rstart;
4843     cmap  = a->garray;
4844     nzA   = a->A->cmap->n;
4845     nzB   = a->B->cmap->n;
4846     ierr  = PetscMalloc1(nzA+nzB, &idx);CHKERRQ(ierr);
4847     ncols = 0;
4848     for (i=0; i<nzB; i++) {  /* row < local row index */
4849       if (cmap[i] < start) idx[ncols++] = cmap[i];
4850       else break;
4851     }
4852     imark = i;
4853     for (i=0; i<nzA; i++) idx[ncols++] = start + i;  /* local rows */
4854     for (i=imark; i<nzB; i++) idx[ncols++] = cmap[i]; /* row > local row index */
4855     ierr = ISCreateGeneral(PETSC_COMM_SELF,ncols,idx,PETSC_OWN_POINTER,&isrowb);CHKERRQ(ierr);
4856     ierr = ISCreateStride(PETSC_COMM_SELF,B->cmap->N,0,1,&iscolb);CHKERRQ(ierr);
4857   } else {
4858     if (!rowb || !colb) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"IS rowb and colb must be provided for MAT_REUSE_MATRIX");
4859     isrowb  = *rowb; iscolb = *colb;
4860     ierr    = PetscMalloc1(1,&bseq);CHKERRQ(ierr);
4861     bseq[0] = *B_seq;
4862   }
4863   ierr   = MatCreateSubMatrices(B,1,&isrowb,&iscolb,scall,&bseq);CHKERRQ(ierr);
4864   *B_seq = bseq[0];
4865   ierr   = PetscFree(bseq);CHKERRQ(ierr);
4866   if (!rowb) {
4867     ierr = ISDestroy(&isrowb);CHKERRQ(ierr);
4868   } else {
4869     *rowb = isrowb;
4870   }
4871   if (!colb) {
4872     ierr = ISDestroy(&iscolb);CHKERRQ(ierr);
4873   } else {
4874     *colb = iscolb;
4875   }
4876   ierr = PetscLogEventEnd(MAT_GetBrowsOfAcols,A,B,0,0);CHKERRQ(ierr);
4877   PetscFunctionReturn(0);
4878 }
4879 
4880 /*
4881     MatGetBrowsOfAoCols_MPIAIJ - Creates a SeqAIJ matrix by taking the rows of B that correspond to the nonzero
4882     columns of the OFF-DIAGONAL portion of local A
4883 
4884     Collective on Mat
4885 
4886    Input Parameters:
4887 +    A,B - the matrices in mpiaij format
4888 -    scall - either MAT_INITIAL_MATRIX or MAT_REUSE_MATRIX
4889 
4890    Output Parameter:
4891 +    startsj_s - starting point in B's sending j-arrays, saved for MAT_REUSE (or NULL)
4892 .    startsj_r - starting point in B's receiving j-arrays, saved for MAT_REUSE (or NULL)
4893 .    bufa_ptr - array for sending matrix values, saved for MAT_REUSE (or NULL)
4894 -    B_oth - the sequential matrix generated with size aBn=a->B->cmap->n by B->cmap->N
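
    A minimal reuse sketch (error checking is elided); the arrays returned by
    the MAT_INITIAL_MATRIX call are passed back in unchanged for MAT_REUSE_MATRIX:

      PetscInt  *startsj_s = NULL,*startsj_r = NULL;
      MatScalar *bufa = NULL;
      Mat       B_oth;
      MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_INITIAL_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);
      MatGetBrowsOfAoCols_MPIAIJ(A,B,MAT_REUSE_MATRIX,&startsj_s,&startsj_r,&bufa,&B_oth);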
4895 
4896     Level: developer
4897 
4898 */
4899 PetscErrorCode  MatGetBrowsOfAoCols_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscInt **startsj_s,PetscInt **startsj_r,MatScalar **bufa_ptr,Mat *B_oth)
4900 {
4901   VecScatter_MPI_General *gen_to,*gen_from;
4902   PetscErrorCode         ierr;
4903   Mat_MPIAIJ             *a=(Mat_MPIAIJ*)A->data;
4904   Mat_SeqAIJ             *b_oth;
4905   VecScatter             ctx =a->Mvctx;
4906   MPI_Comm               comm;
4907   PetscMPIInt            *rprocs,*sprocs,tag=((PetscObject)ctx)->tag,rank;
4908   PetscInt               *rowlen,*bufj,*bufJ,ncols,aBn=a->B->cmap->n,row,*b_othi,*b_othj;
4909   PetscInt               *rvalues,*svalues;
4910   MatScalar              *b_otha,*bufa,*bufA;
4911   PetscInt               i,j,k,l,ll,nrecvs,nsends,nrows,*srow,*rstarts,*rstartsj = 0,*sstarts,*sstartsj,len;
4912   MPI_Request            *rwaits = NULL,*swaits = NULL;
4913   MPI_Status             *sstatus,rstatus;
4914   PetscMPIInt            jj,size;
4915   PetscInt               *cols,sbs,rbs;
4916   PetscScalar            *vals;
4917 
4918   PetscFunctionBegin;
4919   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
4920   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
4921 
4922   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend) {
4923     SETERRQ4(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%D, %D) != (%D,%D)",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
4924   }
4925   ierr = PetscLogEventBegin(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
4926   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
4927 
4928   if (size == 1) {
4929     if (startsj_s) *startsj_s = NULL; /* assign through the pointers, not to the local parameter copies */
4930     if (bufa_ptr)  *bufa_ptr  = NULL;
4931     *B_oth    = NULL;
4932     PetscFunctionReturn(0);
4933   }
4934 
4935   gen_to   = (VecScatter_MPI_General*)ctx->todata;
4936   gen_from = (VecScatter_MPI_General*)ctx->fromdata;
4937   nrecvs   = gen_from->n;
4938   nsends   = gen_to->n;
4939 
4940   ierr    = PetscMalloc2(nrecvs,&rwaits,nsends,&swaits);CHKERRQ(ierr);
4941   srow    = gen_to->indices;    /* local row index to be sent */
4942   sstarts = gen_to->starts;
4943   sprocs  = gen_to->procs;
4944   sstatus = gen_to->sstatus;
4945   sbs     = gen_to->bs;
4946   rstarts = gen_from->starts;
4947   rprocs  = gen_from->procs;
4948   rbs     = gen_from->bs;
4949 
4950   if (!startsj_s || !bufa_ptr) scall = MAT_INITIAL_MATRIX;
4951   if (scall == MAT_INITIAL_MATRIX) {
4952     /* i-array */
4953     /*---------*/
4954     /*  post receives */
4955     ierr = PetscMalloc1(rbs*(rstarts[nrecvs] - rstarts[0]),&rvalues);CHKERRQ(ierr);
4956     for (i=0; i<nrecvs; i++) {
4957       rowlen = rvalues + rstarts[i]*rbs;
4958       nrows  = (rstarts[i+1]-rstarts[i])*rbs; /* num of indices to be received */
4959       ierr   = MPI_Irecv(rowlen,nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
4960     }
4961 
4962     /* pack the outgoing message */
4963     ierr = PetscMalloc2(nsends+1,&sstartsj,nrecvs+1,&rstartsj);CHKERRQ(ierr);
4964 
4965     sstartsj[0] = 0;
4966     rstartsj[0] = 0;
4967     len         = 0; /* total length of j or a array to be sent */
4968     k           = 0;
4969     ierr = PetscMalloc1(sbs*(sstarts[nsends] - sstarts[0]),&svalues);CHKERRQ(ierr);
4970     for (i=0; i<nsends; i++) {
4971       rowlen = svalues + sstarts[i]*sbs;
4972       nrows  = sstarts[i+1]-sstarts[i]; /* num of block rows */
4973       for (j=0; j<nrows; j++) {
4974         row = srow[k] + B->rmap->range[rank]; /* global row idx */
4975         for (l=0; l<sbs; l++) {
4976           ierr = MatGetRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);CHKERRQ(ierr); /* rowlength */
4977 
4978           rowlen[j*sbs+l] = ncols;
4979 
4980           len += ncols;
4981           ierr = MatRestoreRow_MPIAIJ(B,row+l,&ncols,NULL,NULL);CHKERRQ(ierr);
4982         }
4983         k++;
4984       }
4985       ierr = MPI_Isend(rowlen,nrows*sbs,MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
4986 
4987       sstartsj[i+1] = len;  /* starting point of (i+1)-th outgoing msg in bufj and bufa */
4988     }
4989     /* recvs and sends of i-array are completed */
4990     i = nrecvs;
4991     while (i--) {
4992       ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
4993     }
4994     if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
4995     ierr = PetscFree(svalues);CHKERRQ(ierr);
4996 
4997     /* allocate buffers for sending j and a arrays */
4998     ierr = PetscMalloc1(len+1,&bufj);CHKERRQ(ierr);
4999     ierr = PetscMalloc1(len+1,&bufa);CHKERRQ(ierr);
5000 
5001     /* create i-array of B_oth */
5002     ierr = PetscMalloc1(aBn+2,&b_othi);CHKERRQ(ierr);
5003 
5004     b_othi[0] = 0;
5005     len       = 0; /* total length of j or a array to be received */
5006     k         = 0;
5007     for (i=0; i<nrecvs; i++) {
5008       rowlen = rvalues + rstarts[i]*rbs;
5009       nrows  = rbs*(rstarts[i+1]-rstarts[i]); /* num of rows to be received */
5010       for (j=0; j<nrows; j++) {
5011         b_othi[k+1] = b_othi[k] + rowlen[j];
5012         ierr = PetscIntSumError(rowlen[j],len,&len);CHKERRQ(ierr);
5013         k++;
5014       }
5015       rstartsj[i+1] = len; /* starting point of (i+1)-th incoming msg in bufj and bufa */
5016     }
5017     ierr = PetscFree(rvalues);CHKERRQ(ierr);
5018 
5019     /* allocate space for j and a arrrays of B_oth */
5020     ierr = PetscMalloc1(b_othi[aBn]+1,&b_othj);CHKERRQ(ierr);
5021     ierr = PetscMalloc1(b_othi[aBn]+1,&b_otha);CHKERRQ(ierr);
5022 
5023     /* j-array */
5024     /*---------*/
5025     /*  post receives of j-array */
5026     for (i=0; i<nrecvs; i++) {
5027       nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5028       ierr  = MPI_Irecv(b_othj+rstartsj[i],nrows,MPIU_INT,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
5029     }
5030 
5031     /* pack the outgoing message j-array */
5032     k = 0;
5033     for (i=0; i<nsends; i++) {
5034       nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5035       bufJ  = bufj+sstartsj[i];
5036       for (j=0; j<nrows; j++) {
5037         row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5038         for (ll=0; ll<sbs; ll++) {
5039           ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);CHKERRQ(ierr);
5040           for (l=0; l<ncols; l++) {
5041             *bufJ++ = cols[l];
5042           }
5043           ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,&cols,NULL);CHKERRQ(ierr);
5044         }
5045       }
5046       ierr = MPI_Isend(bufj+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_INT,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
5047     }
5048 
5049     /* recvs and sends of j-array are completed */
5050     i = nrecvs;
5051     while (i--) {
5052       ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
5053     }
5054     if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
5055   } else if (scall == MAT_REUSE_MATRIX) {
5056     sstartsj = *startsj_s;
5057     rstartsj = *startsj_r;
5058     bufa     = *bufa_ptr;
5059     b_oth    = (Mat_SeqAIJ*)(*B_oth)->data;
5060     b_otha   = b_oth->a;
5061   } else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
5062 
5063   /* a-array */
5064   /*---------*/
5065   /*  post receives of a-array */
5066   for (i=0; i<nrecvs; i++) {
5067     nrows = rstartsj[i+1]-rstartsj[i]; /* length of the msg received */
5068     ierr  = MPI_Irecv(b_otha+rstartsj[i],nrows,MPIU_SCALAR,rprocs[i],tag,comm,rwaits+i);CHKERRQ(ierr);
5069   }
5070 
5071   /* pack the outgoing message a-array */
5072   k = 0;
5073   for (i=0; i<nsends; i++) {
5074     nrows = sstarts[i+1]-sstarts[i]; /* num of block rows */
5075     bufA  = bufa+sstartsj[i];
5076     for (j=0; j<nrows; j++) {
5077       row = srow[k++] + B->rmap->range[rank];  /* global row idx */
5078       for (ll=0; ll<sbs; ll++) {
5079         ierr = MatGetRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);CHKERRQ(ierr);
5080         for (l=0; l<ncols; l++) {
5081           *bufA++ = vals[l];
5082         }
5083         ierr = MatRestoreRow_MPIAIJ(B,row+ll,&ncols,NULL,&vals);CHKERRQ(ierr);
5084       }
5085     }
5086     ierr = MPI_Isend(bufa+sstartsj[i],sstartsj[i+1]-sstartsj[i],MPIU_SCALAR,sprocs[i],tag,comm,swaits+i);CHKERRQ(ierr);
5087   }
5088   /* recvs and sends of a-array are completed */
5089   i = nrecvs;
5090   while (i--) {
5091     ierr = MPI_Waitany(nrecvs,rwaits,&jj,&rstatus);CHKERRQ(ierr);
5092   }
5093   if (nsends) {ierr = MPI_Waitall(nsends,swaits,sstatus);CHKERRQ(ierr);}
5094   ierr = PetscFree2(rwaits,swaits);CHKERRQ(ierr);
5095 
5096   if (scall == MAT_INITIAL_MATRIX) {
5097     /* put together the new matrix */
5098     ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,aBn,B->cmap->N,b_othi,b_othj,b_otha,B_oth);CHKERRQ(ierr);
5099 
5100     /* MatCreateSeqAIJWithArrays flags matrix so PETSc doesn't free the user's arrays. */
5101     /* Since these are PETSc arrays, change flags to free them as necessary. */
5102     b_oth          = (Mat_SeqAIJ*)(*B_oth)->data;
5103     b_oth->free_a  = PETSC_TRUE;
5104     b_oth->free_ij = PETSC_TRUE;
5105     b_oth->nonew   = 0;
5106 
5107     ierr = PetscFree(bufj);CHKERRQ(ierr);
5108     if (!startsj_s || !bufa_ptr) {
5109       ierr = PetscFree2(sstartsj,rstartsj);CHKERRQ(ierr);
5110       ierr = PetscFree(bufa);CHKERRQ(ierr); /* free the local send buffer; bufa_ptr is the output pointer */
5111     } else {
5112       *startsj_s = sstartsj;
5113       *startsj_r = rstartsj;
5114       *bufa_ptr  = bufa;
5115     }
5116   }
5117   ierr = PetscLogEventEnd(MAT_GetBrowsOfAocols,A,B,0,0);CHKERRQ(ierr);
5118   PetscFunctionReturn(0);
5119 }
5120 
5121 /*@C
5122   MatGetCommunicationStructs - Provides access to the communication structures used in matrix-vector multiplication.
5123 
5124   Not Collective
5125 
  Input Parameter:
. A - The matrix, which must be of type MATMPIAIJ

  Output Parameters:
+ lvec - The local vector holding off-process values from the argument to a matrix-vector product
. colmap - A map from global column index to local index into lvec
- multScatter - A scatter from the argument of a matrix-vector product to lvec
5133 
5134   Level: developer
5135 
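  Example usage, as a minimal sketch (assumes A is an already assembled matrix of type MATMPIAIJ):
.vb
  PetscErrorCode ierr;
  Vec            lvec;
  VecScatter     mvctx;
#if defined(PETSC_USE_CTABLE)
  PetscTable     colmap;
#else
  PetscInt       *colmap;
#endif
  ierr = MatGetCommunicationStructs(A,&lvec,&colmap,&mvctx);CHKERRQ(ierr);
.ve
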
5136 @*/
5137 #if defined(PETSC_USE_CTABLE)
5138 PetscErrorCode  MatGetCommunicationStructs(Mat A, Vec *lvec, PetscTable *colmap, VecScatter *multScatter)
5139 #else
5140 PetscErrorCode  MatGetCommunicationStructs(Mat A, Vec *lvec, PetscInt *colmap[], VecScatter *multScatter)
5141 #endif
5142 {
5143   Mat_MPIAIJ *a;
5144 
5145   PetscFunctionBegin;
5146   PetscValidHeaderSpecific(A, MAT_CLASSID, 1);
5147   PetscValidPointer(lvec, 2);
5148   PetscValidPointer(colmap, 3);
5149   PetscValidPointer(multScatter, 4);
5150   a = (Mat_MPIAIJ*) A->data;
5151   if (lvec) *lvec = a->lvec;
5152   if (colmap) *colmap = a->colmap;
5153   if (multScatter) *multScatter = a->Mvctx;
5154   PetscFunctionReturn(0);
5155 }
5156 
5157 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJCRL(Mat,MatType,MatReuse,Mat*);
5158 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat,MatType,MatReuse,Mat*);
5159 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPISBAIJ(Mat,MatType,MatReuse,Mat*);
5160 #if defined(PETSC_HAVE_ELEMENTAL)
5161 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_Elemental(Mat,MatType,MatReuse,Mat*);
5162 #endif
5163 #if defined(PETSC_HAVE_HYPRE)
5164 PETSC_INTERN PetscErrorCode MatConvert_AIJ_HYPRE(Mat,MatType,MatReuse,Mat*);
5165 PETSC_INTERN PetscErrorCode MatMatMatMult_Transpose_AIJ_AIJ(Mat,Mat,Mat,MatReuse,PetscReal,Mat*);
5166 #endif
5167 PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_IS(Mat,MatType,MatReuse,Mat*);
5168 
5169 /*
    Computes C = A*B as (B'*A')', since computing the MPIDense*MPIAIJ product A*B directly is untenable
5171 
5172                n                       p                          p
5173         (              )       (              )         (                  )
5174       m (      A       )  *  n (       B      )   =   m (         C        )
5175         (              )       (              )         (                  )
5176 
5177 */
5178 PetscErrorCode MatMatMultNumeric_MPIDense_MPIAIJ(Mat A,Mat B,Mat C)
5179 {
5180   PetscErrorCode ierr;
5181   Mat            At,Bt,Ct;
5182 
5183   PetscFunctionBegin;
5184   ierr = MatTranspose(A,MAT_INITIAL_MATRIX,&At);CHKERRQ(ierr);
5185   ierr = MatTranspose(B,MAT_INITIAL_MATRIX,&Bt);CHKERRQ(ierr);
5186   ierr = MatMatMult(Bt,At,MAT_INITIAL_MATRIX,1.0,&Ct);CHKERRQ(ierr);
5187   ierr = MatDestroy(&At);CHKERRQ(ierr);
5188   ierr = MatDestroy(&Bt);CHKERRQ(ierr);
5189   ierr = MatTranspose(Ct,MAT_REUSE_MATRIX,&C);CHKERRQ(ierr);
5190   ierr = MatDestroy(&Ct);CHKERRQ(ierr);
5191   PetscFunctionReturn(0);
5192 }
5193 
5194 PetscErrorCode MatMatMultSymbolic_MPIDense_MPIAIJ(Mat A,Mat B,PetscReal fill,Mat *C)
5195 {
5196   PetscErrorCode ierr;
5197   PetscInt       m=A->rmap->n,n=B->cmap->n;
5198   Mat            Cmat;
5199 
5200   PetscFunctionBegin;
  if (A->cmap->n != B->rmap->n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"A->cmap->n %D != B->rmap->n %D",A->cmap->n,B->rmap->n);
5202   ierr = MatCreate(PetscObjectComm((PetscObject)A),&Cmat);CHKERRQ(ierr);
5203   ierr = MatSetSizes(Cmat,m,n,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
5204   ierr = MatSetBlockSizesFromMats(Cmat,A,B);CHKERRQ(ierr);
5205   ierr = MatSetType(Cmat,MATMPIDENSE);CHKERRQ(ierr);
5206   ierr = MatMPIDenseSetPreallocation(Cmat,NULL);CHKERRQ(ierr);
5207   ierr = MatAssemblyBegin(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5208   ierr = MatAssemblyEnd(Cmat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5209 
5210   Cmat->ops->matmultnumeric = MatMatMultNumeric_MPIDense_MPIAIJ;
5211 
5212   *C = Cmat;
5213   PetscFunctionReturn(0);
5214 }
5215 
5216 /* ----------------------------------------------------------------*/
5217 PETSC_INTERN PetscErrorCode MatMatMult_MPIDense_MPIAIJ(Mat A,Mat B,MatReuse scall,PetscReal fill,Mat *C)
5218 {
5219   PetscErrorCode ierr;
5220 
5221   PetscFunctionBegin;
5222   if (scall == MAT_INITIAL_MATRIX) {
5223     ierr = PetscLogEventBegin(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
5224     ierr = MatMatMultSymbolic_MPIDense_MPIAIJ(A,B,fill,C);CHKERRQ(ierr);
5225     ierr = PetscLogEventEnd(MAT_MatMultSymbolic,A,B,0,0);CHKERRQ(ierr);
5226   }
5227   ierr = PetscLogEventBegin(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
5228   ierr = MatMatMultNumeric_MPIDense_MPIAIJ(A,B,*C);CHKERRQ(ierr);
5229   ierr = PetscLogEventEnd(MAT_MatMultNumeric,A,B,0,0);CHKERRQ(ierr);
5230   PetscFunctionReturn(0);
5231 }
5232 
5233 /*MC
5234    MATMPIAIJ - MATMPIAIJ = "mpiaij" - A matrix type to be used for parallel sparse matrices.
5235 
5236    Options Database Keys:
5237 . -mat_type mpiaij - sets the matrix type to "mpiaij" during a call to MatSetFromOptions()
5238 
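  Example usage, a minimal sketch of the usual creation sequence (M and N stand for the desired global
  sizes, and the preallocation counts are illustrative):
.vb
  Mat A;
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,M,N);CHKERRQ(ierr);
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(A,5,NULL,2,NULL);CHKERRQ(ierr); /* about 5 nonzeros per row in the diagonal block, 2 in the off-diagonal block */
.ve
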
5239   Level: beginner
5240 
5241 .seealso: MatCreateAIJ()
5242 M*/
5243 
5244 PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJ(Mat B)
5245 {
5246   Mat_MPIAIJ     *b;
5247   PetscErrorCode ierr;
5248   PetscMPIInt    size;
5249 
5250   PetscFunctionBegin;
5251   ierr = MPI_Comm_size(PetscObjectComm((PetscObject)B),&size);CHKERRQ(ierr);
5252 
5253   ierr          = PetscNewLog(B,&b);CHKERRQ(ierr);
5254   B->data       = (void*)b;
5255   ierr          = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
5256   B->assembled  = PETSC_FALSE;
5257   B->insertmode = NOT_SET_VALUES;
5258   b->size       = size;
5259 
5260   ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)B),&b->rank);CHKERRQ(ierr);
5261 
  /* build cache for off-processor entries formed during MatSetValues() */
5263   ierr = MatStashCreate_Private(PetscObjectComm((PetscObject)B),1,&B->stash);CHKERRQ(ierr);
5264 
5265   b->donotstash  = PETSC_FALSE;
5266   b->colmap      = 0;
5267   b->garray      = 0;
5268   b->roworiented = PETSC_TRUE;
5269 
5270   /* stuff used for matrix vector multiply */
5271   b->lvec  = NULL;
5272   b->Mvctx = NULL;
5273 
5274   /* stuff for MatGetRow() */
5275   b->rowindices   = 0;
5276   b->rowvalues    = 0;
5277   b->getrowactive = PETSC_FALSE;
5278 
5279   /* flexible pointer used in CUSP/CUSPARSE classes */
5280   b->spptr = NULL;
5281 
5282   ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetUseScalableIncreaseOverlap_C",MatMPIAIJSetUseScalableIncreaseOverlap_MPIAIJ);CHKERRQ(ierr);
5283   ierr = PetscObjectComposeFunction((PetscObject)B,"MatStoreValues_C",MatStoreValues_MPIAIJ);CHKERRQ(ierr);
5284   ierr = PetscObjectComposeFunction((PetscObject)B,"MatRetrieveValues_C",MatRetrieveValues_MPIAIJ);CHKERRQ(ierr);
5285   ierr = PetscObjectComposeFunction((PetscObject)B,"MatIsTranspose_C",MatIsTranspose_MPIAIJ);CHKERRQ(ierr);
5286   ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocation_C",MatMPIAIJSetPreallocation_MPIAIJ);CHKERRQ(ierr);
5287   ierr = PetscObjectComposeFunction((PetscObject)B,"MatMPIAIJSetPreallocationCSR_C",MatMPIAIJSetPreallocationCSR_MPIAIJ);CHKERRQ(ierr);
5288   ierr = PetscObjectComposeFunction((PetscObject)B,"MatDiagonalScaleLocal_C",MatDiagonalScaleLocal_MPIAIJ);CHKERRQ(ierr);
5289   ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijperm_C",MatConvert_MPIAIJ_MPIAIJPERM);CHKERRQ(ierr);
5290   ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpiaijcrl_C",MatConvert_MPIAIJ_MPIAIJCRL);CHKERRQ(ierr);
5291   ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_mpisbaij_C",MatConvert_MPIAIJ_MPISBAIJ);CHKERRQ(ierr);
5292 #if defined(PETSC_HAVE_ELEMENTAL)
5293   ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_elemental_C",MatConvert_MPIAIJ_Elemental);CHKERRQ(ierr);
5294 #endif
5295 #if defined(PETSC_HAVE_HYPRE)
5296   ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_hypre_C",MatConvert_AIJ_HYPRE);CHKERRQ(ierr);
5297 #endif
5298   ierr = PetscObjectComposeFunction((PetscObject)B,"MatConvert_mpiaij_is_C",MatConvert_MPIAIJ_IS);CHKERRQ(ierr);
5299   ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMult_mpidense_mpiaij_C",MatMatMult_MPIDense_MPIAIJ);CHKERRQ(ierr);
5300   ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMultSymbolic_mpidense_mpiaij_C",MatMatMultSymbolic_MPIDense_MPIAIJ);CHKERRQ(ierr);
5301   ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMultNumeric_mpidense_mpiaij_C",MatMatMultNumeric_MPIDense_MPIAIJ);CHKERRQ(ierr);
5302 #if defined(PETSC_HAVE_HYPRE)
5303   ierr = PetscObjectComposeFunction((PetscObject)B,"MatMatMatMult_transpose_mpiaij_mpiaij_C",MatMatMatMult_Transpose_AIJ_AIJ);CHKERRQ(ierr);
5304 #endif
5305   ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPIAIJ);CHKERRQ(ierr);
5306   PetscFunctionReturn(0);
5307 }
5308 
5309 /*@C
     MatCreateMPIAIJWithSplitArrays - creates an MPI AIJ matrix using arrays that contain the "diagonal"
         and "off-diagonal" parts of the matrix in CSR format.
5312 
5313    Collective on MPI_Comm
5314 
   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (cannot be PETSC_DECIDE)
.  n - This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax. (or PETSC_DECIDE to have
       it calculated if N is given) For square matrices n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
.  i - row indices for the "diagonal" portion of the matrix
.  j - column indices for the "diagonal" portion, which must be local to this process
.  a - matrix values for the "diagonal" portion
.  oi - row indices for the "off-diagonal" portion of the matrix
.  oj - column indices for the "off-diagonal" portion, which must be global
-  oa - matrix values for the "off-diagonal" portion
5329 
5330    Output Parameter:
5331 .   mat - the matrix
5332 
5333    Level: advanced
5334 
5335    Notes:
5336        The i, j, and a arrays ARE NOT copied by this routine into the internal format used by PETSc. The user
5337        must free the arrays once the matrix has been destroyed and not before.
5338 
       The i, j, oi, and oj indices are zero-based
5340 
5341        See MatCreateAIJ() for the definition of "diagonal" and "off-diagonal" portion of the matrix
5342 
5343        This sets local rows and cannot be used to set off-processor values.
5344 
5345        Use of this routine is discouraged because it is inflexible and cumbersome to use. It is extremely rare that a
5346        legacy application natively assembles into exactly this split format. The code to do so is nontrivial and does
5347        not easily support in-place reassembly. It is recommended to use MatSetValues() (or a variant thereof) because
5348        the resulting assembly is easier to implement, will work with any matrix format, and the user does not have to
5349        keep track of the underlying array. Use MatSetOption(A,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) to disable all
5350        communication if it is known that only local entries will be set.
5351 
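   Example usage, as a minimal sketch: assume exactly two MPI processes, each owning one row and one
   column of the global 2x2 matrix [2 -1; -1 2]; the arrays must remain valid until the matrix is destroyed.
.vb
   PetscInt    i[2]  = {0,1}, j[1]  = {0};  /* diagonal block: one entry, with a local column index    */
   PetscScalar a[1]  = {2.0};
   PetscInt    oi[2] = {0,1}, oj[1];        /* off-diagonal block: one entry, with a global column index */
   PetscScalar oa[1] = {-1.0};
   Mat         A;
   PetscMPIInt rank;

   ierr  = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
   oj[0] = rank ? 0 : 1; /* each process couples to the column owned by the other process */
   ierr  = MatCreateMPIAIJWithSplitArrays(PETSC_COMM_WORLD,1,1,PETSC_DETERMINE,PETSC_DETERMINE,i,j,a,oi,oj,oa,&A);CHKERRQ(ierr);
.ve
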
5352 .keywords: matrix, aij, compressed row, sparse, parallel
5353 
5354 .seealso: MatCreate(), MatCreateSeqAIJ(), MatSetValues(), MatMPIAIJSetPreallocation(), MatMPIAIJSetPreallocationCSR(),
5355           MATMPIAIJ, MatCreateAIJ(), MatCreateMPIAIJWithArrays()
5356 @*/
5357 PetscErrorCode  MatCreateMPIAIJWithSplitArrays(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt i[],PetscInt j[],PetscScalar a[],PetscInt oi[], PetscInt oj[],PetscScalar oa[],Mat *mat)
5358 {
5359   PetscErrorCode ierr;
5360   Mat_MPIAIJ     *maij;
5361 
5362   PetscFunctionBegin;
  if (m < 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"local number of rows (m) cannot be PETSC_DECIDE or negative");
5364   if (i[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"i (row indices) must start with 0");
5365   if (oi[0]) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"oi (row indices) must start with 0");
5366   ierr = MatCreate(comm,mat);CHKERRQ(ierr);
5367   ierr = MatSetSizes(*mat,m,n,M,N);CHKERRQ(ierr);
5368   ierr = MatSetType(*mat,MATMPIAIJ);CHKERRQ(ierr);
5369   maij = (Mat_MPIAIJ*) (*mat)->data;
5370 
5371   (*mat)->preallocated = PETSC_TRUE;
5372 
5373   ierr = PetscLayoutSetUp((*mat)->rmap);CHKERRQ(ierr);
5374   ierr = PetscLayoutSetUp((*mat)->cmap);CHKERRQ(ierr);
5375 
5376   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,n,i,j,a,&maij->A);CHKERRQ(ierr);
5377   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,m,(*mat)->cmap->N,oi,oj,oa,&maij->B);CHKERRQ(ierr);
5378 
5379   ierr = MatAssemblyBegin(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5380   ierr = MatAssemblyEnd(maij->A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5381   ierr = MatAssemblyBegin(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5382   ierr = MatAssemblyEnd(maij->B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5383 
5384   ierr = MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE);CHKERRQ(ierr);
5385   ierr = MatAssemblyBegin(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5386   ierr = MatAssemblyEnd(*mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
5387   ierr = MatSetOption(*mat,MAT_NO_OFF_PROC_ENTRIES,PETSC_FALSE);CHKERRQ(ierr);
5388   ierr = MatSetOption(*mat,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE);CHKERRQ(ierr);
5389   PetscFunctionReturn(0);
5390 }
5391 
5392 /*
5393     Special version for direct calls from Fortran
5394 */
5395 #include <petsc/private/fortranimpl.h>
5396 
/* Change these macros so they can be used in a void function */
5398 #undef CHKERRQ
5399 #define CHKERRQ(ierr) CHKERRABORT(PETSC_COMM_WORLD,ierr)
5400 #undef SETERRQ2
5401 #define SETERRQ2(comm,ierr,b,c,d) CHKERRABORT(comm,ierr)
5402 #undef SETERRQ3
5403 #define SETERRQ3(comm,ierr,b,c,d,e) CHKERRABORT(comm,ierr)
5404 #undef SETERRQ
5405 #define SETERRQ(c,ierr,b) CHKERRABORT(c,ierr)
5406 
5407 #if defined(PETSC_HAVE_FORTRAN_CAPS)
5408 #define matsetvaluesmpiaij_ MATSETVALUESMPIAIJ
5409 #elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
5410 #define matsetvaluesmpiaij_ matsetvaluesmpiaij
5411 #else
5412 #endif
5413 PETSC_EXTERN void PETSC_STDCALL matsetvaluesmpiaij_(Mat *mmat,PetscInt *mm,const PetscInt im[],PetscInt *mn,const PetscInt in[],const PetscScalar v[],InsertMode *maddv,PetscErrorCode *_ierr)
5414 {
5415   Mat            mat  = *mmat;
5416   PetscInt       m    = *mm, n = *mn;
5417   InsertMode     addv = *maddv;
5418   Mat_MPIAIJ     *aij = (Mat_MPIAIJ*)mat->data;
5419   PetscScalar    value;
5420   PetscErrorCode ierr;
5421 
5422   MatCheckPreallocated(mat,1);
5423   if (mat->insertmode == NOT_SET_VALUES) mat->insertmode = addv;
5424 
5425 #if defined(PETSC_USE_DEBUG)
5426   else if (mat->insertmode != addv) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"Cannot mix add values and insert values");
5427 #endif
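  /* The block below mirrors the body of MatSetValues_MPIAIJ() so that the
     MatSetValues_SeqAIJ_A_Private() and MatSetValues_SeqAIJ_B_Private() macros
     can operate on the local variables declared here */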
5428   {
5429     PetscInt  i,j,rstart  = mat->rmap->rstart,rend = mat->rmap->rend;
5430     PetscInt  cstart      = mat->cmap->rstart,cend = mat->cmap->rend,row,col;
5431     PetscBool roworiented = aij->roworiented;
5432 
    /* Some variables required by the MatSetValues_SeqAIJ_*_Private() macros */
5434     Mat        A                 = aij->A;
5435     Mat_SeqAIJ *a                = (Mat_SeqAIJ*)A->data;
5436     PetscInt   *aimax            = a->imax,*ai = a->i,*ailen = a->ilen,*aj = a->j;
5437     MatScalar  *aa               = a->a;
5438     PetscBool  ignorezeroentries = (((a->ignorezeroentries)&&(addv==ADD_VALUES)) ? PETSC_TRUE : PETSC_FALSE);
5439     Mat        B                 = aij->B;
5440     Mat_SeqAIJ *b                = (Mat_SeqAIJ*)B->data;
5441     PetscInt   *bimax            = b->imax,*bi = b->i,*bilen = b->ilen,*bj = b->j,bm = aij->B->rmap->n,am = aij->A->rmap->n;
5442     MatScalar  *ba               = b->a;
5443 
5444     PetscInt  *rp1,*rp2,ii,nrow1,nrow2,_i,rmax1,rmax2,N,low1,high1,low2,high2,t,lastcol1,lastcol2;
5445     PetscInt  nonew = a->nonew;
5446     MatScalar *ap1,*ap2;
5447 
5448     PetscFunctionBegin;
5449     for (i=0; i<m; i++) {
5450       if (im[i] < 0) continue;
5451 #if defined(PETSC_USE_DEBUG)
5452       if (im[i] >= mat->rmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %D max %D",im[i],mat->rmap->N-1);
5453 #endif
5454       if (im[i] >= rstart && im[i] < rend) {
5455         row      = im[i] - rstart;
5456         lastcol1 = -1;
5457         rp1      = aj + ai[row];
5458         ap1      = aa + ai[row];
5459         rmax1    = aimax[row];
5460         nrow1    = ailen[row];
5461         low1     = 0;
5462         high1    = nrow1;
5463         lastcol2 = -1;
5464         rp2      = bj + bi[row];
5465         ap2      = ba + bi[row];
5466         rmax2    = bimax[row];
5467         nrow2    = bilen[row];
5468         low2     = 0;
5469         high2    = nrow2;
5470 
5471         for (j=0; j<n; j++) {
5472           if (roworiented) value = v[i*n+j];
5473           else value = v[i+j*m];
5474           if (in[j] >= cstart && in[j] < cend) {
5475             col = in[j] - cstart;
5476             if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
5477             MatSetValues_SeqAIJ_A_Private(row,col,value,addv,im[i],in[j]);
5478           } else if (in[j] < 0) continue;
5479 #if defined(PETSC_USE_DEBUG)
5480           else if (in[j] >= mat->cmap->N) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %D max %D",in[j],mat->cmap->N-1);
5481 #endif
5482           else {
5483             if (mat->was_assembled) {
5484               if (!aij->colmap) {
5485                 ierr = MatCreateColmap_MPIAIJ_Private(mat);CHKERRQ(ierr);
5486               }
5487 #if defined(PETSC_USE_CTABLE)
5488               ierr = PetscTableFind(aij->colmap,in[j]+1,&col);CHKERRQ(ierr);
5489               col--;
5490 #else
5491               col = aij->colmap[in[j]] - 1;
5492 #endif
5493               if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES) && row != col) continue;
5494               if (col < 0 && !((Mat_SeqAIJ*)(aij->A->data))->nonew) {
5495                 ierr = MatDisAssemble_MPIAIJ(mat);CHKERRQ(ierr);
5496                 col  =  in[j];
5497                 /* Reinitialize the variables required by MatSetValues_SeqAIJ_B_Private() */
5498                 B     = aij->B;
5499                 b     = (Mat_SeqAIJ*)B->data;
5500                 bimax = b->imax; bi = b->i; bilen = b->ilen; bj = b->j;
5501                 rp2   = bj + bi[row];
5502                 ap2   = ba + bi[row];
5503                 rmax2 = bimax[row];
5504                 nrow2 = bilen[row];
5505                 low2  = 0;
5506                 high2 = nrow2;
5507                 bm    = aij->B->rmap->n;
5508                 ba    = b->a;
5509               }
5510             } else col = in[j];
5511             MatSetValues_SeqAIJ_B_Private(row,col,value,addv,im[i],in[j]);
5512           }
5513         }
5514       } else if (!aij->donotstash) {
5515         if (roworiented) {
5516           ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
5517         } else {
5518           ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m,(PetscBool)(ignorezeroentries && (addv == ADD_VALUES)));CHKERRQ(ierr);
5519         }
5520       }
5521     }
5522   }
5523   PetscFunctionReturnVoid();
5524 }
5525 
5526