xref: /petsc/src/mat/impls/sbaij/mpi/mpisbaij.c (revision 47a75d0bca7049e25caa60963f5cf59208ef70fa)
1 /*$Id: mpisbaij.c,v 1.36 2000/10/30 18:23:34 hzhang Exp hzhang $*/
2 
3 #include "src/mat/impls/baij/mpi/mpibaij.h"    /*I "petscmat.h" I*/
4 #include "src/vec/vecimpl.h"
5 #include "mpisbaij.h"
6 #include "src/mat/impls/sbaij/seq/sbaij.h"
7 
8 extern int MatSetUpMultiply_MPISBAIJ(Mat);
9 extern int DisAssemble_MPISBAIJ(Mat);
10 extern int MatIncreaseOverlap_MPISBAIJ(Mat,int,IS *,int);
11 extern int MatGetSubMatrices_MPISBAIJ(Mat,int,IS *,IS *,MatReuse,Mat **);
12 extern int MatGetValues_SeqSBAIJ(Mat,int,int *,int,int *,Scalar *);
13 extern int MatSetValues_SeqSBAIJ(Mat,int,int *,int,int *,Scalar *,InsertMode);
14 extern int MatSetValuesBlocked_SeqSBAIJ(Mat,int,int*,int,int*,Scalar*,InsertMode);
15 extern int MatGetRow_SeqSBAIJ(Mat,int,int*,int**,Scalar**);
16 extern int MatRestoreRow_SeqSBAIJ(Mat,int,int*,int**,Scalar**);
17 extern int MatPrintHelp_SeqSBAIJ(Mat);
18 extern int MatZeroRows_SeqSBAIJ(Mat,IS,Scalar*);
19 extern int MatZeroRows_SeqBAIJ(Mat,IS,Scalar *);
20 extern int MatGetRowMax_MPISBAIJ(Mat,Vec);
21 
22 /*  UGLY, ugly, ugly
23    When MatScalar == Scalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does
24    not exist. Otherwise ..._MatScalar() takes matrix elements in single precision and
25    inserts them into the single-precision data structure. The function MatSetValuesBlocked_MPISBAIJ()
26    converts the entries into single precision and then calls ..._MatScalar() to put them
27    into the single-precision data structures.
28 */
29 #if defined(PETSC_USE_MAT_SINGLE)
30 extern int MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
31 extern int MatSetValues_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
32 extern int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
33 extern int MatSetValues_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
34 extern int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
35 #else
36 #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
37 #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
38 #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
39 #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
40 #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
41 #endif
42 
43 EXTERN_C_BEGIN
44 #undef __FUNC__
45 #define __FUNC__ /*<a name="MatStoreValues_MPISBAIJ"></a>*/"MatStoreValues_MPISBAIJ"
46 int MatStoreValues_MPISBAIJ(Mat mat)
47 {
48   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
49   int         ierr;
50 
51   PetscFunctionBegin;
52   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
53   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
54   PetscFunctionReturn(0);
55 }
56 EXTERN_C_END
57 
58 EXTERN_C_BEGIN
59 #undef __FUNC__
60 #define __FUNC__ /*<a name="MatRetrieveValues_MPISBAIJ"></a>*/"MatRetrieveValues_MPISBAIJ"
61 int MatRetrieveValues_MPISBAIJ(Mat mat)
62 {
63   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
64   int         ierr;
65 
66   PetscFunctionBegin;
67   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
68   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
69   PetscFunctionReturn(0);
70 }
71 EXTERN_C_END
72 
73 /*
74      Local utility routine that creates a mapping from the global column
75    number to the local number in the off-diagonal part of the local
76    storage of the matrix.  This is done in a non-scalable way since the
77    length of colmap equals the global matrix length.
78 */
79 #undef __FUNC__
80 #define __FUNC__ /*<a name="CreateColmap_MPISBAIJ_Private"></a>*/"CreateColmap_MPISBAIJ_Private"
81 static int CreateColmap_MPISBAIJ_Private(Mat mat)
82 {
83   PetscFunctionBegin;
84   SETERRQ(1,"Function not yet written for SBAIJ format");
85   /* PetscFunctionReturn(0); */
86 }
87 
88 #define CHUNKSIZE  10
89 
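/*
   The two macros below insert a single entry (row,col,value) into the local
   SeqSBAIJ "A" block (upper triangle) or the SeqBAIJ "B" (off-diagonal) block.
   Each locates the block column within the block row (a coarse bisection
   followed by a linear scan), adds to or overwrites the value if the block is
   already present, and otherwise shifts the later entries and inserts a new
   block, enlarging the storage by CHUNKSIZE blocks when the row is full.
*/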
90 #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
91 { \
92  \
93     brow = row/bs;  \
94     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
95     rmax = aimax[brow]; nrow = ailen[brow]; \
96       bcol = col/bs; \
97       ridx = row % bs; cidx = col % bs; \
98       low = 0; high = nrow; \
99       while (high-low > 3) { \
100         t = (low+high)/2; \
101         if (rp[t] > bcol) high = t; \
102         else              low  = t; \
103       } \
104       for (_i=low; _i<high; _i++) { \
105         if (rp[_i] > bcol) break; \
106         if (rp[_i] == bcol) { \
107           bap  = ap +  bs2*_i + bs*cidx + ridx; \
108           if (addv == ADD_VALUES) *bap += value;  \
109           else                    *bap  = value;  \
110           goto a_noinsert; \
111         } \
112       } \
113       if (a->nonew == 1) goto a_noinsert; \
114       else if (a->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
115       if (nrow >= rmax) { \
116         /* there is no extra room in row, therefore enlarge */ \
117         int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
118         MatScalar *new_a; \
119  \
120         if (a->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
121  \
122         /* malloc new storage space */ \
123         len     = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
124         new_a   = (MatScalar*)PetscMalloc(len);CHKPTRQ(new_a); \
125         new_j   = (int*)(new_a + bs2*new_nz); \
126         new_i   = new_j + new_nz; \
127  \
128         /* copy over old data into new slots */ \
129         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
130         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
131         ierr = PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
132         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
133         ierr = PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
134         ierr = PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
135         ierr = PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
136         ierr = PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
137                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
138         /* free up old matrix storage */ \
139         ierr = PetscFree(a->a);CHKERRQ(ierr);  \
140         if (!a->singlemalloc) { \
141           ierr = PetscFree(a->i);CHKERRQ(ierr); \
142           ierr = PetscFree(a->j);CHKERRQ(ierr);\
143         } \
144         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
145         a->singlemalloc = PETSC_TRUE; \
146  \
147         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
148         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
149         PLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
150         a->s_maxnz += bs2*CHUNKSIZE; \
151         a->reallocs++; \
152         a->s_nz++; \
153       } \
154       N = nrow++ - 1;  \
155       /* shift up all the later entries in this row */ \
156       for (ii=N; ii>=_i; ii--) { \
157         rp[ii+1] = rp[ii]; \
158         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
159       } \
160       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); }  \
161       rp[_i]                      = bcol;  \
162       ap[bs2*_i + bs*cidx + ridx] = value;  \
163       a_noinsert:; \
164     ailen[brow] = nrow; \
165 }
166 #ifndef MatSetValues_SeqSBAIJ_B_Private
167 #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
168 { \
169     brow = row/bs;  \
170     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
171     rmax = bimax[brow]; nrow = bilen[brow]; \
172       bcol = col/bs; \
173       ridx = row % bs; cidx = col % bs; \
174       low = 0; high = nrow; \
175       while (high-low > 3) { \
176         t = (low+high)/2; \
177         if (rp[t] > bcol) high = t; \
178         else              low  = t; \
179       } \
180       for (_i=low; _i<high; _i++) { \
181         if (rp[_i] > bcol) break; \
182         if (rp[_i] == bcol) { \
183           bap  = ap +  bs2*_i + bs*cidx + ridx; \
184           if (addv == ADD_VALUES) *bap += value;  \
185           else                    *bap  = value;  \
186           goto b_noinsert; \
187         } \
188       } \
189       if (b->nonew == 1) goto b_noinsert; \
190       else if (b->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
191       if (nrow >= rmax) { \
192         /* there is no extra room in row, therefore enlarge */ \
193         int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
194         MatScalar *new_a; \
195  \
196         if (b->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
197  \
198         /* malloc new storage space */ \
199         len     = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
200         new_a   = (MatScalar*)PetscMalloc(len);CHKPTRQ(new_a); \
201         new_j   = (int*)(new_a + bs2*new_nz); \
202         new_i   = new_j + new_nz; \
203  \
204         /* copy over old data into new slots */ \
205         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
206         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
207         ierr = PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
208         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
209         ierr = PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
210         ierr = PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
211         ierr = PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
212         ierr = PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
213                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
214         /* free up old matrix storage */ \
215         ierr = PetscFree(b->a);CHKERRQ(ierr);  \
216         if (!b->singlemalloc) { \
217           ierr = PetscFree(b->i);CHKERRQ(ierr); \
218           ierr = PetscFree(b->j);CHKERRQ(ierr); \
219         } \
220         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
221         b->singlemalloc = PETSC_TRUE; \
222  \
223         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
224         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
225         PLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
226         b->maxnz += bs2*CHUNKSIZE; \
227         b->reallocs++; \
228         b->nz++; \
229       } \
230       N = nrow++ - 1;  \
231       /* shift up all the later entries in this row */ \
232       for (ii=N; ii>=_i; ii--) { \
233         rp[ii+1] = rp[ii]; \
234         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
235       } \
236       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr);}  \
237       rp[_i]                      = bcol;  \
238       ap[bs2*_i + bs*cidx + ridx] = value;  \
239       b_noinsert:; \
240     bilen[brow] = nrow; \
241 }
242 #endif
243 
244 #if defined(PETSC_USE_MAT_SINGLE)
245 #undef __FUNC__
246 #define __FUNC__ /*<a name="MatSetValues_MPISBAIJ"></a>*/"MatSetValues_MPISBAIJ"
247 int MatSetValues_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
248 {
249   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
250   int         ierr,i,N = m*n;
251   MatScalar   *vsingle;
252 
253   PetscFunctionBegin;
254   if (N > b->setvalueslen) {
255     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
256     b->setvaluescopy = (MatScalar*)PetscMalloc(N*sizeof(MatScalar));CHKPTRQ(b->setvaluescopy);
257     b->setvalueslen  = N;
258   }
259   vsingle = b->setvaluescopy;
260 
261   for (i=0; i<N; i++) {
262     vsingle[i] = v[i];
263   }
264   ierr = MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
265   PetscFunctionReturn(0);
266 }
267 
268 #undef __FUNC__
269 #define __FUNC__ /*<a name="MatSetValuesBlocked_MPISBAIJ"></a>*/"MatSetValuesBlocked_MPISBAIJ"
270 int MatSetValuesBlocked_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
271 {
272   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
273   int         ierr,i,N = m*n*b->bs2;
274   MatScalar   *vsingle;
275 
276   PetscFunctionBegin;
277   if (N > b->setvalueslen) {
278     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
279     b->setvaluescopy = (MatScalar*)PetscMalloc(N*sizeof(MatScalar));CHKPTRQ(b->setvaluescopy);
280     b->setvalueslen  = N;
281   }
282   vsingle = b->setvaluescopy;
283   for (i=0; i<N; i++) {
284     vsingle[i] = v[i];
285   }
286   ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
287   PetscFunctionReturn(0);
288 }
289 
290 #undef __FUNC__
291 #define __FUNC__ /*<a name="MatSetValues_MPISBAIJ_HT"></a>*/"MatSetValues_MPISBAIJ_HT"
292 int MatSetValues_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
293 {
294   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
295   int         ierr,i,N = m*n;
296   MatScalar   *vsingle;
297 
298   PetscFunctionBegin;
299   SETERRQ(1,"Function not yet written for SBAIJ format");
300   /* PetscFunctionReturn(0); */
301 }
302 
303 #undef __FUNC__
304 #define __FUNC__ /*<a name="MatSetValuesBlocked_MPISBAIJ_HT"></a>*/"MatSetValuesBlocked_MPISBAIJ_HT"
305 int MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
306 {
307   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
308   int         ierr,i,N = m*n*b->bs2;
309   MatScalar   *vsingle;
310 
311   PetscFunctionBegin;
312   SETERRQ(1,"Function not yet written for SBAIJ format");
313   /* PetscFunctionReturn(0); */
314 }
315 #endif
316 
317 /* Only add/insert a(i,j) with i<=j (blocks).
318    Any a(i,j) with i>j input by the user is ignored.
319 */
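/*
   For illustration only (not part of the original source): with bs=1, a call
       ierr = MatSetValues(mat,1,&i,1,&j,&v,ADD_VALUES);
   with i>j contributes nothing here; only entries with i<=j (by block) are
   stored, the lower triangle being implied by symmetry.
*/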
320 #undef __FUNC__
321 #define __FUNC__ /*<a name="MatSetValues_MPISBAIJ_MatScalar"></a>*/"MatSetValues_MPISBAIJ_MatScalar"
322 int MatSetValues_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
323 {
324   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
325   MatScalar    value;
326   PetscTruth   roworiented = baij->roworiented;
327   int          ierr,i,j,row,col;
328   int          rstart_orig=baij->rstart_bs;
329   int          rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
330   int          cend_orig=baij->cend_bs,bs=baij->bs;
331 
332   /* Some Variables required in the macro */
333   Mat          A = baij->A;
334   Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
335   int          *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
336   MatScalar    *aa=a->a;
337 
338   Mat          B = baij->B;
339   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(B)->data;
340   int          *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
341   MatScalar    *ba=b->a;
342 
343   int          *rp,ii,nrow,_i,rmax,N,brow,bcol;
344   int          low,high,t,ridx,cidx,bs2=a->bs2;
345   MatScalar    *ap,*bap;
346 
347   /* for stash */
348   int          n_loc, *in_loc=0;
349   MatScalar    *v_loc=0;
350 
351   PetscFunctionBegin;
352 
353   if(!baij->donotstash){
354     in_loc = (int*)PetscMalloc(n*sizeof(int));CHKPTRQ(in_loc);
355     v_loc  = (MatScalar*)PetscMalloc(n*sizeof(MatScalar));CHKPTRQ(v_loc);
356   }
357 
358   for (i=0; i<m; i++) {
359     if (im[i] < 0) continue;
360 #if defined(PETSC_USE_BOPT_g)
361     if (im[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
362 #endif
363     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
364       row = im[i] - rstart_orig;              /* local row index */
365       for (j=0; j<n; j++) {
366         if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
367         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
368           col = in[j] - cstart_orig;          /* local col index */
369           brow = row/bs; bcol = col/bs;
370           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
371           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
372           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
373           /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
374         } else if (in[j] < 0) continue;
375 #if defined(PETSC_USE_BOPT_g)
376         else if (in[j] >= mat->N) {SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Col too large");}
377 #endif
378         else {  /* off-diag entry (B) */
379           if (mat->was_assembled) {
380             if (!baij->colmap) {
381               ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
382             }
383 #if defined (PETSC_USE_CTABLE)
384             ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
385             col  = col - 1 + in[j]%bs;
386 #else
387             col = baij->colmap[in[j]/bs] - 1 + in[j]%bs;
388 #endif
389             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
390               ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
391               col =  in[j];
392               /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
393               B = baij->B;
394               b = (Mat_SeqBAIJ*)(B)->data;
395               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
396               ba=b->a;
397             }
398           } else col = in[j];
399           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
400           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
401           /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
402         }
403       }
404     } else {  /* off processor entry */
405       if (!baij->donotstash) {
406         n_loc = 0;
407         for (j=0; j<n; j++){
408           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
409           in_loc[n_loc] = in[j];
410           if (roworiented) {
411             v_loc[n_loc] = v[i*n+j];
412           } else {
413             v_loc[n_loc] = v[j*m+i];
414           }
415           n_loc++;
416         }
417         ierr = MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);CHKERRQ(ierr);
418       }
419     }
420   }
421 
422   if(!baij->donotstash){
423     ierr = PetscFree(in_loc);CHKERRQ(ierr);
424     ierr = PetscFree(v_loc);CHKERRQ(ierr);
425   }
426   PetscFunctionReturn(0);
427 }
428 
429 #undef __FUNC__
430 #define __FUNC__ /*<a name="MatSetValuesBlocked_MPISBAIJ_MatScalar"></a>*/"MatSetValuesBlocked_MPISBAIJ_MatScalar"
431 int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
432 {
433   PetscFunctionBegin;
434   SETERRQ(1,"Function not yet written for SBAIJ format");
435   /* PetscFunctionReturn(0); */
436 }
437 
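/* Multiplicative hashing: HASH_KEY is the fractional part of the golden ratio,
   and HASH maps a key into [0,size) using the fractional part of key*HASH_KEY. */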
438 #define HASH_KEY 0.6180339887
439 #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
440 /* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
441 /* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
442 #undef __FUNC__
443 #define __FUNC__ /*<a name="MatSetValues_MPISBAIJ_HT_MatScalar"></a>*/"MatSetValues_MPISBAIJ_HT_MatScalar"
444 int MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
445 {
446   PetscFunctionBegin;
447   SETERRQ(1,"Function not yet written for SBAIJ format");
448   /* PetscFunctionReturn(0); */
449 }
450 
451 #undef __FUNC__
452 #define __FUNC__ /*<a name=""></a>*/"MatSetValuesBlocked_MPISBAIJ_HT_MatScalar"
453 int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
454 {
455   PetscFunctionBegin;
456   SETERRQ(1,"Function not yet written for SBAIJ format");
457   /* PetscFunctionReturn(0); */
458 }
459 
460 #undef __FUNC__
461 #define __FUNC__ /*<a name=""></a>*/"MatGetValues_MPISBAIJ"
462 int MatGetValues_MPISBAIJ(Mat mat,int m,int *idxm,int n,int *idxn,Scalar *v)
463 {
464   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
465   int          bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
466   int          bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
467 
468   PetscFunctionBegin;
469   for (i=0; i<m; i++) {
470     if (idxm[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
471     if (idxm[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
472     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
473       row = idxm[i] - bsrstart;
474       for (j=0; j<n; j++) {
475         if (idxn[j] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column");
476         if (idxn[j] >= mat->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
477         if (idxn[j] >= bscstart && idxn[j] < bscend){
478           col = idxn[j] - bscstart;
479           ierr = MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
480         } else {
481           if (!baij->colmap) {
482             ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
483           }
484 #if defined (PETSC_USE_CTABLE)
485           ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
486           data --;
487 #else
488           data = baij->colmap[idxn[j]/bs]-1;
489 #endif
490           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
491           else {
492             col  = data + idxn[j]%bs;
493             ierr = MatGetValues_SeqSBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
494           }
495         }
496       }
497     } else {
498       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
499     }
500   }
501  PetscFunctionReturn(0);
502 }
503 
504 #undef __FUNC__
505 #define __FUNC__ /*<a name=""></a>*/"MatNorm_MPISBAIJ"
506 int MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
507 {
508   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
509   /* Mat_SeqSBAIJ *amat = (Mat_SeqSBAIJ*)baij->A->data; */
510   /* Mat_SeqBAIJ  *bmat = (Mat_SeqBAIJ*)baij->B->data; */
511   int        ierr;
512   PetscReal  sum[2],*lnorm2;
513 
514   PetscFunctionBegin;
515   if (baij->size == 1) {
516     ierr =  MatNorm(baij->A,type,norm);CHKERRQ(ierr);
517   } else {
518     if (type == NORM_FROBENIUS) {
519       lnorm2 = (double*)PetscMalloc(2*sizeof(double));CHKPTRQ(lnorm2);
520       ierr =  MatNorm(baij->A,type,lnorm2);CHKERRQ(ierr);
521       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
522       ierr =  MatNorm(baij->B,type,lnorm2);CHKERRQ(ierr);
523       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
524       /*
525       ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
526       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], lnorm2=%g, %g\n",rank,lnorm2[0],lnorm2[1]);
527       */
528       ierr = MPI_Allreduce(lnorm2,&sum,2,MPI_DOUBLE,MPI_SUM,mat->comm);CHKERRQ(ierr);
529       /*
530       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], sum=%g, %g\n",rank,sum[0],sum[1]);
531       PetscSynchronizedFlush(PETSC_COMM_WORLD); */
532 
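      /* only the diagonal block A and the upper off-diagonal block B are stored;
         the symmetric counterpart of B lives on other processes and is not stored,
         so its contribution is counted twice: ||M||_F^2 = sum||A||_F^2 + 2*sum||B||_F^2 */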
533       *norm = sqrt(sum[0] + 2*sum[1]);
534       ierr = PetscFree(lnorm2);CHKERRQ(ierr);
535     } else {
536       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
537     }
538   }
539   PetscFunctionReturn(0);
540 }
541 
542 /*
543   Creates the hash table and fills it in.
544   This table is created only once.
545   If new entries need to be added to the matrix
546   then the hash table has to be destroyed and
547   recreated.
548 */
549 #undef __FUNC__
550 #define __FUNC__ /*<a name=""></a>*/"MatCreateHashTable_MPISBAIJ_Private"
551 int MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
552 {
553   PetscFunctionBegin;
554   SETERRQ(1,"Function not yet written for SBAIJ format");
555   /* PetscFunctionReturn(0); */
556 }
557 
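/*
   MatSetValues() stashes entries destined for other processes (unless
   MAT_IGNORE_OFF_PROC_ENTRIES was set).  MatAssemblyBegin() checks that all
   processes used the same insert mode and starts communication of both the
   scalar stash and the block stash; MatAssemblyEnd() drains them into the
   local A and B parts and then assembles those matrices.
*/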
558 #undef __FUNC__
559 #define __FUNC__ /*<a name=""></a>*/"MatAssemblyBegin_MPISBAIJ"
560 int MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
561 {
562   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
563   int         ierr,nstash,reallocs;
564   InsertMode  addv;
565 
566   PetscFunctionBegin;
567   if (baij->donotstash) {
568     PetscFunctionReturn(0);
569   }
570 
571   /* make sure all processors are either in INSERTMODE or ADDMODE */
572   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);CHKERRQ(ierr);
573   if (addv == (ADD_VALUES|INSERT_VALUES)) {
574     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
575   }
576   mat->insertmode = addv; /* in case this processor had no cache */
577 
578   ierr = MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);CHKERRQ(ierr);
579   ierr = MatStashScatterBegin_Private(&mat->bstash,baij->rowners);CHKERRQ(ierr);
580   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
581   PLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %d entries,uses %d mallocs.\n",nstash,reallocs);
582   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
583   PLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
584   PetscFunctionReturn(0);
585 }
586 
587 #undef __FUNC__
588 #define __FUNC__ /*<a name=""></a>*/"MatAssemblyEnd_MPISBAIJ"
589 int MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
590 {
591   Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
592   Mat_SeqSBAIJ  *a=(Mat_SeqSBAIJ*)baij->A->data;
593   Mat_SeqBAIJ  *b=(Mat_SeqBAIJ*)baij->B->data;
594   int         i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
595   int         *row,*col,other_disassembled;
596   PetscTruth  r1,r2,r3;
597   MatScalar   *val;
598   InsertMode  addv = mat->insertmode;
599   int         rank;
600 
601   PetscFunctionBegin;
602   /* remove the 2 lines below later */
603   ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
604 
605   if (!baij->donotstash) {
606     while (1) {
607       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
608       /*
609       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]: in AssemblyEnd, stash, flg=%d\n",rank,flg);
610       PetscSynchronizedFlush(PETSC_COMM_WORLD);
611       */
612       if (!flg) break;
613 
614       for (i=0; i<n;) {
615         /* Now identify the consecutive vals belonging to the same row */
616         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
617         if (j < n) ncols = j-i;
618         else       ncols = n-i;
619         /* Now assemble all these values with a single function call */
620         ierr = MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
621         i = j;
622       }
623     }
624     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
625     /* Now process the block-stash. Since the values are stashed column-oriented,
626        set the roworiented flag to column oriented, and after MatSetValues()
627        restore the original flags */
628     r1 = baij->roworiented;
629     r2 = a->roworiented;
630     r3 = b->roworiented;
631     baij->roworiented = PETSC_FALSE;
632     a->roworiented    = PETSC_FALSE;
633     b->roworiented    = PETSC_FALSE;
634     while (1) {
635       ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
636       if (!flg) break;
637 
638       for (i=0; i<n;) {
639         /* Now identify the consecutive vals belonging to the same row */
640         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
641         if (j < n) ncols = j-i;
642         else       ncols = n-i;
643         ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
644         i = j;
645       }
646     }
647     ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
648     baij->roworiented = r1;
649     a->roworiented    = r2;
650     b->roworiented    = r3;
651   }
652 
653   ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
654   ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);
655 
656   /* determine if any processor has disassembled, if so we must
657      also disassemble ourselves, in order that we may reassemble. */
658   /*
659      if nonzero structure of submatrix B cannot change then we know that
660      no processor disassembled thus we can skip this stuff
661   */
662   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
663     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);CHKERRQ(ierr);
664     if (mat->was_assembled && !other_disassembled) {
665       ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
666     }
667   }
668 
669   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
670     ierr = MatSetUpMultiply_MPISBAIJ(mat);CHKERRQ(ierr);
671   }
672   ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
673   ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);
674 
675 #if defined(PETSC_USE_BOPT_g)
676   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
677     PLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((double)baij->ht_total_ct)/baij->ht_insert_ct);
678     baij->ht_total_ct  = 0;
679     baij->ht_insert_ct = 0;
680   }
681 #endif
682   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
683     ierr = MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
684     mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
685     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
686   }
687 
688   if (baij->rowvalues) {
689     ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
690     baij->rowvalues = 0;
691   }
692   PetscFunctionReturn(0);
693 }
694 
695 #undef __FUNC__
696 #define __FUNC__ /*<a name=""></a>*/"MatView_MPISBAIJ_ASCIIorDraworSocket"
697 static int MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,Viewer viewer)
698 {
699   Mat_MPISBAIJ  *baij = (Mat_MPISBAIJ*)mat->data;
700   int          ierr,format,bs = baij->bs,size = baij->size,rank = baij->rank;
701   PetscTruth   isascii,isdraw;
702   Viewer       sviewer;
703 
704   PetscFunctionBegin;
705   ierr = PetscTypeCompare((PetscObject)viewer,ASCII_VIEWER,&isascii);CHKERRQ(ierr);
706   ierr = PetscTypeCompare((PetscObject)viewer,DRAW_VIEWER,&isdraw);CHKERRQ(ierr);
707   if (isascii) {
708     ierr = ViewerGetFormat(viewer,&format);CHKERRQ(ierr);
709     if (format == VIEWER_FORMAT_ASCII_INFO_LONG) {
710       MatInfo info;
711       ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
712       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
713       ierr = ViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
714               rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
715               baij->bs,(int)info.memory);CHKERRQ(ierr);
716       ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
717       ierr = ViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
718       ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
719       ierr = ViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
720       ierr = ViewerFlush(viewer);CHKERRQ(ierr);
721       ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
722       PetscFunctionReturn(0);
723     } else if (format == VIEWER_FORMAT_ASCII_INFO) {
724       ierr = ViewerASCIIPrintf(viewer,"  block size is %d\n",bs);CHKERRQ(ierr);
725       PetscFunctionReturn(0);
726     }
727   }
728 
729   if (isdraw) {
730     Draw       draw;
731     PetscTruth isnull;
732     ierr = ViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
733     ierr = DrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
734   }
735 
736   if (size == 1) {
737     ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
738   } else {
739     /* assemble the entire matrix onto first processor. */
740     Mat         A;
741     Mat_SeqSBAIJ *Aloc;
742     Mat_SeqBAIJ *Bloc;
743     int         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
744     MatScalar   *a;
745 
746     if (!rank) {
747       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
748     } else {
749       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
750     }
751     PLogObjectParent(mat,A);
752 
753     /* copy over the A part */
754     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
755     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
756     rvals = (int*)PetscMalloc(bs*sizeof(int));CHKPTRQ(rvals);
757 
758     for (i=0; i<mbs; i++) {
759       rvals[0] = bs*(baij->rstart + i);
760       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
761       for (j=ai[i]; j<ai[i+1]; j++) {
762         col = (baij->cstart+aj[j])*bs;
763         for (k=0; k<bs; k++) {
764           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
765           col++; a += bs;
766         }
767       }
768     }
769     /* copy over the B part */
770     Bloc = (Mat_SeqBAIJ*)baij->B->data;
771     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
772     for (i=0; i<mbs; i++) {
773       rvals[0] = bs*(baij->rstart + i);
774       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
775       for (j=ai[i]; j<ai[i+1]; j++) {
776         col = baij->garray[aj[j]]*bs;
777         for (k=0; k<bs; k++) {
778           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
779           col++; a += bs;
780         }
781       }
782     }
783     ierr = PetscFree(rvals);CHKERRQ(ierr);
784     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
785     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
786     /*
787        Everyone has to call to draw the matrix since the graphics waits are
788        synchronized across all processors that share the Draw object
789     */
790     ierr = ViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
791     if (!rank) {
792       ierr = MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
793     }
794     ierr = ViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
795     ierr = MatDestroy(A);CHKERRQ(ierr);
796   }
797   PetscFunctionReturn(0);
798 }
799 
800 #undef __FUNC__
801 #define __FUNC__ /*<a name=""></a>*/"MatView_MPISBAIJ"
802 int MatView_MPISBAIJ(Mat mat,Viewer viewer)
803 {
804   int        ierr;
805   PetscTruth isascii,isdraw,issocket,isbinary;
806 
807   PetscFunctionBegin;
808   ierr = PetscTypeCompare((PetscObject)viewer,ASCII_VIEWER,&isascii);CHKERRQ(ierr);
809   ierr = PetscTypeCompare((PetscObject)viewer,DRAW_VIEWER,&isdraw);CHKERRQ(ierr);
810   ierr = PetscTypeCompare((PetscObject)viewer,SOCKET_VIEWER,&issocket);CHKERRQ(ierr);
811   ierr = PetscTypeCompare((PetscObject)viewer,BINARY_VIEWER,&isbinary);CHKERRQ(ierr);
812   if (isascii || isdraw || issocket || isbinary) {
813     ierr = MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
814   } else {
815     SETERRQ1(1,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
816   }
817   PetscFunctionReturn(0);
818 }
819 
820 #undef __FUNC__
821 #define __FUNC__ /*<a name=""></a>*/"MatDestroy_MPISBAIJ"
822 int MatDestroy_MPISBAIJ(Mat mat)
823 {
824   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
825   int         ierr;
826 
827   PetscFunctionBegin;
828 #if defined(PETSC_USE_LOG)
829   PLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
830 #endif
831   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
832   ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
833   ierr = PetscFree(baij->rowners);CHKERRQ(ierr);
834   ierr = MatDestroy(baij->A);CHKERRQ(ierr);
835   ierr = MatDestroy(baij->B);CHKERRQ(ierr);
836 #if defined (PETSC_USE_CTABLE)
837   if (baij->colmap) {ierr = PetscTableDelete(baij->colmap);CHKERRQ(ierr);}
838 #else
839   if (baij->colmap) {ierr = PetscFree(baij->colmap);CHKERRQ(ierr);}
840 #endif
841   if (baij->garray) {ierr = PetscFree(baij->garray);CHKERRQ(ierr);}
842   if (baij->lvec)   {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
843   if (baij->Mvctx)  {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
844   if (baij->rowvalues) {ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);}
845   if (baij->barray) {ierr = PetscFree(baij->barray);CHKERRQ(ierr);}
846   if (baij->hd) {ierr = PetscFree(baij->hd);CHKERRQ(ierr);}
847 #if defined(PETSC_USE_MAT_SINGLE)
848   if (baij->setvaluescopy) {ierr = PetscFree(baij->setvaluescopy);CHKERRQ(ierr);}
849 #endif
850   ierr = PetscFree(baij);CHKERRQ(ierr);
851   PetscFunctionReturn(0);
852 }
853 
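/*
   y = M*x for the symmetric matrix stored as the local diagonal block A
   (SBAIJ, upper triangle) plus the strictly upper off-diagonal block B:
     y_local   = A*x_local      (diagonal part)
     y_local  += B*x_ghost      (superdiagonal part, after the forward scatter)
     y_remote += B^T*x_local    (subdiagonal part, accumulated into the owning
                                 processes with a reverse scatter and ADD_VALUES)
*/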
854 #undef __FUNC__
855 #define __FUNC__ /*<a name=""></a>*/"MatMult_MPISBAIJ"
856 int MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
857 {
858   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
859   int         ierr,nt;
860 
861   PetscFunctionBegin;
862   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
863   if (nt != A->n) {
864     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
865   }
866   ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
867   if (nt != A->m) {
868     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
869   }
870 
871   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
872   /* do diagonal part */
873   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
874   /* do superdiagonal part */
875   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
876   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
877   /* do subdiagonal part */
878   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
879   ierr = VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
880   ierr = VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
881 
882   PetscFunctionReturn(0);
883 }
884 
885 #undef __FUNC__
886 #define __FUNC__ /*<a name=""></a>*/"MatMultAdd_MPISBAIJ"
887 int MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
888 {
889   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
890   int        ierr;
891 
892   PetscFunctionBegin;
893   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
894   /* do diagonal part */
895   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
896   /* do superdiagonal part */
897   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
898   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
899 
900   /* do subdiagonal part */
901   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
902   ierr = VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
903   ierr = VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
904 
905   PetscFunctionReturn(0);
906 }
907 
908 #undef __FUNC__
909 #define __FUNC__ /*<a name=""></a>*/"MatMultTranspose_MPISBAIJ"
910 int MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
911 {
912   PetscFunctionBegin;
913   SETERRQ(1,"Matrix is symmetric. Call MatMult().");
914   /* PetscFunctionReturn(0); */
915 }
916 
917 #undef __FUNC__
918 #define __FUNC__ /*<a name=""></a>*/"MatMultTransposeAdd_MPISBAIJ"
919 int MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
920 {
921   PetscFunctionBegin;
922   SETERRQ(1,"Matrix is symmetric. Call MatMultAdd().");
923   /* PetscFunctionReturn(0); */
924 }
925 
926 /*
927   This only works correctly for square matrices where the subblock A->A is the
928    diagonal block
929 */
930 #undef __FUNC__
931 #define __FUNC__ /*<a name=""></a>*/"MatGetDiagonal_MPISBAIJ"
932 int MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
933 {
934   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
935   int         ierr;
936 
937   PetscFunctionBegin;
938   /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
939   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
940   PetscFunctionReturn(0);
941 }
942 
943 #undef __FUNC__
944 #define __FUNC__ /*<a name=""></a>*/"MatScale_MPISBAIJ"
945 int MatScale_MPISBAIJ(Scalar *aa,Mat A)
946 {
947   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
948   int         ierr;
949 
950   PetscFunctionBegin;
951   ierr = MatScale(aa,a->A);CHKERRQ(ierr);
952   ierr = MatScale(aa,a->B);CHKERRQ(ierr);
953   PetscFunctionReturn(0);
954 }
955 
956 #undef __FUNC__
957 #define __FUNC__ /*<a name=""></a>*/"MatGetOwnershipRange_MPISBAIJ"
958 int MatGetOwnershipRange_MPISBAIJ(Mat matin,int *m,int *n)
959 {
960   Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
961 
962   PetscFunctionBegin;
963   if (m) *m = mat->rstart*mat->bs;
964   if (n) *n = mat->rend*mat->bs;
965   PetscFunctionReturn(0);
966 }
967 
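/*
   Returns one locally owned row in global numbering: the row is fetched from
   both the diagonal block A and the off-diagonal block B, B's block columns
   are translated to global columns through garray, and the two pieces are
   merged in increasing column order (any B entries left of the diagonal
   block, then A, then the remaining B entries).
*/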
968 #undef __FUNC__
969 #define __FUNC__ /*<a name=""></a>*/"MatGetRow_MPISBAIJ"
970 int MatGetRow_MPISBAIJ(Mat matin,int row,int *nz,int **idx,Scalar **v)
971 {
972   Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
973   Scalar     *vworkA,*vworkB,**pvA,**pvB,*v_p;
974   int        bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
975   int        nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
976   int        *cmap,*idx_p,cstart = mat->cstart;
977 
978   PetscFunctionBegin;
979   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
980   mat->getrowactive = PETSC_TRUE;
981 
982   if (!mat->rowvalues && (idx || v)) {
983     /*
984         allocate enough space to hold information from the longest row.
985     */
986     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
987     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
988     int     max = 1,mbs = mat->mbs,tmp;
989     for (i=0; i<mbs; i++) {
990       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
991       if (max < tmp) { max = tmp; }
992     }
993     mat->rowvalues = (Scalar*)PetscMalloc(max*bs2*(sizeof(int)+sizeof(Scalar)));CHKPTRQ(mat->rowvalues);
994     mat->rowindices = (int*)(mat->rowvalues + max*bs2);
995   }
996 
997   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
998   lrow = row - brstart;  /* local row index */
999 
1000   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1001   if (!v)   {pvA = 0; pvB = 0;}
1002   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1003   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1004   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1005   nztot = nzA + nzB;
1006 
1007   cmap  = mat->garray;
1008   if (v  || idx) {
1009     if (nztot) {
1010       /* Sort by increasing column numbers, assuming A and B already sorted */
1011       int imark = -1;
1012       if (v) {
1013         *v = v_p = mat->rowvalues;
1014         for (i=0; i<nzB; i++) {
1015           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1016           else break;
1017         }
1018         imark = i;
1019         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1020         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1021       }
1022       if (idx) {
1023         *idx = idx_p = mat->rowindices;
1024         if (imark > -1) {
1025           for (i=0; i<imark; i++) {
1026             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1027           }
1028         } else {
1029           for (i=0; i<nzB; i++) {
1030             if (cmap[cworkB[i]/bs] < cstart)
1031               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1032             else break;
1033           }
1034           imark = i;
1035         }
1036         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1037         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1038       }
1039     } else {
1040       if (idx) *idx = 0;
1041       if (v)   *v   = 0;
1042     }
1043   }
1044   *nz = nztot;
1045   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1046   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1047   PetscFunctionReturn(0);
1048 }
1049 
1050 #undef __FUNC__
1051 #define __FUNC__ /*<a name=""></a>*/"MatRestoreRow_MPISBAIJ"
1052 int MatRestoreRow_MPISBAIJ(Mat mat,int row,int *nz,int **idx,Scalar **v)
1053 {
1054   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1055 
1056   PetscFunctionBegin;
1057   if (baij->getrowactive == PETSC_FALSE) {
1058     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1059   }
1060   baij->getrowactive = PETSC_FALSE;
1061   PetscFunctionReturn(0);
1062 }
1063 
1064 #undef __FUNC__
1065 #define __FUNC__ /*<a name=""></a>*/"MatGetBlockSize_MPISBAIJ"
1066 int MatGetBlockSize_MPISBAIJ(Mat mat,int *bs)
1067 {
1068   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1069 
1070   PetscFunctionBegin;
1071   *bs = baij->bs;
1072   PetscFunctionReturn(0);
1073 }
1074 
1075 #undef __FUNC__
1076 #define __FUNC__ /*<a name=""></a>*/"MatZeroEntries_MPISBAIJ"
1077 int MatZeroEntries_MPISBAIJ(Mat A)
1078 {
1079   Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1080   int         ierr;
1081 
1082   PetscFunctionBegin;
1083   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
1084   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
1085   PetscFunctionReturn(0);
1086 }
1087 
1088 #undef __FUNC__
1089 #define __FUNC__ /*<a name=""></a>*/"MatGetInfo_MPISBAIJ"
1090 int MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1091 {
1092   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1093   Mat         A = a->A,B = a->B;
1094   int         ierr;
1095   PetscReal   isend[5],irecv[5];
1096 
1097   PetscFunctionBegin;
1098   info->block_size     = (double)a->bs;
1099   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1100   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1101   isend[3] = info->memory;  isend[4] = info->mallocs;
1102   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1103   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1104   isend[3] += info->memory;  isend[4] += info->mallocs;
1105   if (flag == MAT_LOCAL) {
1106     info->nz_used      = isend[0];
1107     info->nz_allocated = isend[1];
1108     info->nz_unneeded  = isend[2];
1109     info->memory       = isend[3];
1110     info->mallocs      = isend[4];
1111   } else if (flag == MAT_GLOBAL_MAX) {
1112     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_MAX,matin->comm);CHKERRQ(ierr);
1113     info->nz_used      = irecv[0];
1114     info->nz_allocated = irecv[1];
1115     info->nz_unneeded  = irecv[2];
1116     info->memory       = irecv[3];
1117     info->mallocs      = irecv[4];
1118   } else if (flag == MAT_GLOBAL_SUM) {
1119     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_SUM,matin->comm);CHKERRQ(ierr);
1120     info->nz_used      = irecv[0];
1121     info->nz_allocated = irecv[1];
1122     info->nz_unneeded  = irecv[2];
1123     info->memory       = irecv[3];
1124     info->mallocs      = irecv[4];
1125   } else {
1126     SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
1127   }
1128   info->rows_global       = (double)matin->M;
1129   info->columns_global    = (double)matin->N;
1130   info->rows_local        = (double)matin->m;
1131   info->columns_local     = (double)matin->N;
1132   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1133   info->fill_ratio_needed = 0;
1134   info->factor_mallocs    = 0;
1135   PetscFunctionReturn(0);
1136 }
1137 
1138 #undef __FUNC__
1139 #define __FUNC__ /*<a name=""></a>*/"MatSetOption_MPISBAIJ"
1140 int MatSetOption_MPISBAIJ(Mat A,MatOption op)
1141 {
1142   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1143   int         ierr;
1144 
1145   PetscFunctionBegin;
1146   if (op == MAT_NO_NEW_NONZERO_LOCATIONS ||
1147       op == MAT_YES_NEW_NONZERO_LOCATIONS ||
1148       op == MAT_COLUMNS_UNSORTED ||
1149       op == MAT_COLUMNS_SORTED ||
1150       op == MAT_NEW_NONZERO_ALLOCATION_ERR ||
1151       op == MAT_KEEP_ZEROED_ROWS ||
1152       op == MAT_NEW_NONZERO_LOCATION_ERR) {
1153         ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1154         ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1155   } else if (op == MAT_ROW_ORIENTED) {
1156         a->roworiented = PETSC_TRUE;
1157         ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1158         ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1159   } else if (op == MAT_ROWS_SORTED ||
1160              op == MAT_ROWS_UNSORTED ||
1161              op == MAT_SYMMETRIC ||
1162              op == MAT_STRUCTURALLY_SYMMETRIC ||
1163              op == MAT_YES_NEW_DIAGONALS ||
1164              op == MAT_USE_HASH_TABLE) {
1165     PLogInfo(A,"Info:MatSetOption_MPISBAIJ:Option ignored\n");
1166   } else if (op == MAT_COLUMN_ORIENTED) {
1167     a->roworiented = PETSC_FALSE;
1168     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1169     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1170   } else if (op == MAT_IGNORE_OFF_PROC_ENTRIES) {
1171     a->donotstash = PETSC_TRUE;
1172   } else if (op == MAT_NO_NEW_DIAGONALS) {
1173     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1174   } else if (op == MAT_USE_HASH_TABLE) {
1175     a->ht_flag = PETSC_TRUE;
1176   } else {
1177     SETERRQ(PETSC_ERR_SUP,"unknown option");
1178   }
1179   PetscFunctionReturn(0);
1180 }
1181 
1182 #undef __FUNC__
1183 #define __FUNC__ /*<a name=""></a>*/"MatTranspose_MPISBAIJ"
1184 int MatTranspose_MPISBAIJ(Mat A,Mat *matout)
1185 {
1186   PetscFunctionBegin;
1187   SETERRQ(1,"Matrix is symmetric. MatTranspose() should not be called");
1188   /* PetscFunctionReturn(0); */
1189 }
1190 
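/*
   Only symmetric scaling is supported: ll and rr must be the same vector, so
   the result is D*M*D.  The diagonal block is scaled on both sides directly;
   the off-diagonal block B is scaled by the owned diagonal entries on the
   left and, once the scatter of rr completes, by the ghost values of the same
   vector on the right.
*/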
1191 #undef __FUNC__
1192 #define __FUNC__ /*<a name=""></a>*/"MatDiagonalScale_MPISBAIJ"
1193 int MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1194 {
1195   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1196   Mat         a = baij->A,b = baij->B;
1197   int         ierr,s1,s2,s3;
1198 
1199   PetscFunctionBegin;
1200   if (ll != rr) {
1201     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same\n");
1202   }
1203   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1204   if (rr) {
1205     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1206     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1207     /* Overlap communication with computation. */
1208     ierr = VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1209     /*} if (ll) { */
1210     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1211     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1212     ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
1213     /* } */
1214   /* scale  the diagonal block */
1215   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1216 
1217   /* if (rr) { */
1218     /* Do a scatter end and then right scale the off-diagonal block */
1219     ierr = VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1220     ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
1221   }
1222 
1223   PetscFunctionReturn(0);
1224 }
1225 
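/*
   The row indices supplied locally may belong to other processes, so they are
   first routed to their owners: per-destination counts are combined with
   PetscMaxSum_Op so every process learns how many messages to expect and
   their maximum length, receives are posted, the indices are sent, and each
   process then zeroes the rows it owns (l->B first, then l->A) through the
   sequential MatZeroRows routines, optionally inserting the diagonal values.
*/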
1226 #undef __FUNC__
1227 #define __FUNC__ /*<a name=""></a>*/"MatZeroRows_MPISBAIJ"
1228 int MatZeroRows_MPISBAIJ(Mat A,IS is,Scalar *diag)
1229 {
1230   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;
1231   int            i,ierr,N,*rows,*owners = l->rowners,size = l->size;
1232   int            *procs,*nprocs,j,found,idx,nsends,*work,row;
1233   int            nmax,*svalues,*starts,*owner,nrecvs,rank = l->rank;
1234   int            *rvalues,tag = A->tag,count,base,slen,n,*source;
1235   int            *lens,imdex,*lrows,*values,bs=l->bs,rstart_bs=l->rstart_bs;
1236   MPI_Comm       comm = A->comm;
1237   MPI_Request    *send_waits,*recv_waits;
1238   MPI_Status     recv_status,*send_status;
1239   IS             istmp;
1240 
1241   PetscFunctionBegin;
1242   ierr = ISGetSize(is,&N);CHKERRQ(ierr);
1243   ierr = ISGetIndices(is,&rows);CHKERRQ(ierr);
1244 
1245   /*  first count number of contributors to each processor */
1246   nprocs = (int*)PetscMalloc(2*size*sizeof(int));CHKPTRQ(nprocs);
1247   ierr   = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);
1248   procs  = nprocs + size;
1249   owner  = (int*)PetscMalloc((N+1)*sizeof(int));CHKPTRQ(owner); /* see note*/
1250   for (i=0; i<N; i++) {
1251     idx   = rows[i];
1252     found = 0;
1253     for (j=0; j<size; j++) {
1254       if (idx >= owners[j]*bs && idx < owners[j+1]*bs) {
1255         nprocs[j]++; procs[j] = 1; owner[i] = j; found = 1; break;
1256       }
1257     }
1258     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1259   }
1260   nsends = 0;  for (i=0; i<size; i++) { nsends += procs[i];}
1261 
1262   /* inform other processors of number of messages and max length*/
1263   work   = (int*)PetscMalloc(2*size*sizeof(int));CHKPTRQ(work);
1264   ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
1265   nmax   = work[rank];
1266   nrecvs = work[size+rank];
1267   ierr = PetscFree(work);CHKERRQ(ierr);
1268 
1269   /* post receives:   */
1270   rvalues = (int*)PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(int));CHKPTRQ(rvalues);
1271   recv_waits = (MPI_Request*)PetscMalloc((nrecvs+1)*sizeof(MPI_Request));CHKPTRQ(recv_waits);
1272   for (i=0; i<nrecvs; i++) {
1273     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
1274   }
1275 
1276   /* do sends:
1277      1) starts[i] gives the starting index in svalues for stuff going to
1278      the ith processor
1279   */
1280   svalues    = (int*)PetscMalloc((N+1)*sizeof(int));CHKPTRQ(svalues);
1281   send_waits = (MPI_Request*)PetscMalloc((nsends+1)*sizeof(MPI_Request));CHKPTRQ(send_waits);
1282   starts     = (int*)PetscMalloc((size+1)*sizeof(int));CHKPTRQ(starts);
1283   starts[0]  = 0;
1284   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1285   for (i=0; i<N; i++) {
1286     svalues[starts[owner[i]]++] = rows[i];
1287   }
1288   ierr = ISRestoreIndices(is,&rows);CHKERRQ(ierr);
1289 
1290   starts[0] = 0;
1291   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1292   count = 0;
1293   for (i=0; i<size; i++) {
1294     if (procs[i]) {
1295       ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPI_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
1296     }
1297   }
1298   ierr = PetscFree(starts);CHKERRQ(ierr);
1299 
1300   base = owners[rank]*bs;
1301 
1302   /*  wait on receives */
1303   lens   = (int*)PetscMalloc(2*(nrecvs+1)*sizeof(int));CHKPTRQ(lens);
1304   source = lens + nrecvs;
1305   count  = nrecvs; slen = 0;
1306   while (count) {
1307     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
1308     /* unpack receives into our local space */
1309     ierr = MPI_Get_count(&recv_status,MPI_INT,&n);CHKERRQ(ierr);
1310     source[imdex]  = recv_status.MPI_SOURCE;
1311     lens[imdex]    = n;
1312     slen          += n;
1313     count--;
1314   }
1315   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
1316 
1317   /* move the data into the send scatter */
1318   lrows = (int*)PetscMalloc((slen+1)*sizeof(int));CHKPTRQ(lrows);
1319   count = 0;
1320   for (i=0; i<nrecvs; i++) {
1321     values = rvalues + i*nmax;
1322     for (j=0; j<lens[i]; j++) {
1323       lrows[count++] = values[j] - base;
1324     }
1325   }
1326   ierr = PetscFree(rvalues);CHKERRQ(ierr);
1327   ierr = PetscFree(lens);CHKERRQ(ierr);
1328   ierr = PetscFree(owner);CHKERRQ(ierr);
1329   ierr = PetscFree(nprocs);CHKERRQ(ierr);
1330 
1331   /* actually zap the local rows */
1332   ierr = ISCreateGeneral(PETSC_COMM_SELF,slen,lrows,&istmp);CHKERRQ(ierr);
1333   PLogObjectParent(A,istmp);
1334 
1335   /*
1336         Zero the required rows. If the "diagonal block" of the matrix
1337      is square and the user wishes to set the diagonal we use separate
1338      code so that MatSetValues() is not called for each diagonal entry,
1339      which would allocate new memory with many mallocs and slow things down.
1340 
1341        Contributed by: Mathew Knepley
1342   */
1343   /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1344   ierr = MatZeroRows_SeqBAIJ(l->B,istmp,0);CHKERRQ(ierr);
1345   if (diag && (l->A->M == l->A->N)) {
1346     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,diag);CHKERRQ(ierr);
1347   } else if (diag) {
1348     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1349     if (((Mat_SeqSBAIJ*)l->A->data)->nonew) {
1350       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1351 MAT_NO_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1352     }
1353     for (i=0; i<slen; i++) {
1354       row  = lrows[i] + rstart_bs;
1355       ierr = MatSetValues(A,1,&row,1,&row,diag,INSERT_VALUES);CHKERRQ(ierr);
1356     }
1357     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1358     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1359   } else {
1360     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1361   }
1362 
1363   ierr = ISDestroy(istmp);CHKERRQ(ierr);
1364   ierr = PetscFree(lrows);CHKERRQ(ierr);
1365 
1366   /* wait on sends */
1367   if (nsends) {
1368     send_status = (MPI_Status*)PetscMalloc(nsends*sizeof(MPI_Status));CHKPTRQ(send_status);
1369     ierr        = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
1370     ierr        = PetscFree(send_status);CHKERRQ(ierr);
1371   }
1372   ierr = PetscFree(send_waits);CHKERRQ(ierr);
1373   ierr = PetscFree(svalues);CHKERRQ(ierr);
1374 
1375   PetscFunctionReturn(0);
1376 }
1377 
1378 #undef __FUNC__
1379 #define __FUNC__ /*<a name=""></a>*/"MatPrintHelp_MPISBAIJ"
1380 int MatPrintHelp_MPISBAIJ(Mat A)
1381 {
1382   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1383   MPI_Comm    comm = A->comm;
1384   static int  called = 0;
1385   int         ierr;
1386 
1387   PetscFunctionBegin;
1388   if (!a->rank) {
1389     ierr = MatPrintHelp_SeqSBAIJ(a->A);CHKERRQ(ierr);
1390   }
1391   if (called) {PetscFunctionReturn(0);} else called = 1;
1392   ierr = (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");CHKERRQ(ierr);
1393   ierr = (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");CHKERRQ(ierr);
1394   PetscFunctionReturn(0);
1395 }
1396 
1397 #undef __FUNC__
1398 #define __FUNC__ /*<a name=""></a>*/"MatSetUnfactored_MPISBAIJ"
1399 int MatSetUnfactored_MPISBAIJ(Mat A)
1400 {
1401   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1402   int         ierr;
1403 
1404   PetscFunctionBegin;
1405   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1406   PetscFunctionReturn(0);
1407 }
1408 
1409 static int MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1410 
1411 #undef __FUNC__
1412 #define __FUNC__ /*<a name=""></a>*/"MatEqual_MPISBAIJ"
1413 int MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1414 {
1415   Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1416   Mat         a,b,c,d;
1417   PetscTruth  flg;
1418   int         ierr;
1419 
1420   PetscFunctionBegin;
1421   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg);CHKERRQ(ierr);
1422   if (!flg) SETERRQ(PETSC_ERR_ARG_INCOMP,"Matrices must be same type");
1423   a = matA->A; b = matA->B;
1424   c = matB->A; d = matB->B;
1425 
1426   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1427   if (flg == PETSC_TRUE) {
1428     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1429   }
1430   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);CHKERRQ(ierr);
1431   PetscFunctionReturn(0);
1432 }
1433 
1434 #undef __FUNC__
1435 #define __FUNC__ /*<a name="MatSetUpPreallocation_MPISBAIJ"></a>*/"MatSetUpPreallocation_MPISBAIJ"
1436 int MatSetUpPreallocation_MPISBAIJ(Mat A)
1437 {
1438   int        ierr;
1439 
1440   PetscFunctionBegin;
1441   ierr = MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1442   PetscFunctionReturn(0);
1443 }
1444 /* -------------------------------------------------------------------*/
1445 static struct _MatOps MatOps_Values = {
1446   MatSetValues_MPISBAIJ,
1447   MatGetRow_MPISBAIJ,
1448   MatRestoreRow_MPISBAIJ,
1449   MatMult_MPISBAIJ,
1450   MatMultAdd_MPISBAIJ,
1451   MatMultTranspose_MPISBAIJ,
1452   MatMultTransposeAdd_MPISBAIJ,
1453   0,
1454   0,
1455   0,
1456   0,
1457   0,
1458   0,
1459   0,
1460   MatTranspose_MPISBAIJ,
1461   MatGetInfo_MPISBAIJ,
1462   MatEqual_MPISBAIJ,
1463   MatGetDiagonal_MPISBAIJ,
1464   MatDiagonalScale_MPISBAIJ,
1465   MatNorm_MPISBAIJ,
1466   MatAssemblyBegin_MPISBAIJ,
1467   MatAssemblyEnd_MPISBAIJ,
1468   0,
1469   MatSetOption_MPISBAIJ,
1470   MatZeroEntries_MPISBAIJ,
1471   MatZeroRows_MPISBAIJ,
1472   0,
1473   0,
1474   0,
1475   0,
1476   MatSetUpPreallocation_MPISBAIJ,
1477   0,
1478   MatGetOwnershipRange_MPISBAIJ,
1479   0,
1480   0,
1481   0,
1482   0,
1483   MatDuplicate_MPISBAIJ,
1484   0,
1485   0,
1486   0,
1487   0,
1488   0,
1489   MatGetSubMatrices_MPISBAIJ,
1490   MatIncreaseOverlap_MPISBAIJ,
1491   MatGetValues_MPISBAIJ,
1492   0,
1493   MatPrintHelp_MPISBAIJ,
1494   MatScale_MPISBAIJ,
1495   0,
1496   0,
1497   0,
1498   MatGetBlockSize_MPISBAIJ,
1499   0,
1500   0,
1501   0,
1502   0,
1503   0,
1504   0,
1505   MatSetUnfactored_MPISBAIJ,
1506   0,
1507   MatSetValuesBlocked_MPISBAIJ,
1508   0,
1509   0,
1510   0,
1511   MatGetMaps_Petsc,
1512   0,
1513   0,
1514   0,
1515   0,
1516   0,
1517   0,
1518   MatGetRowMax_MPISBAIJ};
1519 
1520 
1521 EXTERN_C_BEGIN
1522 #undef __FUNC__
1523 #define __FUNC__ /*<a name=""></a>*/"MatGetDiagonalBlock_MPISBAIJ"
1524 int MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1525 {
1526   PetscFunctionBegin;
1527   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1528   *iscopy = PETSC_FALSE;
1529   PetscFunctionReturn(0);
1530 }
1531 EXTERN_C_END
1532 
1533 EXTERN_C_BEGIN
1534 #undef __FUNC__
1535 #define __FUNC__ /*<a name=""></a>*/"MatCreate_MPISBAIJ"
1536 int MatCreate_MPISBAIJ(Mat B)
1537 {
1538   Mat_MPISBAIJ *b;
1539   int          ierr;
1540   PetscTruth   flg;
1541 
1542   PetscFunctionBegin;
1543 
1544   B->data = (void*)(b = PetscNew(Mat_MPISBAIJ));CHKPTRQ(b);
1545   ierr    = PetscMemzero(b,sizeof(Mat_MPISBAIJ));CHKERRQ(ierr);
1546   ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1547 
1548   B->ops->destroy    = MatDestroy_MPISBAIJ;
1549   B->ops->view       = MatView_MPISBAIJ;
1550   B->mapping    = 0;
1551   B->factor     = 0;
1552   B->assembled  = PETSC_FALSE;
1553 
1554   B->insertmode = NOT_SET_VALUES;
1555   ierr = MPI_Comm_rank(B->comm,&b->rank);CHKERRQ(ierr);
1556   ierr = MPI_Comm_size(B->comm,&b->size);CHKERRQ(ierr);
1557 
1558   /* build local table of row and column ownerships */
1559   b->rowners = (int*)PetscMalloc(3*(b->size+2)*sizeof(int));CHKPTRQ(b->rowners);
1560   b->cowners    = b->rowners + b->size + 2;
1561   b->rowners_bs = b->cowners + b->size + 2;
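  /* one allocation holds three ownership tables of size+2 entries each: rowners
     (block-row ranges per process), cowners (block-column ranges) and rowners_bs
     (the same ranges in scalar rows, i.e. scaled by the block size) */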
1562   PLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1563 
1564   /* build cache for off array entries formed */
1565   ierr = MatStashCreate_Private(B->comm,1,&B->stash);CHKERRQ(ierr);
1566   b->donotstash  = PETSC_FALSE;
1567   b->colmap      = PETSC_NULL;
1568   b->garray      = PETSC_NULL;
1569   b->roworiented = PETSC_TRUE;
1570 
1571 #if defined(PETSC_USE_MAT_SINGLE)
1572   /* stuff for MatSetValues_XXX in single precision */
1573   b->lensetvalues     = 0;
1574   b->setvaluescopy    = PETSC_NULL;
1575 #endif
1576 
1577   /* stuff used in block assembly */
1578   b->barray       = 0;
1579 
1580   /* stuff used for matrix vector multiply */
1581   b->lvec         = 0;
1582   b->Mvctx        = 0;
1583 
1584   /* stuff for MatGetRow() */
1585   b->rowindices   = 0;
1586   b->rowvalues    = 0;
1587   b->getrowactive = PETSC_FALSE;
1588 
1589   /* hash table stuff */
1590   b->ht           = 0;
1591   b->hd           = 0;
1592   b->ht_size      = 0;
1593   b->ht_flag      = PETSC_FALSE;
1594   b->ht_fact      = 0;
1595   b->ht_total_ct  = 0;
1596   b->ht_insert_ct = 0;
1597 
1598   ierr = OptionsHasName(PETSC_NULL,"-mat_use_hash_table",&flg);CHKERRQ(ierr);
1599   if (flg) {
1600     double fact = 1.39;
1601     ierr = MatSetOption(B,MAT_USE_HASH_TABLE);CHKERRQ(ierr);
1602     ierr = OptionsGetDouble(PETSC_NULL,"-mat_use_hash_table",&fact,PETSC_NULL);CHKERRQ(ierr);
1603     if (fact <= 1.0) fact = 1.39;
1604     ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
1605     PLogInfo(0,"MatCreateMPISBAIJ:Hash table Factor used %5.2f\n",fact);
1606   }
1607   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1608                                      "MatStoreValues_MPISBAIJ",
1609                                      MatStoreValues_MPISBAIJ);CHKERRQ(ierr);
1610   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1611                                      "MatRetrieveValues_MPISBAIJ",
1612                                      MatRetrieveValues_MPISBAIJ);CHKERRQ(ierr);
1613   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1614                                      "MatGetDiagonalBlock_MPISBAIJ",
1615                                      MatGetDiagonalBlock_MPISBAIJ);CHKERRQ(ierr);
1616   PetscFunctionReturn(0);
1617 }
1618 EXTERN_C_END
1619 
1620 #undef __FUNC__
1621 #define __FUNC__ /*<a name=""></a>*/"MatMPISBAIJSetPreallocation"
1622 /*@C
1623    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1624    the user should preallocate the matrix storage by setting the parameters
1625    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1626    performance can be increased by more than a factor of 50.
1627 
1628    Collective on Mat
1629 
1630    Input Parameters:
1631 +  A - the matrix
1632 .  bs   - size of block
1633 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1634            submatrix  (same for all local rows)
1635 .  d_nnz - array containing the number of block nonzeros in the various block rows
1636            of the diagonal portion of the local submatrix (possibly different for each block
1637            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1638 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1639            submatrix (same for all local rows).
1640 -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1641            off-diagonal portion of the local submatrix (possibly different for
1642            each block row) or PETSC_NULL.
1643 
1644 
1645    Options Database Keys:
1646 .   -mat_no_unroll - uses code that does not unroll the loops in the
1647                      block calculations (much slower)
1648 .   -mat_block_size - size of the blocks to use
1649 
1650    Notes:
1651 
1652    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1653    then it must be used on all processors that share the object for that argument.
1654 
1655    Storage Information:
1656    For a square global matrix we define each processor's diagonal portion
1657    to be its local rows and the corresponding columns (a square submatrix);
1658    each processor's off-diagonal portion encompasses the remainder of the
1659    local matrix (a rectangular submatrix).
1660 
1661    The user can specify preallocated storage for the diagonal part of
1662    the local submatrix with either d_nz or d_nnz (not both).  Set
1663    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1664    memory allocation.  Likewise, specify preallocated storage for the
1665    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1666 
1667    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1668    the figure below we depict these three local rows and all columns (0-11).
1669 
1670 .vb
1671            0 1 2 3 4 5 6 7 8 9 10 11
1672           -------------------
1673    row 3  |  o o o d d d o o o o o o
1674    row 4  |  o o o d d d o o o o o o
1675    row 5  |  o o o d d d o o o o o o
1676           -------------------
1677 .ve
1678 
1679    Thus, any entries in the d locations are stored in the d (diagonal)
1680    submatrix, and any entries in the o locations are stored in the
1681    o (off-diagonal) submatrix.  Note that the d submatrix is stored in the
1682    MATSEQSBAIJ format and the o submatrix in the MATSEQBAIJ format for compressed row storage.
1683 
1684    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1685    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1686    In general, for PDE problems in which most nonzeros are near the diagonal,
1687    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1688    or you will get TERRIBLE performance; see the users' manual chapter on
1689    matrices.
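
   Example of the two calling forms (a sketch only; the matrix A, the block size bs
   and the nonzero counts are illustrative, not prescriptive):

.vb
   /* uniform preallocation: at most 5 diagonal-portion and 2 off-diagonal-portion
      block nonzeros per block row */
   ierr = MatMPISBAIJSetPreallocation(A,bs,5,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);

   /* per-block-row counts, with d_nnz[] and o_nnz[] of length (local rows)/bs */
   ierr = MatMPISBAIJSetPreallocation(A,bs,0,d_nnz,0,o_nnz);CHKERRQ(ierr);
.ve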
1690 
1691    Level: intermediate
1692 
1693 .keywords: matrix, block, aij, compressed row, sparse, parallel
1694 
1695 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1696 @*/
1697 
1698 int MatMPISBAIJSetPreallocation(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1699 {
1700   Mat_MPISBAIJ *b;
1701   int          ierr,i,mbs,Mbs=PETSC_DECIDE;
1702   PetscTruth   flg2;
1703 
1704   PetscFunctionBegin;
1705   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg2);CHKERRQ(ierr);
1706   if (!flg2) PetscFunctionReturn(0);
1707 
1708   ierr = OptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1709 
1710   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1711   if (d_nz < -2) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than -2: value %d",d_nz);
1712   if (o_nz < -2) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than -2: value %d",o_nz);
1713   if (d_nnz) {
1714     for (i=0; i<B->m/bs; i++) {
1715       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than -1: local row %d value %d",i,d_nnz[i]);
1716     }
1717   }
1718   if (o_nnz) {
1719     for (i=0; i<B->m/bs; i++) {
1720       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than -1: local row %d value %d",i,o_nnz[i]);
1721     }
1722   }
1723   B->preallocated = PETSC_TRUE;
1724   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);CHKERRQ(ierr);
1725   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);CHKERRQ(ierr);
1726   ierr = MapCreateMPI(B->comm,B->m,B->M,&B->rmap);CHKERRQ(ierr);
1727   ierr = MapCreateMPI(B->comm,B->m,B->M,&B->cmap);CHKERRQ(ierr);
1728 
1729   b   = (Mat_MPISBAIJ*)B->data;
1730   mbs = B->m/bs;
1731   Mbs = B->M/bs;
1732   if (mbs*bs != B->m) {
1733     SETERRQ2(PETSC_ERR_ARG_SIZ,"No of local rows %d must be divisible by blocksize %d",B->m,bs);
1734   }
1735 
1736   b->bs  = bs;
1737   b->bs2 = bs*bs;
1738   b->mbs = mbs;
1739   b->nbs = mbs;
1740   b->Mbs = Mbs;
1741   b->Nbs = Mbs;
1742 
1743   ierr = MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
1744   b->rowners[0]    = 0;
1745   for (i=2; i<=b->size; i++) {
1746     b->rowners[i] += b->rowners[i-1];
1747   }
1748   b->rstart    = b->rowners[b->rank];
1749   b->rend      = b->rowners[b->rank+1];
1750   b->cstart    = b->rstart;
1751   b->cend      = b->rend;
1752   for (i=0; i<=b->size; i++) {
1753     b->rowners_bs[i] = b->rowners[i]*bs;
1754   }
1755   b->rstart_bs = b->rstart*bs;
1756   b->rend_bs   = b->rend*bs;
1757 
1758   b->cstart_bs = b->cstart*bs;
1759   b->cend_bs   = b->cend*bs;
1760 
1761 
1762   if (d_nz == PETSC_DEFAULT) d_nz = 5;
1763   ierr = MatCreateSeqSBAIJ(PETSC_COMM_SELF,bs,B->m,B->m,d_nz,d_nnz,&b->A);CHKERRQ(ierr);
1764   PLogObjectParent(B,b->A);
1765   if (o_nz == PETSC_DEFAULT) o_nz = 0;
1766   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->M,o_nz,o_nnz,&b->B);CHKERRQ(ierr);
1767   PLogObjectParent(B,b->B);
1768 
1769   /* build cache for off array entries formed */
1770   ierr = MatStashCreate_Private(B->comm,bs,&B->bstash);CHKERRQ(ierr);
1771 
1772   PetscFunctionReturn(0);
1773 }
1774 
1775 #undef __FUNC__
1776 #define __FUNC__ /*<a name=""></a>*/"MatCreateMPISBAIJ"
1777 /*@C
1778    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1779    (block compressed row).  For good matrix assembly performance
1780    the user should preallocate the matrix storage by setting the parameters
1781    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1782    performance can be increased by more than a factor of 50.
1783 
1784    Collective on MPI_Comm
1785 
1786    Input Parameters:
1787 +  comm - MPI communicator
1788 .  bs   - size of block
1789 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1790            This value should be the same as the local size used in creating the
1791            y vector for the matrix-vector product y = Ax.
1792 .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1793            This value should be the same as the local size used in creating the
1794            x vector for the matrix-vector product y = Ax.
1795 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1796 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1797 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1798            submatrix  (same for all local rows)
1799 .  d_nnz - array containing the number of block nonzeros in the various block rows
1800            of the diagonal portion of the local submatrix (possibly different for each block
1801            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1802 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1803            submatrix (same for all local rows).
1804 -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1805            off-diagonal portion of the local submatrix (possibly different for
1806            each block row) or PETSC_NULL.
1807 
1808    Output Parameter:
1809 .  A - the matrix
1810 
1811    Options Database Keys:
1812 .   -mat_no_unroll - uses code that does not unroll the loops in the
1813                      block calculations (much slower)
1814 .   -mat_block_size - size of the blocks to use
1815 .   -mat_mpi - use the parallel matrix data structures even on one processor
1816                (defaults to using SeqBAIJ format on one processor)
1817 
1818    Notes:
1819    The user MUST specify either the local or global matrix dimensions
1820    (possibly both).
1821 
1822    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1823    then it must be used on all processors that share the object for that argument.
1824 
1825    Storage Information:
1826    For a square global matrix we define each processor's diagonal portion
1827    to be its local rows and the corresponding columns (a square submatrix);
1828    each processor's off-diagonal portion encompasses the remainder of the
1829    local matrix (a rectangular submatrix).
1830 
1831    The user can specify preallocated storage for the diagonal part of
1832    the local submatrix with either d_nz or d_nnz (not both).  Set
1833    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1834    memory allocation.  Likewise, specify preallocated storage for the
1835    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1836 
1837    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1838    the figure below we depict these three local rows and all columns (0-11).
1839 
1840 .vb
1841            0 1 2 3 4 5 6 7 8 9 10 11
1842           -------------------
1843    row 3  |  o o o d d d o o o o o o
1844    row 4  |  o o o d d d o o o o o o
1845    row 5  |  o o o d d d o o o o o o
1846           -------------------
1847 .ve
1848 
1849    Thus, any entries in the d locations are stored in the d (diagonal)
1850    submatrix, and any entries in the o locations are stored in the
1851    o (off-diagonal) submatrix.  Note that the d submatrix is stored in the
1852    MATSEQSBAIJ format and the o submatrix in the MATSEQBAIJ format for compressed row storage.
1853 
1854    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1855    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1856    In general, for PDE problems in which most nonzeros are near the diagonal,
1857    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1858    or you will get TERRIBLE performance; see the users' manual chapter on
1859    matrices.
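
   Example of a typical call (a sketch only; the communicator, block size and
   preallocation counts shown are illustrative):

.vb
   Mat A;
   ierr = MatCreateMPISBAIJ(PETSC_COMM_WORLD,bs,PETSC_DECIDE,PETSC_DECIDE,M,N,
                            5,PETSC_NULL,2,PETSC_NULL,&A);CHKERRQ(ierr);
.ve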
1860 
1861    Level: intermediate
1862 
1863 .keywords: matrix, block, aij, compressed row, sparse, parallel
1864 
1865 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1866 @*/
1867 
1868 int MatCreateMPISBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,int *d_nnz,int o_nz,int *o_nnz,Mat *A)
1869 {
1870   int ierr,size;
1871 
1872   PetscFunctionBegin;
1873   ierr = MatCreate(comm,m,n,M,N,A);CHKERRQ(ierr);
1874   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1875   if (size > 1) {
1876     ierr = MatSetType(*A,MATMPISBAIJ);CHKERRQ(ierr);
1877     ierr = MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
1878   } else {
1879     ierr = MatSetType(*A,MATSEQSBAIJ);CHKERRQ(ierr);
1880     ierr = MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
1881   }
1882   PetscFunctionReturn(0);
1883 }
1884 
1885 
1886 #undef __FUNC__
1887 #define __FUNC__ /*<a name=""></a>*/"MatDuplicate_MPISBAIJ"
1888 static int MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1889 {
1890   Mat          mat;
1891   Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1892   int          ierr,len=0;
1893 
1894   PetscFunctionBegin;
1895   *newmat       = 0;
1896   ierr = MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);CHKERRQ(ierr);
1897   ierr = MatSetType(mat,MATMPISBAIJ);CHKERRQ(ierr);
1898   mat->preallocated = PETSC_TRUE;
1899   a = (Mat_MPISBAIJ*)mat->data;
1900   a->bs  = oldmat->bs;
1901   a->bs2 = oldmat->bs2;
1902   a->mbs = oldmat->mbs;
1903   a->nbs = oldmat->nbs;
1904   a->Mbs = oldmat->Mbs;
1905   a->Nbs = oldmat->Nbs;
1906 
1907   a->rstart       = oldmat->rstart;
1908   a->rend         = oldmat->rend;
1909   a->cstart       = oldmat->cstart;
1910   a->cend         = oldmat->cend;
1911   a->size         = oldmat->size;
1912   a->rank         = oldmat->rank;
1913   a->donotstash   = oldmat->donotstash;
1914   a->roworiented  = oldmat->roworiented;
1915   a->rowindices   = 0;
1916   a->rowvalues    = 0;
1917   a->getrowactive = PETSC_FALSE;
1918   a->barray       = 0;
1919   a->rstart_bs    = oldmat->rstart_bs;
1920   a->rend_bs      = oldmat->rend_bs;
1921   a->cstart_bs    = oldmat->cstart_bs;
1922   a->cend_bs      = oldmat->cend_bs;
1923 
1924   /* hash table stuff */
1925   a->ht           = 0;
1926   a->hd           = 0;
1927   a->ht_size      = 0;
1928   a->ht_flag      = oldmat->ht_flag;
1929   a->ht_fact      = oldmat->ht_fact;
1930   a->ht_total_ct  = 0;
1931   a->ht_insert_ct = 0;
1932 
1933   a->rowners = (int*)PetscMalloc(3*(a->size+2)*sizeof(int));CHKPTRQ(a->rowners);
1934   PLogObjectMemory(mat,3*(a->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1935   a->cowners    = a->rowners + a->size + 2;
1936   a->rowners_bs = a->cowners + a->size + 2;
1937   ierr = PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
1938   ierr = MatStashCreate_Private(matin->comm,1,&mat->stash);CHKERRQ(ierr);
1939   ierr = MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);CHKERRQ(ierr);
1940   if (oldmat->colmap) {
1941 #if defined (PETSC_USE_CTABLE)
1942   ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
1943 #else
1944     a->colmap = (int*)PetscMalloc((a->Nbs)*sizeof(int));CHKPTRQ(a->colmap);
1945     PLogObjectMemory(mat,(a->Nbs)*sizeof(int));
1946     ierr      = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));CHKERRQ(ierr);
1947 #endif
1948   } else a->colmap = 0;
1949   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
1950     a->garray = (int*)PetscMalloc(len*sizeof(int));CHKPTRQ(a->garray);
1951     PLogObjectMemory(mat,len*sizeof(int));
1952     ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));CHKERRQ(ierr);
1953   } else a->garray = 0;
1954 
1955   ierr =  VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
1956   PLogObjectParent(mat,a->lvec);
1957   ierr =  VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
1958 
1959   PLogObjectParent(mat,a->Mvctx);
1960   ierr =  MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
1961   PLogObjectParent(mat,a->A);
1962   ierr =  MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
1963   PLogObjectParent(mat,a->B);
1964   ierr = FListDuplicate(matin->qlist,&mat->qlist);CHKERRQ(ierr);
1965   *newmat = mat;
1966   PetscFunctionReturn(0);
1967 }
1968 
1969 #include "petscsys.h"
1970 
1971 EXTERN_C_BEGIN
1972 #undef __FUNC__
1973 #define __FUNC__ /*<a name=""></a>*/"MatLoad_MPISBAIJ"
1974 int MatLoad_MPISBAIJ(Viewer viewer,MatType type,Mat *newmat)
1975 {
1976   Mat          A;
1977   int          i,nz,ierr,j,rstart,rend,fd;
1978   Scalar       *vals,*buf;
1979   MPI_Comm     comm = ((PetscObject)viewer)->comm;
1980   MPI_Status   status;
1981   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
1982   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
1983   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
1984   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
1985   int          dcount,kmax,k,nzcount,tmp;
1986 
1987   PetscFunctionBegin;
1988   ierr = OptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1989 
1990   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1991   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1992   if (!rank) {
1993     ierr = ViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
1994     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
1995     if (header[0] != MAT_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
1996     if (header[3] < 0) {
1997       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
1998     }
1999   }
2000 
2001   ierr = MPI_Bcast(header+1,3,MPI_INT,0,comm);CHKERRQ(ierr);
2002   M = header[1]; N = header[2];
2003 
2004   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2005 
2006   /*
2007      This code adds extra rows to make sure the number of rows is
2008      divisible by the blocksize
2009   */
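  /* e.g. (a sketch): M = 10 with bs = 3 gives Mbs = 3 and extra_rows = 3 - 10 + 9 = 2,
     so Mbs is bumped to 4 and two padded rows (with a unit diagonal entry) are appended */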
2010   Mbs        = M/bs;
2011   extra_rows = bs - M + bs*(Mbs);
2012   if (extra_rows == bs) extra_rows = 0;
2013   else                  Mbs++;
2014   if (extra_rows && !rank) {
2015     PLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2016   }
2017 
2018   /* determine ownership of all rows */
2019   mbs = Mbs/size + ((Mbs % size) > rank);
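  /* e.g. (a sketch): Mbs = 10 block rows over size = 4 processes gives mbs = 3,3,2,2 on ranks 0..3 */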
2020   m   = mbs*bs;
2021   rowners = (int*)PetscMalloc(2*(size+2)*sizeof(int));CHKPTRQ(rowners);
2022   browners = rowners + size + 1;
2023   ierr = MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
2024   rowners[0] = 0;
2025   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2026   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2027   rstart = rowners[rank];
2028   rend   = rowners[rank+1];
2029 
2030   /* distribute row lengths to all processors */
2031   locrowlens = (int*)PetscMalloc((rend-rstart)*bs*sizeof(int));CHKPTRQ(locrowlens);
2032   if (!rank) {
2033     rowlengths = (int*)PetscMalloc((M+extra_rows)*sizeof(int));CHKPTRQ(rowlengths);
2034     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
2035     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2036     sndcounts = (int*)PetscMalloc(size*sizeof(int));CHKPTRQ(sndcounts);
2037     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2038     ierr = MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2039     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
2040   } else {
2041     ierr = MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2042   }
2043 
2044   if (!rank) {   /* procs[0] */
2045     /* calculate the number of nonzeros on each processor */
2046     procsnz = (int*)PetscMalloc(size*sizeof(int));CHKPTRQ(procsnz);
2047     ierr    = PetscMemzero(procsnz,size*sizeof(int));CHKERRQ(ierr);
2048     for (i=0; i<size; i++) {
2049       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2050         procsnz[i] += rowlengths[j];
2051       }
2052     }
2053     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2054 
2055     /* determine max buffer needed and allocate it */
2056     maxnz = 0;
2057     for (i=0; i<size; i++) {
2058       maxnz = PetscMax(maxnz,procsnz[i]);
2059     }
2060     cols = (int*)PetscMalloc(maxnz*sizeof(int));CHKPTRQ(cols);
2061 
2062     /* read in my part of the matrix column indices  */
2063     nz = procsnz[0];
2064     ibuf = (int*)PetscMalloc(nz*sizeof(int));CHKPTRQ(ibuf);
2065     mycols = ibuf;
2066     if (size == 1)  nz -= extra_rows;
2067     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2068     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2069 
2070     /* read in everyone else's part (except the last proc's) and ship it off */
2071     for (i=1; i<size-1; i++) {
2072       nz   = procsnz[i];
2073       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2074       ierr = MPI_Send(cols,nz,MPI_INT,i,tag,comm);CHKERRQ(ierr);
2075     }
2076     /* read in the stuff for the last proc */
2077     if (size != 1) {
2078       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2079       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2080       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2081       ierr = MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);CHKERRQ(ierr);
2082     }
2083     ierr = PetscFree(cols);CHKERRQ(ierr);
2084   } else {  /* procs[i], i>0 */
2085     /* determine buffer space needed for message */
2086     nz = 0;
2087     for (i=0; i<m; i++) {
2088       nz += locrowlens[i];
2089     }
2090     ibuf   = (int*)PetscMalloc(nz*sizeof(int));CHKPTRQ(ibuf);
2091     mycols = ibuf;
2092     /* receive message of column indices*/
2093     ierr = MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
2094     ierr = MPI_Get_count(&status,MPI_INT,&maxnz);CHKERRQ(ierr);
2095     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2096   }
2097 
2098   /* loop over local rows, determining number of off diagonal entries */
2099   dlens  = (int*)PetscMalloc(2*(rend-rstart+1)*sizeof(int));CHKPTRQ(dlens);
2100   odlens = dlens + (rend-rstart);
2101   mask   = (int*)PetscMalloc(3*Mbs*sizeof(int));CHKPTRQ(mask);
2102   ierr   = PetscMemzero(mask,3*Mbs*sizeof(int));CHKERRQ(ierr);
2103   masked1 = mask    + Mbs;
2104   masked2 = masked1 + Mbs;
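  /* mask[] marks block columns already seen in the current block row; masked1[] lists the
     marked columns that fall in the diagonal portion and masked2[] those in the off-diagonal
     portion, so the marks can be cleared cheaply at the end of each block row */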
2105   rowcount = 0; nzcount = 0;
2106   for (i=0; i<mbs; i++) {
2107     dcount  = 0;
2108     odcount = 0;
2109     for (j=0; j<bs; j++) {
2110       kmax = locrowlens[rowcount];
2111       for (k=0; k<kmax; k++) {
2112         tmp = mycols[nzcount++]/bs; /* block col. index */
2113         if (!mask[tmp]) {
2114           mask[tmp] = 1;
2115           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2116           else masked1[dcount++] = tmp; /* entry in diag portion */
2117         }
2118       }
2119       rowcount++;
2120     }
2121 
2122     dlens[i]  = dcount;  /* d_nzz[i] */
2123     odlens[i] = odcount; /* o_nzz[i] */
2124 
2125     /* zero out the mask elements we set */
2126     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2127     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2128   }
2129 
2130   /* create our matrix */
2131   ierr = MatCreateMPISBAIJ(comm,bs,m,m,PETSC_DETERMINE,PETSC_DETERMINE,0,dlens,0,odlens,newmat);
2132   CHKERRQ(ierr);
2133   A = *newmat;
2134   ierr = MatSetOption(A,MAT_COLUMNS_SORTED);CHKERRQ(ierr);
2135 
2136   if (!rank) {
2137     buf = (Scalar*)PetscMalloc(maxnz*sizeof(Scalar));CHKPTRQ(buf);
2138     /* read in my part of the matrix numerical values  */
2139     nz = procsnz[0];
2140     vals = buf;
2141     mycols = ibuf;
2142     if (size == 1)  nz -= extra_rows;
2143     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2144     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2145 
2146     /* insert into matrix */
2147     jj      = rstart*bs;
2148     for (i=0; i<m; i++) {
2149       ierr = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2150       mycols += locrowlens[i];
2151       vals   += locrowlens[i];
2152       jj++;
2153     }
2154 
2155     /* read in other processors (except the last one) and ship out */
2156     for (i=1; i<size-1; i++) {
2157       nz   = procsnz[i];
2158       vals = buf;
2159       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2160       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);CHKERRQ(ierr);
2161     }
2162     /* the last proc */
2163     if (size != 1) {
2164       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2165       vals = buf;
2166       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2167       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2168       ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);CHKERRQ(ierr);
2169     }
2170     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2171 
2172   } else {
2173     /* receive numeric values */
2174     buf = (Scalar*)PetscMalloc(nz*sizeof(Scalar));CHKPTRQ(buf);
2175 
2176     /* receive message of values*/
2177     vals   = buf;
2178     mycols = ibuf;
2179     ierr   = MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);CHKERRQ(ierr);
2180     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2181     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2182 
2183     /* insert into matrix */
2184     jj      = rstart*bs;
2185     for (i=0; i<m; i++) {
2186       ierr    = MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2187       mycols += locrowlens[i];
2188       vals   += locrowlens[i];
2189       jj++;
2190     }
2191   }
2192 
2193   ierr = PetscFree(locrowlens);CHKERRQ(ierr);
2194   ierr = PetscFree(buf);CHKERRQ(ierr);
2195   ierr = PetscFree(ibuf);CHKERRQ(ierr);
2196   ierr = PetscFree(rowners);CHKERRQ(ierr);
2197   ierr = PetscFree(dlens);CHKERRQ(ierr);
2198   ierr = PetscFree(mask);CHKERRQ(ierr);
2199   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2200   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2201   PetscFunctionReturn(0);
2202 }
2203 EXTERN_C_END
2204 
2205 #undef __FUNC__
2206 #define __FUNC__ /*<a name=""></a>*/"MatMPISBAIJSetHashTableFactor"
2207 /*@
2208    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2209 
2210    Collective on Mat
2211 
2212    Input Parameters:
2213 +  mat  - the matrix
2214 -  fact - factor used to compute the hash table size
2215 
2216    Level: advanced
2217 
2218   Notes:
2219    This can also be set by the command line option: -mat_use_hash_table fact
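
   Example (a sketch of the options-database form; the executable name is illustrative):
.vb
   mpirun -np 4 ./myprog -mat_use_hash_table 1.5
.ve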
2220 
2221 .keywords: matrix, hashtable, factor, HT
2222 
2223 .seealso: MatSetOption()
2224 @*/
2225 int MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2226 {
2227   PetscFunctionBegin;
2228   SETERRQ(1,"Function not yet written for SBAIJ format");
2229   /* PetscFunctionReturn(0); */
2230 }
2231 
2232 #undef __FUNC__
2233 #define __FUNC__ /*<a name=""></a>*/"MatGetRowMax_MPISBAIJ"
2234 int MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2235 {
2236   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2237   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(a->B)->data;
2238   PetscReal    atmp;
2239   PetscReal    *work,*svalues,*rvalues;
2240   int          ierr,i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2241   int          rank,size,*rowners_bs,dest,count,source;
2242   Scalar       *ba,*va;
2243   MPI_Status   stat;
2244 
2245   PetscFunctionBegin;
2246   ierr = MatGetRowMax(a->A,v);CHKERRQ(ierr);
2247   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2248 
2249   ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
2250   ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
2251 
2252   bs   = a->bs;
2253   mbs  = a->mbs;
2254   Mbs  = a->Mbs;
2255   ba   = b->a;
2256   bi   = b->i;
2257   bj   = b->j;
2258   /*
2259   PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] M: %d, bs: %d, mbs: %d \n",rank,bs*Mbs,bs,mbs);
2260   PetscSynchronizedFlush(PETSC_COMM_WORLD);
2261   */
2262 
2263   /* find ownerships */
2264   rowners_bs = a->rowners_bs;
2265   /*
2266   if (!rank){
2267     for (i=0; i<size+1; i++) PetscPrintf(PETSC_COMM_SELF," rowners_bs[%d]: %d\n",i,rowners_bs[i]);
2268   }
2269   */
2270 
2271   /* each proc creates an array to be distributed */
2272   work  = (PetscReal*)PetscMalloc(bs*Mbs*sizeof(PetscReal));CHKPTRQ(work);
2273   ierr  = PetscMemzero(work,bs*Mbs*sizeof(PetscReal));CHKERRQ(ierr);
2274 
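  /* Because only the upper triangle is stored, an entry of B on this process also belongs
     (by symmetry) to a row owned by a later process; its magnitude is accumulated in work[]
     by global column and the relevant segments are sent to the owning processes below */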
2275   /* row_max for B */
2276   if (rank != size-1){
2277     for (i=0; i<mbs; i++) {
2278       ncols = bi[1] - bi[0]; bi++;
2279       brow  = bs*i;
2280       for (j=0; j<ncols; j++){
2281         bcol = bs*(*bj);
2282         for (kcol=0; kcol<bs; kcol++){
2283           col = bcol + kcol;                 /* local col index */
2284           col += rowners_bs[rank+1];      /* global col index */
2285           /* PetscPrintf(PETSC_COMM_SELF,"[%d], col: %d\n",rank,col); */
2286           for (krow=0; krow<bs; krow++){
2287             atmp = PetscAbsScalar(*ba); ba++;
2288             row = brow + krow;    /* local row index */
2289             /* printf("val[%d,%d]: %g\n",row,col,atmp); */
2290             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2291             if (work[col] < atmp) work[col] = atmp;
2292           }
2293         }
2294         bj++;
2295       }
2296     }
2297     /*
2298       PetscPrintf(PETSC_COMM_SELF,"[%d], work: ",rank);
2299       for (i=0; i<bs*Mbs; i++) PetscPrintf(PETSC_COMM_SELF,"%g ",work[i]);
2300       PetscPrintf(PETSC_COMM_SELF,"[%d]: \n");
2301       */
2302 
2303     /* send values to its owners */
2304     for (dest=rank+1; dest<size; dest++){
2305       svalues = work + rowners_bs[dest];
2306       count = rowners_bs[dest+1]-rowners_bs[dest];
2307       ierr = MPI_Send(svalues,count,MPI_DOUBLE,dest,rank,PETSC_COMM_WORLD);CHKERRQ(ierr);
2308       /*
2309       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] sends %d values to [%d]: %g, %g, %g, %g\n",rank,count,dest,svalues[0],svalues[1],svalues[2],svalues[3]);
2310       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2311       */
2312     }
2313   }
2314 
2315   /* receive values */
2316   if (rank){
2317     rvalues = work;
2318     count = rowners_bs[rank+1]-rowners_bs[rank];
2319     for (source=0; source<rank; source++){
2320       ierr = MPI_Recv(rvalues,count,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,PETSC_COMM_WORLD,&stat);CHKERRQ(ierr);
2321       /* process values */
2322       for (i=0; i<count; i++){
2323         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2324       }
2325       /*
2326       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] received %d values from [%d]: %g, %g, %g, %g \n",rank,count,stat.MPI_SOURCE,rvalues[0],rvalues[1],rvalues[2],rvalues[3]);
2327       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2328       */
2329     }
2330   }
2331 
2332   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2333   ierr = PetscFree(work);CHKERRQ(ierr);
2334   PetscFunctionReturn(0);
2335 }
2336