xref: /petsc/src/mat/impls/sbaij/mpi/mpisbaij.c (revision b0a32e0c6855ee6a6cd3495fa7da12ea9885bc5d)
1 /*$Id: mpisbaij.c,v 1.38 2000/11/28 17:29:39 bsmith Exp bsmith $*/
2 
3 #include "src/mat/impls/baij/mpi/mpibaij.h"    /*I "petscmat.h" I*/
4 #include "src/vec/vecimpl.h"
5 #include "mpisbaij.h"
6 #include "src/mat/impls/sbaij/seq/sbaij.h"
7 
8 extern int MatSetUpMultiply_MPISBAIJ(Mat);
9 extern int DisAssemble_MPISBAIJ(Mat);
10 extern int MatIncreaseOverlap_MPISBAIJ(Mat,int,IS *,int);
11 extern int MatGetSubMatrices_MPISBAIJ(Mat,int,IS *,IS *,MatReuse,Mat **);
12 extern int MatGetValues_SeqSBAIJ(Mat,int,int *,int,int *,Scalar *);
13 extern int MatSetValues_SeqSBAIJ(Mat,int,int *,int,int *,Scalar *,InsertMode);
14 extern int MatSetValuesBlocked_SeqSBAIJ(Mat,int,int*,int,int*,Scalar*,InsertMode);
15 extern int MatGetRow_SeqSBAIJ(Mat,int,int*,int**,Scalar**);
16 extern int MatRestoreRow_SeqSBAIJ(Mat,int,int*,int**,Scalar**);
17 extern int MatPrintHelp_SeqSBAIJ(Mat);
18 extern int MatZeroRows_SeqSBAIJ(Mat,IS,Scalar*);
19 extern int MatZeroRows_SeqBAIJ(Mat,IS,Scalar *);
20 extern int MatGetRowMax_MPISBAIJ(Mat,Vec);
21 
22 /*  UGLY, ugly, ugly
23    When MatScalar == Scalar the function MatSetValuesBlocked_MPIBAIJ_MatScalar() does
24    not exist. Otherwise ..._MatScalar() takes matrix elements in single precision and
25    inserts them into the single precision data structure. The function MatSetValuesBlocked_MPIBAIJ()
26    converts the entries into single precision and then calls ..._MatScalar() to put them
27    into the single precision data structures.
28 */
29 #if defined(PETSC_USE_MAT_SINGLE)
30 extern int MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
31 extern int MatSetValues_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
32 extern int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
33 extern int MatSetValues_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
34 extern int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
35 #else
36 #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
37 #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
38 #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
39 #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
40 #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
41 #endif
42 
43 EXTERN_C_BEGIN
44 #undef __FUNC__
45 #define __FUNC__ "MatStoreValues_MPISBAIJ"
46 int MatStoreValues_MPISBAIJ(Mat mat)
47 {
48   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
49   int         ierr;
50 
51   PetscFunctionBegin;
52   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
53   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
54   PetscFunctionReturn(0);
55 }
56 EXTERN_C_END
57 
58 EXTERN_C_BEGIN
59 #undef __FUNC__
60 #define __FUNC__ "MatRetrieveValues_MPISBAIJ"
61 int MatRetrieveValues_MPISBAIJ(Mat mat)
62 {
63   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
64   int         ierr;
65 
66   PetscFunctionBegin;
67   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
68   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
69   PetscFunctionReturn(0);
70 }
71 EXTERN_C_END
72 
73 /*
74      Local utility routine that creates a mapping from the global column
75    number to the local number in the off-diagonal part of the local
76    storage of the matrix.  This is done in a non scable way since the
77    length of colmap equals the global matrix length.
78 */
79 #undef __FUNC__
80 #define __FUNC__ "CreateColmap_MPISBAIJ_Private"
81 static int CreateColmap_MPISBAIJ_Private(Mat mat)
82 {
83   PetscFunctionBegin;
84   SETERRQ(1,"Function not yet written for SBAIJ format");
85   /* PetscFunctionReturn(0); */
86 }
87 
88 #define CHUNKSIZE  10
89 
90 #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
91 { \
92  \
93     brow = row/bs;  \
94     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
95     rmax = aimax[brow]; nrow = ailen[brow]; \
96       bcol = col/bs; \
97       ridx = row % bs; cidx = col % bs; \
98       low = 0; high = nrow; \
99       while (high-low > 3) { \
100         t = (low+high)/2; \
101         if (rp[t] > bcol) high = t; \
102         else              low  = t; \
103       } \
104       for (_i=low; _i<high; _i++) { \
105         if (rp[_i] > bcol) break; \
106         if (rp[_i] == bcol) { \
107           bap  = ap +  bs2*_i + bs*cidx + ridx; \
108           if (addv == ADD_VALUES) *bap += value;  \
109           else                    *bap  = value;  \
110           goto a_noinsert; \
111         } \
112       } \
113       if (a->nonew == 1) goto a_noinsert; \
114       else if (a->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
115       if (nrow >= rmax) { \
116         /* there is no extra room in row, therefore enlarge */ \
117         int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
118         MatScalar *new_a; \
119  \
120         if (a->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
121  \
122         /* malloc new storage space */ \
123         len     = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
124 ierr = PetscMalloc(len,&(        new_a   ));CHKERRQ(ierr); \
125         new_j   = (int*)(new_a + bs2*new_nz); \
126         new_i   = new_j + new_nz; \
127  \
128         /* copy over old data into new slots */ \
129         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
130         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
131         ierr = PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
132         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
133         ierr = PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
134         ierr = PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
135         ierr = PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(Scalar));CHKERRQ(ierr); \
136         ierr = PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
137                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
138         /* free up old matrix storage */ \
139         ierr = PetscFree(a->a);CHKERRQ(ierr);  \
140         if (!a->singlemalloc) { \
141           ierr = PetscFree(a->i);CHKERRQ(ierr); \
142           ierr = PetscFree(a->j);CHKERRQ(ierr);\
143         } \
144         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
145         a->singlemalloc = PETSC_TRUE; \
146  \
147         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
148         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
149         PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
150         a->s_maxnz += bs2*CHUNKSIZE; \
151         a->reallocs++; \
152         a->s_nz++; \
153       } \
154       N = nrow++ - 1;  \
155       /* shift up all the later entries in this row */ \
156       for (ii=N; ii>=_i; ii--) { \
157         rp[ii+1] = rp[ii]; \
158         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
159       } \
160       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); }  \
161       rp[_i]                      = bcol;  \
162       ap[bs2*_i + bs*cidx + ridx] = value;  \
163       a_noinsert:; \
164     ailen[brow] = nrow; \
165 }
166 #ifndef MatSetValues_SeqBAIJ_B_Private
167 #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
168 { \
169     brow = row/bs;  \
170     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
171     rmax = bimax[brow]; nrow = bilen[brow]; \
172       bcol = col/bs; \
173       ridx = row % bs; cidx = col % bs; \
174       low = 0; high = nrow; \
175       while (high-low > 3) { \
176         t = (low+high)/2; \
177         if (rp[t] > bcol) high = t; \
178         else              low  = t; \
179       } \
180       for (_i=low; _i<high; _i++) { \
181         if (rp[_i] > bcol) break; \
182         if (rp[_i] == bcol) { \
183           bap  = ap +  bs2*_i + bs*cidx + ridx; \
184           if (addv == ADD_VALUES) *bap += value;  \
185           else                    *bap  = value;  \
186           goto b_noinsert; \
187         } \
188       } \
189       if (b->nonew == 1) goto b_noinsert; \
190       else if (b->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
191       if (nrow >= rmax) { \
192         /* there is no extra room in row, therefore enlarge */ \
193         int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
194         MatScalar *new_a; \
195  \
196         if (b->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
197  \
198         /* malloc new storage space */ \
199         len     = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
200 ierr = PetscMalloc(len,&(        new_a   ));CHKERRQ(ierr); \
201         new_j   = (int*)(new_a + bs2*new_nz); \
202         new_i   = new_j + new_nz; \
203  \
204         /* copy over old data into new slots */ \
205         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
206         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
207         ierr = PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
208         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
209         ierr = PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
210         ierr = PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
211         ierr = PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
212         ierr = PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
213                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
214         /* free up old matrix storage */ \
215         ierr = PetscFree(b->a);CHKERRQ(ierr);  \
216         if (!b->singlemalloc) { \
217           ierr = PetscFree(b->i);CHKERRQ(ierr); \
218           ierr = PetscFree(b->j);CHKERRQ(ierr); \
219         } \
220         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
221         b->singlemalloc = PETSC_TRUE; \
222  \
223         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
224         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
225         PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
226         b->maxnz += bs2*CHUNKSIZE; \
227         b->reallocs++; \
228         b->nz++; \
229       } \
230       N = nrow++ - 1;  \
231       /* shift up all the later entries in this row */ \
232       for (ii=N; ii>=_i; ii--) { \
233         rp[ii+1] = rp[ii]; \
234         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
235       } \
236       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr);}  \
237       rp[_i]                      = bcol;  \
238       ap[bs2*_i + bs*cidx + ridx] = value;  \
239       b_noinsert:; \
240     bilen[brow] = nrow; \
241 }
242 #endif
243 
244 #if defined(PETSC_USE_MAT_SINGLE)
245 #undef __FUNC__
246 #define __FUNC__ "MatSetValues_MPISBAIJ"
247 int MatSetValues_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
248 {
249   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
250   int         ierr,i,N = m*n;
251   MatScalar   *vsingle;
252 
253   PetscFunctionBegin;
254   if (N > b->setvalueslen) {
255     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
256 ierr = PetscMalloc(N*sizeof(MatScalar),&(    b->setvaluescopy ));CHKERRQ(ierr);
257     b->setvalueslen  = N;
258   }
259   vsingle = b->setvaluescopy;
260 
261   for (i=0; i<N; i++) {
262     vsingle[i] = v[i];
263   }
264   ierr = MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
265   PetscFunctionReturn(0);
266 }
267 
268 #undef __FUNC__
269 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ"
270 int MatSetValuesBlocked_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
271 {
272   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
273   int         ierr,i,N = m*n*b->bs2;
274   MatScalar   *vsingle;
275 
276   PetscFunctionBegin;
277   if (N > b->setvalueslen) {
278     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
279 ierr = PetscMalloc(N*sizeof(MatScalar),&(    b->setvaluescopy ));CHKERRQ(ierr);
280     b->setvalueslen  = N;
281   }
282   vsingle = b->setvaluescopy;
283   for (i=0; i<N; i++) {
284     vsingle[i] = v[i];
285   }
286   ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
287   PetscFunctionReturn(0);
288 }
289 
290 #undef __FUNC__
291 #define __FUNC__ "MatSetValues_MPISBAIJ_HT"
292 int MatSetValues_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
293 {
294   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
295   int         ierr,i,N = m*n;
296   MatScalar   *vsingle;
297 
298   PetscFunctionBegin;
299   SETERRQ(1,"Function not yet written for SBAIJ format");
300   /* PetscFunctionReturn(0); */
301 }
302 
303 #undef __FUNC__
304 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ_HT"
305 int MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
306 {
307   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
308   int         ierr,i,N = m*n*b->bs2;
309   MatScalar   *vsingle;
310 
311   PetscFunctionBegin;
312   SETERRQ(1,"Function not yet written for SBAIJ format");
313   /* PetscFunctionReturn(0); */
314 }
315 #endif
316 
317 /* Only add/insert a(i,j) with i<=j (blocks).
318    Any a(i,j) with i>j input by user is ingored.
319 */
320 #undef __FUNC__
321 #define __FUNC__ "MatSetValues_MPIBAIJ"
322 int MatSetValues_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
323 {
324   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
325   MatScalar    value;
326   PetscTruth   roworiented = baij->roworiented;
327   int          ierr,i,j,row,col;
328   int          rstart_orig=baij->rstart_bs;
329   int          rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
330   int          cend_orig=baij->cend_bs,bs=baij->bs;
331 
332   /* Some Variables required in the macro */
333   Mat          A = baij->A;
334   Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
335   int          *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
336   MatScalar    *aa=a->a;
337 
338   Mat          B = baij->B;
339   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(B)->data;
340   int          *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
341   MatScalar    *ba=b->a;
342 
343   int          *rp,ii,nrow,_i,rmax,N,brow,bcol;
344   int          low,high,t,ridx,cidx,bs2=a->bs2;
345   MatScalar    *ap,*bap;
346 
347   /* for stash */
348   int          n_loc, *in_loc=0;
349   MatScalar    *v_loc=0;
350 
351   PetscFunctionBegin;
352 
353   if(!baij->donotstash){
354 ierr = PetscMalloc(n*sizeof(int),&(    in_loc ));CHKERRQ(ierr);
355 ierr = PetscMalloc(n*sizeof(MatScalar),&(    v_loc  ));CHKERRQ(ierr);
356   }
357 
358   for (i=0; i<m; i++) {
359     if (im[i] < 0) continue;
360 #if defined(PETSC_USE_BOPT_g)
361     if (im[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
362 #endif
363     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
364       row = im[i] - rstart_orig;              /* local row index */
365       for (j=0; j<n; j++) {
366         if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
367         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
368           col = in[j] - cstart_orig;          /* local col index */
369           brow = row/bs; bcol = col/bs;
370           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
371           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
372           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
373           /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
374         } else if (in[j] < 0) continue;
375 #if defined(PETSC_USE_BOPT_g)
376         else if (in[j] >= mat->N) {SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Col too large");}
377 #endif
378         else {  /* off-diag entry (B) */
379           if (mat->was_assembled) {
380             if (!baij->colmap) {
381               ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
382             }
383 #if defined (PETSC_USE_CTABLE)
384             ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
385             col  = col - 1 + in[j]%bs;
386 #else
387             col = baij->colmap[in[j]/bs] - 1 + in[j]%bs;
388 #endif
389             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
390               ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
391               col =  in[j];
392               /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
393               B = baij->B;
394               b = (Mat_SeqBAIJ*)(B)->data;
395               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
396               ba=b->a;
397             }
398           } else col = in[j];
399           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
400           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
401           /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
402         }
403       }
404     } else {  /* off processor entry */
405       if (!baij->donotstash) {
406         n_loc = 0;
407         for (j=0; j<n; j++){
408           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
409           in_loc[n_loc] = in[j];
410           if (roworiented) {
411             v_loc[n_loc] = v[i*n+j];
412           } else {
413             v_loc[n_loc] = v[j*m+i];
414           }
415           n_loc++;
416         }
417         ierr = MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);CHKERRQ(ierr);
418       }
419     }
420   }
421 
422   if(!baij->donotstash){
423     ierr = PetscFree(in_loc);CHKERRQ(ierr);
424     ierr = PetscFree(v_loc);CHKERRQ(ierr);
425   }
426   PetscFunctionReturn(0);
427 }
428 
429 #undef __FUNC__
430 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ"
431 int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
432 {
433   PetscFunctionBegin;
434   SETERRQ(1,"Function not yet written for SBAIJ format");
435   /* PetscFunctionReturn(0); */
436 }
437 
438 #define HASH_KEY 0.6180339887
439 #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
440 /* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
441 /* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
442 #undef __FUNC__
443 #define __FUNC__ "MatSetValues_MPISBAIJ_HT_MatScalar"
444 int MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
445 {
446   PetscFunctionBegin;
447   SETERRQ(1,"Function not yet written for SBAIJ format");
448   /* PetscFunctionReturn(0); */
449 }
450 
451 #undef __FUNC__
452 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ_HT_MatScalar"
453 int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
454 {
455   PetscFunctionBegin;
456   SETERRQ(1,"Function not yet written for SBAIJ format");
457   /* PetscFunctionReturn(0); */
458 }
459 
460 #undef __FUNC__
461 #define __FUNC__ "MatGetValues_MPISBAIJ"
462 int MatGetValues_MPISBAIJ(Mat mat,int m,int *idxm,int n,int *idxn,Scalar *v)
463 {
464   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
465   int          bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
466   int          bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
467 
468   PetscFunctionBegin;
469   for (i=0; i<m; i++) {
470     if (idxm[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
471     if (idxm[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
472     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
473       row = idxm[i] - bsrstart;
474       for (j=0; j<n; j++) {
475         if (idxn[j] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column");
476         if (idxn[j] >= mat->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
477         if (idxn[j] >= bscstart && idxn[j] < bscend){
478           col = idxn[j] - bscstart;
479           ierr = MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
480         } else {
481           if (!baij->colmap) {
482             ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
483           }
484 #if defined (PETSC_USE_CTABLE)
485           ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
486           data --;
487 #else
488           data = baij->colmap[idxn[j]/bs]-1;
489 #endif
490           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
491           else {
492             col  = data + idxn[j]%bs;
493             ierr = MatGetValues_SeqSBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
494           }
495         }
496       }
497     } else {
498       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
499     }
500   }
501  PetscFunctionReturn(0);
502 }
503 
504 #undef __FUNC__
505 #define __FUNC__ "MatNorm_MPISBAIJ"
506 int MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
507 {
508   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
509   /* Mat_SeqSBAIJ *amat = (Mat_SeqSBAIJ*)baij->A->data; */
510   /* Mat_SeqBAIJ  *bmat = (Mat_SeqBAIJ*)baij->B->data; */
511   int        ierr;
512   PetscReal  sum[2],*lnorm2;
513 
514   PetscFunctionBegin;
515   if (baij->size == 1) {
516     ierr =  MatNorm(baij->A,type,norm);CHKERRQ(ierr);
517   } else {
518     if (type == NORM_FROBENIUS) {
519 ierr = PetscMalloc(2*sizeof(double),&(      lnorm2 ));CHKERRQ(ierr);
520       ierr =  MatNorm(baij->A,type,lnorm2);CHKERRQ(ierr);
521       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* squar power of norm(A) */
522       ierr =  MatNorm(baij->B,type,lnorm2);CHKERRQ(ierr);
523       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* squar power of norm(B) */
524       /*
525       ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
526       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], lnorm2=%g, %g\n",rank,lnorm2[0],lnorm2[1]);
527       */
528       ierr = MPI_Allreduce(lnorm2,&sum,2,MPI_DOUBLE,MPI_SUM,mat->comm);CHKERRQ(ierr);
529       /*
530       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], sum=%g, %g\n",rank,sum[0],sum[1]);
531       PetscSynchronizedFlush(PETSC_COMM_WORLD); */
532 
533       *norm = sqrt(sum[0] + 2*sum[1]);
534       ierr = PetscFree(lnorm2);CHKERRQ(ierr);
535     } else {
536       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
537     }
538   }
539   PetscFunctionReturn(0);
540 }
541 
542 /*
543   Creates the hash table, and sets the table
544   This table is created only once.
545   If new entried need to be added to the matrix
546   then the hash table has to be destroyed and
547   recreated.
548 */
549 #undef __FUNC__
550 #define __FUNC__ "MatCreateHashTable_MPISBAIJ_Private"
551 int MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
552 {
553   PetscFunctionBegin;
554   SETERRQ(1,"Function not yet written for SBAIJ format");
555   /* PetscFunctionReturn(0); */
556 }
557 
558 #undef __FUNC__
559 #define __FUNC__ "MatAssemblyBegin_MPISBAIJ"
560 int MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
561 {
562   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
563   int         ierr,nstash,reallocs;
564   InsertMode  addv;
565 
566   PetscFunctionBegin;
567   if (baij->donotstash) {
568     PetscFunctionReturn(0);
569   }
570 
571   /* make sure all processors are either in INSERTMODE or ADDMODE */
572   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);CHKERRQ(ierr);
573   if (addv == (ADD_VALUES|INSERT_VALUES)) {
574     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
575   }
576   mat->insertmode = addv; /* in case this processor had no cache */
577 
578   ierr = MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);CHKERRQ(ierr);
579   ierr = MatStashScatterBegin_Private(&mat->bstash,baij->rowners);CHKERRQ(ierr);
580   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
581   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %d entries,uses %d mallocs.\n",nstash,reallocs);
582   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
583   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
584   PetscFunctionReturn(0);
585 }
586 
587 #undef __FUNC__
588 #define __FUNC__ "MatAssemblyEnd_MPISBAIJ"
589 int MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
590 {
591   Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
592   Mat_SeqSBAIJ  *a=(Mat_SeqSBAIJ*)baij->A->data;
593   Mat_SeqBAIJ  *b=(Mat_SeqBAIJ*)baij->B->data;
594   int         i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
595   int         *row,*col,other_disassembled;
596   PetscTruth  r1,r2,r3;
597   MatScalar   *val;
598   InsertMode  addv = mat->insertmode;
599   int         rank;
600 
601   PetscFunctionBegin;
602   /* remove 2 line below later */
603   ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
604 
605   if (!baij->donotstash) {
606     while (1) {
607       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
608       /*
609       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]: in AssemblyEnd, stash, flg=%d\n",rank,flg);
610       PetscSynchronizedFlush(PETSC_COMM_WORLD);
611       */
612       if (!flg) break;
613 
614       for (i=0; i<n;) {
615         /* Now identify the consecutive vals belonging to the same row */
616         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
617         if (j < n) ncols = j-i;
618         else       ncols = n-i;
619         /* Now assemble all these values with a single function call */
620         ierr = MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
621         i = j;
622       }
623     }
624     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
625     /* Now process the block-stash. Since the values are stashed column-oriented,
626        set the roworiented flag to column oriented, and after MatSetValues()
627        restore the original flags */
628     r1 = baij->roworiented;
629     r2 = a->roworiented;
630     r3 = b->roworiented;
631     baij->roworiented = PETSC_FALSE;
632     a->roworiented    = PETSC_FALSE;
633     b->roworiented    = PETSC_FALSE;
634     while (1) {
635       ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
636       if (!flg) break;
637 
638       for (i=0; i<n;) {
639         /* Now identify the consecutive vals belonging to the same row */
640         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
641         if (j < n) ncols = j-i;
642         else       ncols = n-i;
643         ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
644         i = j;
645       }
646     }
647     ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
648     baij->roworiented = r1;
649     a->roworiented    = r2;
650     b->roworiented    = r3;
651   }
652 
653   ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
654   ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);
655 
656   /* determine if any processor has disassembled, if so we must
657      also disassemble ourselfs, in order that we may reassemble. */
658   /*
659      if nonzero structure of submatrix B cannot change then we know that
660      no processor disassembled thus we can skip this stuff
661   */
662   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
663     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);CHKERRQ(ierr);
664     if (mat->was_assembled && !other_disassembled) {
665       ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
666     }
667   }
668 
669   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
670     ierr = MatSetUpMultiply_MPISBAIJ(mat);CHKERRQ(ierr);
671   }
672   ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
673   ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);
674 
675 #if defined(PETSC_USE_BOPT_g)
676   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
677     PetscLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((double)baij->ht_total_ct)/baij->ht_insert_ct);
678     baij->ht_total_ct  = 0;
679     baij->ht_insert_ct = 0;
680   }
681 #endif
682   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
683     ierr = MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
684     mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
685     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
686   }
687 
688   if (baij->rowvalues) {
689     ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
690     baij->rowvalues = 0;
691   }
692   PetscFunctionReturn(0);
693 }
694 
695 #undef __FUNC__
696 #define __FUNC__ "MatView_MPISBAIJ_ASCIIorDraworSocket"
697 static int MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
698 {
699   Mat_MPISBAIJ  *baij = (Mat_MPISBAIJ*)mat->data;
700   int          ierr,format,bs = baij->bs,size = baij->size,rank = baij->rank;
701   PetscTruth   isascii,isdraw;
702   PetscViewer       sviewer;
703 
704   PetscFunctionBegin;
705   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
706   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_DRAW_VIEWER,&isdraw);CHKERRQ(ierr);
707   if (isascii) {
708     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
709     if (format == PETSC_VIEWER_FORMAT_ASCII_INFO_LONG) {
710       MatInfo info;
711       ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
712       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
713       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
714               rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
715               baij->bs,(int)info.memory);CHKERRQ(ierr);
716       ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
717       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
718       ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
719       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
720       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
721       ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
722       PetscFunctionReturn(0);
723     } else if (format == PETSC_VIEWER_FORMAT_ASCII_INFO) {
724       ierr = PetscViewerASCIIPrintf(viewer,"  block size is %d\n",bs);CHKERRQ(ierr);
725       PetscFunctionReturn(0);
726     }
727   }
728 
729   if (isdraw) {
730     PetscDraw       draw;
731     PetscTruth isnull;
732     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
733     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
734   }
735 
736   if (size == 1) {
737     ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
738   } else {
739     /* assemble the entire matrix onto first processor. */
740     Mat         A;
741     Mat_SeqSBAIJ *Aloc;
742     Mat_SeqBAIJ *Bloc;
743     int         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
744     MatScalar   *a;
745 
746     if (!rank) {
747       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
748     } else {
749       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
750     }
751     PetscLogObjectParent(mat,A);
752 
753     /* copy over the A part */
754     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
755     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
756 ierr = PetscMalloc(bs*sizeof(int),&(    rvals ));CHKERRQ(ierr);
757 
758     for (i=0; i<mbs; i++) {
759       rvals[0] = bs*(baij->rstart + i);
760       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
761       for (j=ai[i]; j<ai[i+1]; j++) {
762         col = (baij->cstart+aj[j])*bs;
763         for (k=0; k<bs; k++) {
764           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
765           col++; a += bs;
766         }
767       }
768     }
769     /* copy over the B part */
770     Bloc = (Mat_SeqBAIJ*)baij->B->data;
771     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
772     for (i=0; i<mbs; i++) {
773       rvals[0] = bs*(baij->rstart + i);
774       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
775       for (j=ai[i]; j<ai[i+1]; j++) {
776         col = baij->garray[aj[j]]*bs;
777         for (k=0; k<bs; k++) {
778           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
779           col++; a += bs;
780         }
781       }
782     }
783     ierr = PetscFree(rvals);CHKERRQ(ierr);
784     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
785     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
786     /*
787        Everyone has to call to draw the matrix since the graphics waits are
788        synchronized across all processors that share the PetscDraw object
789     */
790     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
791     if (!rank) {
792       ierr = MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
793     }
794     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
795     ierr = MatDestroy(A);CHKERRQ(ierr);
796   }
797   PetscFunctionReturn(0);
798 }
799 
800 #undef __FUNC__
801 #define __FUNC__ "MatView_MPISBAIJ"
802 int MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
803 {
804   int        ierr;
805   PetscTruth isascii,isdraw,issocket,isbinary;
806 
807   PetscFunctionBegin;
808   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
809   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_DRAW_VIEWER,&isdraw);CHKERRQ(ierr);
810   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
811   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_BINARY_VIEWER,&isbinary);CHKERRQ(ierr);
812   if (isascii || isdraw || issocket || isbinary) {
813     ierr = MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
814   } else {
815     SETERRQ1(1,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
816   }
817   PetscFunctionReturn(0);
818 }
819 
820 #undef __FUNC__
821 #define __FUNC__ "MatDestroy_MPISBAIJ"
822 int MatDestroy_MPISBAIJ(Mat mat)
823 {
824   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
825   int         ierr;
826 
827   PetscFunctionBegin;
828 #if defined(PETSC_USE_LOG)
829   PetscLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
830 #endif
831   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
832   ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
833   ierr = PetscFree(baij->rowners);CHKERRQ(ierr);
834   ierr = MatDestroy(baij->A);CHKERRQ(ierr);
835   ierr = MatDestroy(baij->B);CHKERRQ(ierr);
836 #if defined (PETSC_USE_CTABLE)
837   if (baij->colmap) {ierr = PetscTableDelete(baij->colmap);CHKERRQ(ierr);}
838 #else
839   if (baij->colmap) {ierr = PetscFree(baij->colmap);CHKERRQ(ierr);}
840 #endif
841   if (baij->garray) {ierr = PetscFree(baij->garray);CHKERRQ(ierr);}
842   if (baij->lvec)   {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
843   if (baij->Mvctx)  {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
844   if (baij->rowvalues) {ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);}
845   if (baij->barray) {ierr = PetscFree(baij->barray);CHKERRQ(ierr);}
846   if (baij->hd) {ierr = PetscFree(baij->hd);CHKERRQ(ierr);}
847 #if defined(PETSC_USE_MAT_SINGLE)
848   if (baij->setvaluescopy) {ierr = PetscFree(baij->setvaluescopy);CHKERRQ(ierr);}
849 #endif
850   ierr = PetscFree(baij);CHKERRQ(ierr);
851   PetscFunctionReturn(0);
852 }
853 
854 #undef __FUNC__
855 #define __FUNC__ "MatMult_MPISBAIJ"
856 int MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
857 {
858   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
859   int         ierr,nt;
860 
861   PetscFunctionBegin;
862   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
863   if (nt != A->n) {
864     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
865   }
866   ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
867   if (nt != A->m) {
868     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible parition of A and yy");
869   }
870 
871   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
872   /* do diagonal part */
873   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
874   /* do supperdiagonal part */
875   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
876   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
877   /* do subdiagonal part */
878   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
879   ierr = VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
880   ierr = VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
881 
882   PetscFunctionReturn(0);
883 }
884 
885 #undef __FUNC__
886 #define __FUNC__ "MatMultAdd_MPISBAIJ"
887 int MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
888 {
889   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
890   int        ierr;
891 
892   PetscFunctionBegin;
893   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
894   /* do diagonal part */
895   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
896   /* do supperdiagonal part */
897   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
898   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
899 
900   /* do subdiagonal part */
901   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
902   ierr = VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
903   ierr = VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
904 
905   PetscFunctionReturn(0);
906 }
907 
908 #undef __FUNC__
909 #define __FUNC__ "MatMultTranspose_MPISBAIJ"
910 int MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
911 {
912   PetscFunctionBegin;
913   SETERRQ(1,"Matrix is symmetric. Call MatMult().");
914   /* PetscFunctionReturn(0); */
915 }
916 
917 #undef __FUNC__
918 #define __FUNC__ "MatMultTransposeAdd_MPISBAIJ"
919 int MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
920 {
921   PetscFunctionBegin;
922   SETERRQ(1,"Matrix is symmetric. Call MatMultAdd().");
923   /* PetscFunctionReturn(0); */
924 }
925 
926 /*
927   This only works correctly for square matrices where the subblock A->A is the
928    diagonal block
929 */
930 #undef __FUNC__
931 #define __FUNC__ "MatGetDiagonal_MPISBAIJ"
932 int MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
933 {
934   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
935   int         ierr;
936 
937   PetscFunctionBegin;
938   /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
939   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
940   PetscFunctionReturn(0);
941 }
942 
943 #undef __FUNC__
944 #define __FUNC__ "MatScale_MPISBAIJ"
945 int MatScale_MPISBAIJ(Scalar *aa,Mat A)
946 {
947   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
948   int         ierr;
949 
950   PetscFunctionBegin;
951   ierr = MatScale(aa,a->A);CHKERRQ(ierr);
952   ierr = MatScale(aa,a->B);CHKERRQ(ierr);
953   PetscFunctionReturn(0);
954 }
955 
956 #undef __FUNC__
957 #define __FUNC__ "MatGetOwnershipRange_MPISBAIJ"
958 int MatGetOwnershipRange_MPISBAIJ(Mat matin,int *m,int *n)
959 {
960   Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
961 
962   PetscFunctionBegin;
963   if (m) *m = mat->rstart*mat->bs;
964   if (n) *n = mat->rend*mat->bs;
965   PetscFunctionReturn(0);
966 }
967 
968 #undef __FUNC__
969 #define __FUNC__ "MatGetRow_MPISBAIJ"
970 int MatGetRow_MPISBAIJ(Mat matin,int row,int *nz,int **idx,Scalar **v)
971 {
972   Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
973   Scalar     *vworkA,*vworkB,**pvA,**pvB,*v_p;
974   int        bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
975   int        nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
976   int        *cmap,*idx_p,cstart = mat->cstart;
977 
978   PetscFunctionBegin;
979   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
980   mat->getrowactive = PETSC_TRUE;
981 
982   if (!mat->rowvalues && (idx || v)) {
983     /*
984         allocate enough space to hold information from the longest row.
985     */
986     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
987     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
988     int     max = 1,mbs = mat->mbs,tmp;
989     for (i=0; i<mbs; i++) {
990       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
991       if (max < tmp) { max = tmp; }
992     }
993 ierr = PetscMalloc(max*bs2*(sizeof(int)+sizeof(Scalar)),&    mat->rowvalues );CHKERRQ(ierr);
994     mat->rowindices = (int*)(mat->rowvalues + max*bs2);
995   }
996 
997   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows")
998   lrow = row - brstart;  /* local row index */
999 
1000   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1001   if (!v)   {pvA = 0; pvB = 0;}
1002   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1003   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1004   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1005   nztot = nzA + nzB;
1006 
1007   cmap  = mat->garray;
1008   if (v  || idx) {
1009     if (nztot) {
1010       /* Sort by increasing column numbers, assuming A and B already sorted */
1011       int imark = -1;
1012       if (v) {
1013         *v = v_p = mat->rowvalues;
1014         for (i=0; i<nzB; i++) {
1015           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1016           else break;
1017         }
1018         imark = i;
1019         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1020         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1021       }
1022       if (idx) {
1023         *idx = idx_p = mat->rowindices;
1024         if (imark > -1) {
1025           for (i=0; i<imark; i++) {
1026             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1027           }
1028         } else {
1029           for (i=0; i<nzB; i++) {
1030             if (cmap[cworkB[i]/bs] < cstart)
1031               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1032             else break;
1033           }
1034           imark = i;
1035         }
1036         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1037         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1038       }
1039     } else {
1040       if (idx) *idx = 0;
1041       if (v)   *v   = 0;
1042     }
1043   }
1044   *nz = nztot;
1045   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1046   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1047   PetscFunctionReturn(0);
1048 }
1049 
1050 #undef __FUNC__
1051 #define __FUNC__ "MatRestoreRow_MPISBAIJ"
1052 int MatRestoreRow_MPISBAIJ(Mat mat,int row,int *nz,int **idx,Scalar **v)
1053 {
1054   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1055 
1056   PetscFunctionBegin;
1057   if (baij->getrowactive == PETSC_FALSE) {
1058     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1059   }
1060   baij->getrowactive = PETSC_FALSE;
1061   PetscFunctionReturn(0);
1062 }
1063 
1064 #undef __FUNC__
1065 #define __FUNC__ "MatGetBlockSize_MPISBAIJ"
1066 int MatGetBlockSize_MPISBAIJ(Mat mat,int *bs)
1067 {
1068   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1069 
1070   PetscFunctionBegin;
1071   *bs = baij->bs;
1072   PetscFunctionReturn(0);
1073 }
1074 
1075 #undef __FUNC__
1076 #define __FUNC__ "MatZeroEntries_MPISBAIJ"
1077 int MatZeroEntries_MPISBAIJ(Mat A)
1078 {
1079   Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1080   int         ierr;
1081 
1082   PetscFunctionBegin;
1083   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
1084   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
1085   PetscFunctionReturn(0);
1086 }
1087 
1088 #undef __FUNC__
1089 #define __FUNC__ "MatGetInfo_MPISBAIJ"
1090 int MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1091 {
1092   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1093   Mat         A = a->A,B = a->B;
1094   int         ierr;
1095   PetscReal   isend[5],irecv[5];
1096 
1097   PetscFunctionBegin;
1098   info->block_size     = (double)a->bs;
1099   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1100   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1101   isend[3] = info->memory;  isend[4] = info->mallocs;
1102   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1103   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1104   isend[3] += info->memory;  isend[4] += info->mallocs;
1105   if (flag == MAT_LOCAL) {
1106     info->nz_used      = isend[0];
1107     info->nz_allocated = isend[1];
1108     info->nz_unneeded  = isend[2];
1109     info->memory       = isend[3];
1110     info->mallocs      = isend[4];
1111   } else if (flag == MAT_GLOBAL_MAX) {
1112     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_MAX,matin->comm);CHKERRQ(ierr);
1113     info->nz_used      = irecv[0];
1114     info->nz_allocated = irecv[1];
1115     info->nz_unneeded  = irecv[2];
1116     info->memory       = irecv[3];
1117     info->mallocs      = irecv[4];
1118   } else if (flag == MAT_GLOBAL_SUM) {
1119     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_SUM,matin->comm);CHKERRQ(ierr);
1120     info->nz_used      = irecv[0];
1121     info->nz_allocated = irecv[1];
1122     info->nz_unneeded  = irecv[2];
1123     info->memory       = irecv[3];
1124     info->mallocs      = irecv[4];
1125   } else {
1126     SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
1127   }
1128   info->rows_global       = (double)A->M;
1129   info->columns_global    = (double)A->N;
1130   info->rows_local        = (double)A->m;
1131   info->columns_local     = (double)A->N;
1132   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1133   info->fill_ratio_needed = 0;
1134   info->factor_mallocs    = 0;
1135   PetscFunctionReturn(0);
1136 }
1137 
1138 #undef __FUNC__
1139 #define __FUNC__ "MatSetOption_MPISBAIJ"
1140 int MatSetOption_MPISBAIJ(Mat A,MatOption op)
1141 {
1142   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1143   int         ierr;
1144 
1145   PetscFunctionBegin;
1146   if (op == MAT_NO_NEW_NONZERO_LOCATIONS ||
1147       op == MAT_YES_NEW_NONZERO_LOCATIONS ||
1148       op == MAT_COLUMNS_UNSORTED ||
1149       op == MAT_COLUMNS_SORTED ||
1150       op == MAT_NEW_NONZERO_ALLOCATION_ERR ||
1151       op == MAT_KEEP_ZEROED_ROWS ||
1152       op == MAT_NEW_NONZERO_LOCATION_ERR) {
1153         ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1154         ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1155   } else if (op == MAT_ROW_ORIENTED) {
1156         a->roworiented = PETSC_TRUE;
1157         ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1158         ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1159   } else if (op == MAT_ROWS_SORTED ||
1160              op == MAT_ROWS_UNSORTED ||
1161              op == MAT_SYMMETRIC ||
1162              op == MAT_STRUCTURALLY_SYMMETRIC ||
1163              op == MAT_YES_NEW_DIAGONALS ||
1164              op == MAT_USE_HASH_TABLE) {
1165     PetscLogInfo(A,"Info:MatSetOption_MPIBAIJ:Option ignored\n");
1166   } else if (op == MAT_COLUMN_ORIENTED) {
1167     a->roworiented = PETSC_FALSE;
1168     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1169     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1170   } else if (op == MAT_IGNORE_OFF_PROC_ENTRIES) {
1171     a->donotstash = PETSC_TRUE;
1172   } else if (op == MAT_NO_NEW_DIAGONALS) {
1173     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1174   } else if (op == MAT_USE_HASH_TABLE) {
1175     a->ht_flag = PETSC_TRUE;
1176   } else {
1177     SETERRQ(PETSC_ERR_SUP,"unknown option");
1178   }
1179   PetscFunctionReturn(0);
1180 }
1181 
1182 #undef __FUNC__
1183 #define __FUNC__ "MatTranspose_MPISBAIJ("
1184 int MatTranspose_MPISBAIJ(Mat A,Mat *matout)
1185 {
1186   PetscFunctionBegin;
1187   SETERRQ(1,"Matrix is symmetric. MatTranspose() should not be called");
1188   /* PetscFunctionReturn(0); */
1189 }
1190 
1191 #undef __FUNC__
1192 #define __FUNC__ "MatDiagonalScale_MPISBAIJ"
1193 int MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1194 {
1195   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1196   Mat         a = baij->A,b = baij->B;
1197   int         ierr,s1,s2,s3;
1198 
1199   PetscFunctionBegin;
1200   if (ll != rr) {
1201     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be same\n");
1202   }
1203   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1204   if (rr) {
1205     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1206     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1207     /* Overlap communication with computation. */
1208     ierr = VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1209     /*} if (ll) { */
1210     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1211     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1212     ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
1213     /* } */
1214   /* scale  the diagonal block */
1215   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1216 
1217   /* if (rr) { */
1218     /* Do a scatter end and then right scale the off-diagonal block */
1219     ierr = VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1220     ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
1221   }
1222 
1223   PetscFunctionReturn(0);
1224 }
1225 
1226 #undef __FUNC__
1227 #define __FUNC__ "MatZeroRows_MPISBAIJ"
1228 int MatZeroRows_MPISBAIJ(Mat A,IS is,Scalar *diag)
1229 {
1230   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;
1231   int            i,ierr,N,*rows,*owners = l->rowners,size = l->size;
1232   int            *procs,*nprocs,j,idx,nsends,*work,row;
1233   int            nmax,*svalues,*starts,*owner,nrecvs,rank = l->rank;
1234   int            *rvalues,tag = A->tag,count,base,slen,n,*source;
1235   int            *lens,imdex,*lrows,*values,bs=l->bs,rstart_bs=l->rstart_bs;
1236   MPI_Comm       comm = A->comm;
1237   MPI_Request    *send_waits,*recv_waits;
1238   MPI_Status     recv_status,*send_status;
1239   IS             istmp;
1240   PetscTruth     found;
1241 
1242   PetscFunctionBegin;
1243   ierr = ISGetSize(is,&N);CHKERRQ(ierr);
1244   ierr = ISGetIndices(is,&rows);CHKERRQ(ierr);
1245 
1246   /*  first count number of contributors to each processor */
1247 ierr = PetscMalloc(2*size*sizeof(int),&(  nprocs ));CHKERRQ(ierr);
1248   ierr   = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);
1249   procs  = nprocs + size;
1250 ierr = PetscMalloc((N+1)*sizeof(int),&  owner  );CHKERRQ(ierr); /* see note*/
1251   for (i=0; i<N; i++) {
1252     idx   = rows[i];
1253     found = PETSC_FALSE;
1254     for (j=0; j<size; j++) {
1255       if (idx >= owners[j]*bs && idx < owners[j+1]*bs) {
1256         nprocs[j]++; procs[j] = 1; owner[i] = j; found = PETSC_TRUE; break;
1257       }
1258     }
1259     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1260   }
1261   nsends = 0;  for (i=0; i<size; i++) { nsends += procs[i];}
1262 
1263   /* inform other processors of number of messages and max length*/
1264 ierr = PetscMalloc(2*size*sizeof(int),&(  work   ));CHKERRQ(ierr);
1265   ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
1266   nmax   = work[rank];
1267   nrecvs = work[size+rank];
1268   ierr = PetscFree(work);CHKERRQ(ierr);
1269 
1270   /* post receives:   */
1271 ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(int),&  rvalues );CHKERRQ(ierr);
1272 ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&  recv_waits );CHKERRQ(ierr);
1273   for (i=0; i<nrecvs; i++) {
1274     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
1275   }
1276 
1277   /* do sends:
1278      1) starts[i] gives the starting index in svalues for stuff going to
1279      the ith processor
1280   */
1281 ierr = PetscMalloc((N+1)*sizeof(int),&  svalues    );CHKERRQ(ierr);
1282 ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&  send_waits );CHKERRQ(ierr);
1283 ierr = PetscMalloc((size+1)*sizeof(int),&  starts     );CHKERRQ(ierr);
1284   starts[0]  = 0;
1285   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1286   for (i=0; i<N; i++) {
1287     svalues[starts[owner[i]]++] = rows[i];
1288   }
1289   ierr = ISRestoreIndices(is,&rows);CHKERRQ(ierr);
1290 
1291   starts[0] = 0;
1292   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1293   count = 0;
1294   for (i=0; i<size; i++) {
1295     if (procs[i]) {
1296       ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPI_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
1297     }
1298   }
1299   ierr = PetscFree(starts);CHKERRQ(ierr);
1300 
1301   base = owners[rank]*bs;
1302 
1303   /*  wait on receives */
1304 ierr = PetscMalloc(2*(nrecvs+1)*sizeof(int),&  lens   );CHKERRQ(ierr);
1305   source = lens + nrecvs;
1306   count  = nrecvs; slen = 0;
1307   while (count) {
1308     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
1309     /* unpack receives into our local space */
1310     ierr = MPI_Get_count(&recv_status,MPI_INT,&n);CHKERRQ(ierr);
1311     source[imdex]  = recv_status.MPI_SOURCE;
1312     lens[imdex]    = n;
1313     slen          += n;
1314     count--;
1315   }
1316   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
1317 
1318   /* move the data into the send scatter */
1319 ierr = PetscMalloc((slen+1)*sizeof(int),&  lrows );CHKERRQ(ierr);
1320   count = 0;
1321   for (i=0; i<nrecvs; i++) {
1322     values = rvalues + i*nmax;
1323     for (j=0; j<lens[i]; j++) {
1324       lrows[count++] = values[j] - base;
1325     }
1326   }
1327   ierr = PetscFree(rvalues);CHKERRQ(ierr);
1328   ierr = PetscFree(lens);CHKERRQ(ierr);
1329   ierr = PetscFree(owner);CHKERRQ(ierr);
1330   ierr = PetscFree(nprocs);CHKERRQ(ierr);
1331 
1332   /* actually zap the local rows */
1333   ierr = ISCreateGeneral(PETSC_COMM_SELF,slen,lrows,&istmp);CHKERRQ(ierr);
1334   PetscLogObjectParent(A,istmp);
1335 
1336   /*
1337         Zero the required rows. If the "diagonal block" of the matrix
1338      is square and the user wishes to set the diagonal we use seperate
1339      code so that MatSetValues() is not called for each diagonal allocating
1340      new memory, thus calling lots of mallocs and slowing things down.
1341 
1342        Contributed by: Mathew Knepley
1343   */
1344   /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1345   ierr = MatZeroRows_SeqBAIJ(l->B,istmp,0);CHKERRQ(ierr);
1346   if (diag && (l->A->M == l->A->N)) {
1347     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,diag);CHKERRQ(ierr);
1348   } else if (diag) {
1349     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1350     if (((Mat_SeqSBAIJ*)l->A->data)->nonew) {
1351       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1352 MAT_NO_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1353     }
1354     for (i=0; i<slen; i++) {
1355       row  = lrows[i] + rstart_bs;
1356       ierr = MatSetValues(A,1,&row,1,&row,diag,INSERT_VALUES);CHKERRQ(ierr);
1357     }
1358     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1359     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1360   } else {
1361     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1362   }
1363 
1364   ierr = ISDestroy(istmp);CHKERRQ(ierr);
1365   ierr = PetscFree(lrows);CHKERRQ(ierr);
1366 
1367   /* wait on sends */
1368   if (nsends) {
1369 ierr = PetscMalloc(nsends*sizeof(MPI_Status),&(    send_status ));CHKERRQ(ierr);
1370     ierr        = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
1371     ierr        = PetscFree(send_status);CHKERRQ(ierr);
1372   }
1373   ierr = PetscFree(send_waits);CHKERRQ(ierr);
1374   ierr = PetscFree(svalues);CHKERRQ(ierr);
1375 
1376   PetscFunctionReturn(0);
1377 }
1378 
1379 #undef __FUNC__
1380 #define __FUNC__ "MatPrintHelp_MPISBAIJ"
1381 int MatPrintHelp_MPISBAIJ(Mat A)
1382 {
1383   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1384   MPI_Comm    comm = A->comm;
1385   static int  called = 0;
1386   int         ierr;
1387 
1388   PetscFunctionBegin;
1389   if (!a->rank) {
1390     ierr = MatPrintHelp_SeqSBAIJ(a->A);CHKERRQ(ierr);
1391   }
1392   if (called) {PetscFunctionReturn(0);} else called = 1;
1393   ierr = (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");CHKERRQ(ierr);
1394   ierr = (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");CHKERRQ(ierr);
1395   PetscFunctionReturn(0);
1396 }
1397 
1398 #undef __FUNC__
1399 #define __FUNC__ "MatSetUnfactored_MPISBAIJ"
1400 int MatSetUnfactored_MPISBAIJ(Mat A)
1401 {
1402   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1403   int         ierr;
1404 
1405   PetscFunctionBegin;
1406   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1407   PetscFunctionReturn(0);
1408 }
1409 
1410 static int MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1411 
1412 #undef __FUNC__
1413 #define __FUNC__ "MatEqual_MPISBAIJ"
1414 int MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1415 {
1416   Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1417   Mat         a,b,c,d;
1418   PetscTruth  flg;
1419   int         ierr;
1420 
1421   PetscFunctionBegin;
1422   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg);CHKERRQ(ierr);
1423   if (!flg) SETERRQ(PETSC_ERR_ARG_INCOMP,"Matrices must be same type");
1424   a = matA->A; b = matA->B;
1425   c = matB->A; d = matB->B;
1426 
1427   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1428   if (flg == PETSC_TRUE) {
1429     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1430   }
1431   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);CHKERRQ(ierr);
1432   PetscFunctionReturn(0);
1433 }
1434 
1435 #undef __FUNC__
1436 #define __FUNC__ "MatSetUpPreallocation_MPISBAIJ"
1437 int MatSetUpPreallocation_MPISBAIJ(Mat A)
1438 {
1439   int        ierr;
1440 
1441   PetscFunctionBegin;
1442   ierr = MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1443   PetscFunctionReturn(0);
1444 }
1445 /* -------------------------------------------------------------------*/
1446 static struct _MatOps MatOps_Values = {
1447   MatSetValues_MPISBAIJ,
1448   MatGetRow_MPISBAIJ,
1449   MatRestoreRow_MPISBAIJ,
1450   MatMult_MPISBAIJ,
1451   MatMultAdd_MPISBAIJ,
1452   MatMultTranspose_MPISBAIJ,
1453   MatMultTransposeAdd_MPISBAIJ,
1454   0,
1455   0,
1456   0,
1457   0,
1458   0,
1459   0,
1460   0,
1461   MatTranspose_MPISBAIJ,
1462   MatGetInfo_MPISBAIJ,
1463   MatEqual_MPISBAIJ,
1464   MatGetDiagonal_MPISBAIJ,
1465   MatDiagonalScale_MPISBAIJ,
1466   MatNorm_MPISBAIJ,
1467   MatAssemblyBegin_MPISBAIJ,
1468   MatAssemblyEnd_MPISBAIJ,
1469   0,
1470   MatSetOption_MPISBAIJ,
1471   MatZeroEntries_MPISBAIJ,
1472   MatZeroRows_MPISBAIJ,
1473   0,
1474   0,
1475   0,
1476   0,
1477   MatSetUpPreallocation_MPISBAIJ,
1478   0,
1479   MatGetOwnershipRange_MPISBAIJ,
1480   0,
1481   0,
1482   0,
1483   0,
1484   MatDuplicate_MPISBAIJ,
1485   0,
1486   0,
1487   0,
1488   0,
1489   0,
1490   MatGetSubMatrices_MPISBAIJ,
1491   MatIncreaseOverlap_MPISBAIJ,
1492   MatGetValues_MPISBAIJ,
1493   0,
1494   MatPrintHelp_MPISBAIJ,
1495   MatScale_MPISBAIJ,
1496   0,
1497   0,
1498   0,
1499   MatGetBlockSize_MPISBAIJ,
1500   0,
1501   0,
1502   0,
1503   0,
1504   0,
1505   0,
1506   MatSetUnfactored_MPISBAIJ,
1507   0,
1508   MatSetValuesBlocked_MPISBAIJ,
1509   0,
1510   0,
1511   0,
1512   MatGetMaps_Petsc,
1513   0,
1514   0,
1515   0,
1516   0,
1517   0,
1518   0,
1519   MatGetRowMax_MPISBAIJ};
1520 
1521 
1522 EXTERN_C_BEGIN
1523 #undef __FUNC__
1524 #define __FUNC__ "MatGetDiagonalBlock_MPISBAIJ"
1525 int MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1526 {
1527   PetscFunctionBegin;
1528   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1529   *iscopy = PETSC_FALSE;
1530   PetscFunctionReturn(0);
1531 }
1532 EXTERN_C_END
1533 
1534 EXTERN_C_BEGIN
1535 #undef __FUNC__
1536 #define __FUNC__ "MatCreate_MPISBAIJ"
1537 int MatCreate_MPISBAIJ(Mat B)
1538 {
1539   Mat_MPISBAIJ *b;
1540   int          ierr;
1541   PetscTruth   flg;
1542 
1543   PetscFunctionBegin;
1544 
1545   ierr    = PetscNew(Mat_MPISBAIJ,&b);CHKERRQ(ierr);
1546   B->data = (void*)b;
1547   ierr    = PetscMemzero(b,sizeof(Mat_MPISBAIJ));CHKERRQ(ierr);
1548   ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1549 
1550   B->ops->destroy    = MatDestroy_MPISBAIJ;
1551   B->ops->view       = MatView_MPISBAIJ;
1552   B->mapping    = 0;
1553   B->factor     = 0;
1554   B->assembled  = PETSC_FALSE;
1555 
1556   B->insertmode = NOT_SET_VALUES;
1557   ierr = MPI_Comm_rank(B->comm,&b->rank);CHKERRQ(ierr);
1558   ierr = MPI_Comm_size(B->comm,&b->size);CHKERRQ(ierr);
1559 
1560   /* build local table of row and column ownerships */
1561   b->rowners = (int*)PetscMalloc(3*(b->size+2)*sizeof(int));CHKERRQ(ierr);
1562   b->cowners    = b->rowners + b->size + 2;
1563   b->rowners_bs = b->cowners + b->size + 2;
1564   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1565 
1566   /* build cache for off array entries formed */
1567   ierr = MatStashCreate_Private(B->comm,1,&B->stash);CHKERRQ(ierr);
1568   b->donotstash  = PETSC_FALSE;
1569   b->colmap      = PETSC_NULL;
1570   b->garray      = PETSC_NULL;
1571   b->roworiented = PETSC_TRUE;
1572 
1573 #if defined(PEYSC_USE_MAT_SINGLE)
1574   /* stuff for MatSetValues_XXX in single precision */
1575   b->lensetvalues     = 0;
1576   b->setvaluescopy    = PETSC_NULL;
1577 #endif
1578 
1579   /* stuff used in block assembly */
1580   b->barray       = 0;
1581 
1582   /* stuff used for matrix vector multiply */
1583   b->lvec         = 0;
1584   b->Mvctx        = 0;
1585 
1586   /* stuff for MatGetRow() */
1587   b->rowindices   = 0;
1588   b->rowvalues    = 0;
1589   b->getrowactive = PETSC_FALSE;
1590 
1591   /* hash table stuff */
1592   b->ht           = 0;
1593   b->hd           = 0;
1594   b->ht_size      = 0;
1595   b->ht_flag      = PETSC_FALSE;
1596   b->ht_fact      = 0;
1597   b->ht_total_ct  = 0;
1598   b->ht_insert_ct = 0;
1599 
1600   ierr = PetscOptionsHasName(PETSC_NULL,"-mat_use_hash_table",&flg);CHKERRQ(ierr);
1601   if (flg) {
1602     double fact = 1.39;
1603     ierr = MatSetOption(B,MAT_USE_HASH_TABLE);CHKERRQ(ierr);
1604     ierr = PetscOptionsGetDouble(PETSC_NULL,"-mat_use_hash_table",&fact,PETSC_NULL);CHKERRQ(ierr);
1605     if (fact <= 1.0) fact = 1.39;
1606     ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
1607     PetscLogInfo(0,"MatCreateMPISBAIJ:Hash table Factor used %5.2f\n",fact);
1608   }
1609   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1610                                      "MatStoreValues_MPISBAIJ",
1611                                      MatStoreValues_MPISBAIJ);CHKERRQ(ierr);
1612   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1613                                      "MatRetrieveValues_MPISBAIJ",
1614                                      MatRetrieveValues_MPISBAIJ);CHKERRQ(ierr);
1615   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1616                                      "MatGetDiagonalBlock_MPISBAIJ",
1617                                      MatGetDiagonalBlock_MPISBAIJ);CHKERRQ(ierr);
1618   PetscFunctionReturn(0);
1619 }
1620 EXTERN_C_END
1621 
1622 #undef __FUNC__
1623 #define __FUNC__ "MatMPISBAIJSetPreallocation"
1624 /*@C
1625    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1626    the user should preallocate the matrix storage by setting the parameters
1627    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1628    performance can be increased by more than a factor of 50.
1629 
1630    Collective on Mat
1631 
1632    Input Parameters:
1633 +  A - the matrix
1634 .  bs   - size of blockk
1635 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1636            submatrix  (same for all local rows)
1637 .  d_nnz - array containing the number of block nonzeros in the various block rows
1638            of the in diagonal portion of the local (possibly different for each block
1639            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1640 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1641            submatrix (same for all local rows).
1642 -  o_nnz - array containing the number of nonzeros in the various block rows of the
1643            off-diagonal portion of the local submatrix (possibly different for
1644            each block row) or PETSC_NULL.
1645 
1646 
1647    Options Database Keys:
1648 .   -mat_no_unroll - uses code that does not unroll the loops in the
1649                      block calculations (much slower)
1650 .   -mat_block_size - size of the blocks to use
1651 
1652    Notes:
1653 
1654    If PETSC_DECIDE or  PETSC_DETERMINE is used for a particular argument on one processor
1655    than it must be used on all processors that share the object for that argument.
1656 
1657    Storage Information:
1658    For a square global matrix we define each processor's diagonal portion
1659    to be its local rows and the corresponding columns (a square submatrix);
1660    each processor's off-diagonal portion encompasses the remainder of the
1661    local matrix (a rectangular submatrix).
1662 
1663    The user can specify preallocated storage for the diagonal part of
1664    the local submatrix with either d_nz or d_nnz (not both).  Set
1665    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1666    memory allocation.  Likewise, specify preallocated storage for the
1667    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1668 
1669    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1670    the figure below we depict these three local rows and all columns (0-11).
1671 
1672 .vb
1673            0 1 2 3 4 5 6 7 8 9 10 11
1674           -------------------
1675    row 3  |  o o o d d d o o o o o o
1676    row 4  |  o o o d d d o o o o o o
1677    row 5  |  o o o d d d o o o o o o
1678           -------------------
1679 .ve
1680 
1681    Thus, any entries in the d locations are stored in the d (diagonal)
1682    submatrix, and any entries in the o locations are stored in the
1683    o (off-diagonal) submatrix.  Note that the d and the o submatrices are
1684    stored simply in the MATSEQBAIJ format for compressed row storage.
1685 
1686    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1687    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1688    In general, for PDE problems in which most nonzeros are near the diagonal,
1689    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1690    or you will get TERRIBLE performance; see the users' manual chapter on
1691    matrices.
1692 
1693    Level: intermediate
1694 
1695 .keywords: matrix, block, aij, compressed row, sparse, parallel
1696 
1697 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1698 @*/
1699 
1700 int MatMPISBAIJSetPreallocation(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1701 {
1702   Mat_MPISBAIJ *b;
1703   int          ierr,i,mbs,Mbs=PETSC_DECIDE;
1704   PetscTruth   flg2;
1705 
1706   PetscFunctionBegin;
1707   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg2);CHKERRQ(ierr);
1708   if (!flg2) PetscFunctionReturn(0);
1709 
1710   ierr = PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1711 
1712   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1713   if (d_nz < -2) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than -2: value %d",d_nz);
1714   if (o_nz < -2) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than -2: value %d",o_nz);
1715   if (d_nnz) {
1716     for (i=0; i<B->m/bs; i++) {
1717       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than -1: local row %d value %d",i,d_nnz[i]);
1718     }
1719   }
1720   if (o_nnz) {
1721     for (i=0; i<B->m/bs; i++) {
1722       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than -1: local row %d value %d",i,o_nnz[i]);
1723     }
1724   }
1725   B->preallocated = PETSC_TRUE;
1726   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);CHKERRQ(ierr);
1727   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);CHKERRQ(ierr);
1728   ierr = MapCreateMPI(B->comm,B->m,B->M,&B->rmap);CHKERRQ(ierr);
1729   ierr = MapCreateMPI(B->comm,B->m,B->M,&B->cmap);CHKERRQ(ierr);
1730 
1731   b   = (Mat_MPISBAIJ*)B->data;
1732   mbs = B->m/bs;
1733   Mbs = B->M/bs;
1734   if (mbs*bs != B->m) {
1735     SETERRQ2(PETSC_ERR_ARG_SIZ,"No of local rows %d must be divisible by blocksize %d",B->m,bs);
1736   }
1737 
1738   b->bs  = bs;
1739   b->bs2 = bs*bs;
1740   b->mbs = mbs;
1741   b->nbs = mbs;
1742   b->Mbs = Mbs;
1743   b->Nbs = Mbs;
1744 
1745   ierr = MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
1746   b->rowners[0]    = 0;
1747   for (i=2; i<=b->size; i++) {
1748     b->rowners[i] += b->rowners[i-1];
1749   }
1750   b->rstart    = b->rowners[b->rank];
1751   b->rend      = b->rowners[b->rank+1];
1752   b->cstart    = b->rstart;
1753   b->cend      = b->rend;
1754   for (i=0; i<=b->size; i++) {
1755     b->rowners_bs[i] = b->rowners[i]*bs;
1756   }
1757   b->rstart_bs = b-> rstart*bs;
1758   b->rend_bs   = b->rend*bs;
1759 
1760   b->cstart_bs = b->cstart*bs;
1761   b->cend_bs   = b->cend*bs;
1762 
1763 
1764   if (d_nz == PETSC_DEFAULT) d_nz = 5;
1765   ierr = MatCreateSeqSBAIJ(PETSC_COMM_SELF,bs,B->m,B->m,d_nz,d_nnz,&b->A);CHKERRQ(ierr);
1766   PetscLogObjectParent(B,b->A);
1767   if (o_nz == PETSC_DEFAULT) o_nz = 0;
1768   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->M,o_nz,o_nnz,&b->B);CHKERRQ(ierr);
1769   PetscLogObjectParent(B,b->B);
1770 
1771   /* build cache for off array entries formed */
1772   ierr = MatStashCreate_Private(B->comm,bs,&B->bstash);CHKERRQ(ierr);
1773 
1774   PetscFunctionReturn(0);
1775 }
1776 
1777 #undef __FUNC__
1778 #define __FUNC__ "MatCreateMPISBAIJ"
1779 /*@C
1780    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1781    (block compressed row).  For good matrix assembly performance
1782    the user should preallocate the matrix storage by setting the parameters
1783    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1784    performance can be increased by more than a factor of 50.
1785 
1786    Collective on MPI_Comm
1787 
1788    Input Parameters:
1789 +  comm - MPI communicator
1790 .  bs   - size of blockk
1791 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1792            This value should be the same as the local size used in creating the
1793            y vector for the matrix-vector product y = Ax.
1794 .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1795            This value should be the same as the local size used in creating the
1796            x vector for the matrix-vector product y = Ax.
1797 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1798 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1799 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1800            submatrix  (same for all local rows)
1801 .  d_nnz - array containing the number of block nonzeros in the various block rows
1802            of the in diagonal portion of the local (possibly different for each block
1803            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1804 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1805            submatrix (same for all local rows).
1806 -  o_nnz - array containing the number of nonzeros in the various block rows of the
1807            off-diagonal portion of the local submatrix (possibly different for
1808            each block row) or PETSC_NULL.
1809 
1810    Output Parameter:
1811 .  A - the matrix
1812 
1813    Options Database Keys:
1814 .   -mat_no_unroll - uses code that does not unroll the loops in the
1815                      block calculations (much slower)
1816 .   -mat_block_size - size of the blocks to use
1817 .   -mat_mpi - use the parallel matrix data structures even on one processor
1818                (defaults to using SeqBAIJ format on one processor)
1819 
1820    Notes:
1821    The user MUST specify either the local or global matrix dimensions
1822    (possibly both).
1823 
1824    If PETSC_DECIDE or  PETSC_DETERMINE is used for a particular argument on one processor
1825    than it must be used on all processors that share the object for that argument.
1826 
1827    Storage Information:
1828    For a square global matrix we define each processor's diagonal portion
1829    to be its local rows and the corresponding columns (a square submatrix);
1830    each processor's off-diagonal portion encompasses the remainder of the
1831    local matrix (a rectangular submatrix).
1832 
1833    The user can specify preallocated storage for the diagonal part of
1834    the local submatrix with either d_nz or d_nnz (not both).  Set
1835    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1836    memory allocation.  Likewise, specify preallocated storage for the
1837    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1838 
1839    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1840    the figure below we depict these three local rows and all columns (0-11).
1841 
1842 .vb
1843            0 1 2 3 4 5 6 7 8 9 10 11
1844           -------------------
1845    row 3  |  o o o d d d o o o o o o
1846    row 4  |  o o o d d d o o o o o o
1847    row 5  |  o o o d d d o o o o o o
1848           -------------------
1849 .ve
1850 
1851    Thus, any entries in the d locations are stored in the d (diagonal)
1852    submatrix, and any entries in the o locations are stored in the
1853    o (off-diagonal) submatrix.  Note that the d and the o submatrices are
1854    stored simply in the MATSEQBAIJ format for compressed row storage.
1855 
1856    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1857    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1858    In general, for PDE problems in which most nonzeros are near the diagonal,
1859    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1860    or you will get TERRIBLE performance; see the users' manual chapter on
1861    matrices.
1862 
1863    Level: intermediate
1864 
1865 .keywords: matrix, block, aij, compressed row, sparse, parallel
1866 
1867 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1868 @*/
1869 
1870 int MatCreateMPISBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,int *d_nnz,int o_nz,int *o_nnz,Mat *A)
1871 {
1872   int ierr,size;
1873 
1874   PetscFunctionBegin;
1875   ierr = MatCreate(comm,m,n,M,N,A);CHKERRQ(ierr);
1876   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1877   if (size > 1) {
1878     ierr = MatSetType(*A,MATMPISBAIJ);CHKERRQ(ierr);
1879     ierr = MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
1880   } else {
1881     ierr = MatSetType(*A,MATSEQSBAIJ);CHKERRQ(ierr);
1882     ierr = MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
1883   }
1884   PetscFunctionReturn(0);
1885 }
1886 
1887 
1888 #undef __FUNC__
1889 #define __FUNC__ "MatDuplicate_MPISBAIJ"
1890 static int MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1891 {
1892   Mat          mat;
1893   Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1894   int          ierr,len=0;
1895 
1896   PetscFunctionBegin;
1897   *newmat       = 0;
1898   ierr = MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);CHKERRQ(ierr);
1899   ierr = MatSetType(mat,MATMPISBAIJ);CHKERRQ(ierr);
1900   mat->preallocated = PETSC_TRUE;
1901   a = (Mat_MPISBAIJ*)mat->data;
1902   a->bs  = oldmat->bs;
1903   a->bs2 = oldmat->bs2;
1904   a->mbs = oldmat->mbs;
1905   a->nbs = oldmat->nbs;
1906   a->Mbs = oldmat->Mbs;
1907   a->Nbs = oldmat->Nbs;
1908 
1909   a->rstart       = oldmat->rstart;
1910   a->rend         = oldmat->rend;
1911   a->cstart       = oldmat->cstart;
1912   a->cend         = oldmat->cend;
1913   a->size         = oldmat->size;
1914   a->rank         = oldmat->rank;
1915   a->donotstash   = oldmat->donotstash;
1916   a->roworiented  = oldmat->roworiented;
1917   a->rowindices   = 0;
1918   a->rowvalues    = 0;
1919   a->getrowactive = PETSC_FALSE;
1920   a->barray       = 0;
1921   a->rstart_bs    = oldmat->rstart_bs;
1922   a->rend_bs      = oldmat->rend_bs;
1923   a->cstart_bs    = oldmat->cstart_bs;
1924   a->cend_bs      = oldmat->cend_bs;
1925 
1926   /* hash table stuff */
1927   a->ht           = 0;
1928   a->hd           = 0;
1929   a->ht_size      = 0;
1930   a->ht_flag      = oldmat->ht_flag;
1931   a->ht_fact      = oldmat->ht_fact;
1932   a->ht_total_ct  = 0;
1933   a->ht_insert_ct = 0;
1934 
1935   a->rowners = (int*)PetscMalloc(3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
1936   PetscLogObjectMemory(mat,3*(a->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1937   a->cowners    = a->rowners + a->size + 2;
1938   a->rowners_bs = a->cowners + a->size + 2;
1939   ierr = PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
1940   ierr = MatStashCreate_Private(matin->comm,1,&mat->stash);CHKERRQ(ierr);
1941   ierr = MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);CHKERRQ(ierr);
1942   if (oldmat->colmap) {
1943 #if defined (PETSC_USE_CTABLE)
1944   ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
1945 #else
1946     a->colmap = (int*)PetscMalloc((a->Nbs)*sizeof(int));CHKERRQ(ierr);
1947     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(int));
1948     ierr      = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));CHKERRQ(ierr);
1949 #endif
1950   } else a->colmap = 0;
1951   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
1952 ierr = PetscMalloc(len*sizeof(int),&(    a->garray ));CHKERRQ(ierr);
1953     PetscLogObjectMemory(mat,len*sizeof(int));
1954     ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));CHKERRQ(ierr);
1955   } else a->garray = 0;
1956 
1957   ierr =  VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
1958   PetscLogObjectParent(mat,a->lvec);
1959   ierr =  VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
1960 
1961   PetscLogObjectParent(mat,a->Mvctx);
1962   ierr =  MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
1963   PetscLogObjectParent(mat,a->A);
1964   ierr =  MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
1965   PetscLogObjectParent(mat,a->B);
1966   ierr = PetscFListDuplicate(mat->qlist,&matin->qlist);CHKERRQ(ierr);
1967   *newmat = mat;
1968   PetscFunctionReturn(0);
1969 }
1970 
1971 #include "petscsys.h"
1972 
1973 EXTERN_C_BEGIN
1974 #undef __FUNC__
1975 #define __FUNC__ "MatLoad_MPISBAIJ"
1976 int MatLoad_MPISBAIJ(PetscViewer viewer,MatType type,Mat *newmat)
1977 {
1978   Mat          A;
1979   int          i,nz,ierr,j,rstart,rend,fd;
1980   Scalar       *vals,*buf;
1981   MPI_Comm     comm = ((PetscObject)viewer)->comm;
1982   MPI_Status   status;
1983   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
1984   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
1985   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
1986   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
1987   int          dcount,kmax,k,nzcount,tmp;
1988 
1989   PetscFunctionBegin;
1990   ierr = PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1991 
1992   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1993   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1994   if (!rank) {
1995     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
1996     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
1997     if (header[0] != MAT_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
1998     if (header[3] < 0) {
1999       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2000     }
2001   }
2002 
2003   ierr = MPI_Bcast(header+1,3,MPI_INT,0,comm);CHKERRQ(ierr);
2004   M = header[1]; N = header[2];
2005 
2006   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2007 
2008   /*
2009      This code adds extra rows to make sure the number of rows is
2010      divisible by the blocksize
2011   */
2012   Mbs        = M/bs;
2013   extra_rows = bs - M + bs*(Mbs);
2014   if (extra_rows == bs) extra_rows = 0;
2015   else                  Mbs++;
2016   if (extra_rows &&!rank) {
2017     PetscLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2018   }
2019 
2020   /* determine ownership of all rows */
2021   mbs = Mbs/size + ((Mbs % size) > rank);
2022   m   = mbs*bs;
2023 ierr = PetscMalloc(2*(size+2)*sizeof(int),&  rowners );CHKERRQ(ierr);
2024   browners = rowners + size + 1;
2025   ierr = MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
2026   rowners[0] = 0;
2027   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2028   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2029   rstart = rowners[rank];
2030   rend   = rowners[rank+1];
2031 
2032   /* distribute row lengths to all processors */
2033   locrowlens = (int*)PetscMalloc((rend-rstart)*bs*sizeof(int));CHKERRQ(ierr);
2034   if (!rank) {
2035 ierr = PetscMalloc((M+extra_rows)*sizeof(int),&    rowlengths );CHKERRQ(ierr);
2036     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
2037     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2038 ierr = PetscMalloc(size*sizeof(int),&(    sndcounts ));CHKERRQ(ierr);
2039     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2040     ierr = MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2041     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
2042   } else {
2043     ierr = MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2044   }
2045 
2046   if (!rank) {   /* procs[0] */
2047     /* calculate the number of nonzeros on each processor */
2048 ierr = PetscMalloc(size*sizeof(int),&(    procsnz ));CHKERRQ(ierr);
2049     ierr    = PetscMemzero(procsnz,size*sizeof(int));CHKERRQ(ierr);
2050     for (i=0; i<size; i++) {
2051       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2052         procsnz[i] += rowlengths[j];
2053       }
2054     }
2055     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2056 
2057     /* determine max buffer needed and allocate it */
2058     maxnz = 0;
2059     for (i=0; i<size; i++) {
2060       maxnz = PetscMax(maxnz,procsnz[i]);
2061     }
2062 ierr = PetscMalloc(maxnz*sizeof(int),&(    cols ));CHKERRQ(ierr);
2063 
2064     /* read in my part of the matrix column indices  */
2065     nz = procsnz[0];
2066 ierr = PetscMalloc(nz*sizeof(int),&(    ibuf ));CHKERRQ(ierr);
2067     mycols = ibuf;
2068     if (size == 1)  nz -= extra_rows;
2069     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2070     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2071 
2072     /* read in every ones (except the last) and ship off */
2073     for (i=1; i<size-1; i++) {
2074       nz   = procsnz[i];
2075       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2076       ierr = MPI_Send(cols,nz,MPI_INT,i,tag,comm);CHKERRQ(ierr);
2077     }
2078     /* read in the stuff for the last proc */
2079     if (size != 1) {
2080       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2081       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2082       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2083       ierr = MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);CHKERRQ(ierr);
2084     }
2085     ierr = PetscFree(cols);CHKERRQ(ierr);
2086   } else {  /* procs[i], i>0 */
2087     /* determine buffer space needed for message */
2088     nz = 0;
2089     for (i=0; i<m; i++) {
2090       nz += locrowlens[i];
2091     }
2092 ierr = PetscMalloc(nz*sizeof(int),&(    ibuf   ));CHKERRQ(ierr);
2093     mycols = ibuf;
2094     /* receive message of column indices*/
2095     ierr = MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
2096     ierr = MPI_Get_count(&status,MPI_INT,&maxnz);CHKERRQ(ierr);
2097     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2098   }
2099 
2100   /* loop over local rows, determining number of off diagonal entries */
2101   dlens  = (int*)PetscMalloc(2*(rend-rstart+1)*sizeof(int));CHKERRQ(ierr);
2102   odlens = dlens + (rend-rstart);
2103 ierr = PetscMalloc(3*Mbs*sizeof(int),&(  mask   ));CHKERRQ(ierr);
2104   ierr   = PetscMemzero(mask,3*Mbs*sizeof(int));CHKERRQ(ierr);
2105   masked1 = mask    + Mbs;
2106   masked2 = masked1 + Mbs;
2107   rowcount = 0; nzcount = 0;
2108   for (i=0; i<mbs; i++) {
2109     dcount  = 0;
2110     odcount = 0;
2111     for (j=0; j<bs; j++) {
2112       kmax = locrowlens[rowcount];
2113       for (k=0; k<kmax; k++) {
2114         tmp = mycols[nzcount++]/bs; /* block col. index */
2115         if (!mask[tmp]) {
2116           mask[tmp] = 1;
2117           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2118           else masked1[dcount++] = tmp; /* entry in diag portion */
2119         }
2120       }
2121       rowcount++;
2122     }
2123 
2124     dlens[i]  = dcount;  /* d_nzz[i] */
2125     odlens[i] = odcount; /* o_nzz[i] */
2126 
2127     /* zero out the mask elements we set */
2128     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2129     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2130   }
2131 
2132   /* create our matrix */
2133   ierr = MatCreateMPISBAIJ(comm,bs,m,m,PETSC_DETERMINE,PETSC_DETERMINE,0,dlens,0,odlens,newmat);
2134   CHKERRQ(ierr);
2135   A = *newmat;
2136   ierr = MatSetOption(A,MAT_COLUMNS_SORTED);CHKERRQ(ierr);
2137 
2138   if (!rank) {
2139 ierr = PetscMalloc(maxnz*sizeof(Scalar),&(    buf ));CHKERRQ(ierr);
2140     /* read in my part of the matrix numerical values  */
2141     nz = procsnz[0];
2142     vals = buf;
2143     mycols = ibuf;
2144     if (size == 1)  nz -= extra_rows;
2145     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2146     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2147 
2148     /* insert into matrix */
2149     jj      = rstart*bs;
2150     for (i=0; i<m; i++) {
2151       ierr = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2152       mycols += locrowlens[i];
2153       vals   += locrowlens[i];
2154       jj++;
2155     }
2156 
2157     /* read in other processors (except the last one) and ship out */
2158     for (i=1; i<size-1; i++) {
2159       nz   = procsnz[i];
2160       vals = buf;
2161       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2162       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);CHKERRQ(ierr);
2163     }
2164     /* the last proc */
2165     if (size != 1){
2166       nz   = procsnz[i] - extra_rows;
2167       vals = buf;
2168       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2169       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2170       ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);CHKERRQ(ierr);
2171     }
2172     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2173 
2174   } else {
2175     /* receive numeric values */
2176 ierr = PetscMalloc(nz*sizeof(Scalar),&(    buf ));CHKERRQ(ierr);
2177 
2178     /* receive message of values*/
2179     vals   = buf;
2180     mycols = ibuf;
2181     ierr   = MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);CHKERRQ(ierr);
2182     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2183     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2184 
2185     /* insert into matrix */
2186     jj      = rstart*bs;
2187     for (i=0; i<m; i++) {
2188       ierr    = MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2189       mycols += locrowlens[i];
2190       vals   += locrowlens[i];
2191       jj++;
2192     }
2193   }
2194 
2195   ierr = PetscFree(locrowlens);CHKERRQ(ierr);
2196   ierr = PetscFree(buf);CHKERRQ(ierr);
2197   ierr = PetscFree(ibuf);CHKERRQ(ierr);
2198   ierr = PetscFree(rowners);CHKERRQ(ierr);
2199   ierr = PetscFree(dlens);CHKERRQ(ierr);
2200   ierr = PetscFree(mask);CHKERRQ(ierr);
2201   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2202   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2203   PetscFunctionReturn(0);
2204 }
2205 EXTERN_C_END
2206 
2207 #undef __FUNC__
2208 #define __FUNC__ "MatMPISBAIJSetHashTableFactor"
2209 /*@
2210    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2211 
2212    Input Parameters:
2213 .  mat  - the matrix
2214 .  fact - factor
2215 
2216    Collective on Mat
2217 
2218    Level: advanced
2219 
2220   Notes:
2221    This can also be set by the command line option: -mat_use_hash_table fact
2222 
2223 .keywords: matrix, hashtable, factor, HT
2224 
2225 .seealso: MatSetOption()
2226 @*/
2227 int MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2228 {
2229   PetscFunctionBegin;
2230   SETERRQ(1,"Function not yet written for SBAIJ format");
2231   /* PetscFunctionReturn(0); */
2232 }
2233 
2234 #undef __FUNC__
2235 #define __FUNC__ "MatGetRowMax_MPISBAIJ"
2236 int MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2237 {
2238   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2239   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(a->B)->data;
2240   PetscReal    atmp;
2241   double       *work,*svalues,*rvalues;
2242   int          ierr,i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2243   int          rank,size,*rowners_bs,dest,count,source;
2244   Scalar       *ba,*va;
2245   MPI_Status   stat;
2246 
2247   PetscFunctionBegin;
2248   ierr = MatGetRowMax(a->A,v);CHKERRQ(ierr);
2249   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2250 
2251   ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRA(ierr);
2252   ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRA(ierr);
2253 
2254   bs   = a->bs;
2255   mbs  = a->mbs;
2256   Mbs  = a->Mbs;
2257   ba   = b->a;
2258   bi   = b->i;
2259   bj   = b->j;
2260   /*
2261   PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] M: %d, bs: %d, mbs: %d \n",rank,bs*Mbs,bs,mbs);
2262   PetscSynchronizedFlush(PETSC_COMM_WORLD);
2263   */
2264 
2265   /* find ownerships */
2266   rowners_bs = a->rowners_bs;
2267   /*
2268   if (!rank){
2269     for (i=0; i<size+1; i++) PetscPrintf(PETSC_COMM_SELF," rowners_bs[%d]: %d\n",i,rowners_bs[i]);
2270   }
2271   */
2272 
2273   /* each proc creates an array to be distributed */
2274 ierr = PetscMalloc(bs*Mbs*sizeof(PetscReal),&(  work  ));CHKERRQ(ierr);
2275   ierr  = PetscMemzero(work,bs*Mbs*sizeof(PetscReal));CHKERRQ(ierr);
2276 
2277   /* row_max for B */
2278   if (rank != size-1){
2279     for (i=0; i<mbs; i++) {
2280       ncols = bi[1] - bi[0]; bi++;
2281       brow  = bs*i;
2282       for (j=0; j<ncols; j++){
2283         bcol = bs*(*bj);
2284         for (kcol=0; kcol<bs; kcol++){
2285           col = bcol + kcol;                 /* local col index */
2286           col += rowners_bs[rank+1];      /* global col index */
2287           /* PetscPrintf(PETSC_COMM_SELF,"[%d], col: %d\n",rank,col); */
2288           for (krow=0; krow<bs; krow++){
2289             atmp = PetscAbsScalar(*ba); ba++;
2290             row = brow + krow;    /* local row index */
2291             /* printf("val[%d,%d]: %g\n",row,col,atmp); */
2292             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2293             if (work[col] < atmp) work[col] = atmp;
2294           }
2295         }
2296         bj++;
2297       }
2298     }
2299     /*
2300       PetscPrintf(PETSC_COMM_SELF,"[%d], work: ",rank);
2301       for (i=0; i<bs*Mbs; i++) PetscPrintf(PETSC_COMM_SELF,"%g ",work[i]);
2302       PetscPrintf(PETSC_COMM_SELF,"[%d]: \n");
2303       */
2304 
2305     /* send values to its owners */
2306     for (dest=rank+1; dest<size; dest++){
2307       svalues = work + rowners_bs[dest];
2308       count = rowners_bs[dest+1]-rowners_bs[dest];
2309       ierr = MPI_Send(svalues,count,MPI_DOUBLE,dest,rank,PETSC_COMM_WORLD);CHKERRQ(ierr);
2310       /*
2311       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] sends %d values to [%d]: %g, %g, %g, %g\n",rank,count,dest,svalues[0],svalues[1],svalues[2],svalues[3]);
2312       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2313       */
2314     }
2315   }
2316 
2317   /* receive values */
2318   if (rank){
2319     rvalues = work;
2320     count = rowners_bs[rank+1]-rowners_bs[rank];
2321     for (source=0; source<rank; source++){
2322       ierr = MPI_Recv(rvalues,count,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,PETSC_COMM_WORLD,&stat);CHKERRQ(ierr);
2323       /* process values */
2324       for (i=0; i<count; i++){
2325         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2326       }
2327       /*
2328       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] received %d values from [%d]: %g, %g, %g, %g \n",rank,count,stat.MPI_SOURCE,rvalues[0],rvalues[1],rvalues[2],rvalues[3]);
2329       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2330       */
2331     }
2332   }
2333 
2334   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2335   ierr = PetscFree(work);CHKERRA(ierr);
2336   PetscFunctionReturn(0);
2337 }
2338