xref: /petsc/src/mat/impls/sbaij/mpi/mpisbaij.c (revision b5758dff40dc4232f7fa9713b5481f7e0e0d5d24)
1 /*$Id: mpisbaij.c,v 1.43 2001/01/20 03:35:02 bsmith Exp bsmith $*/
2 
3 #include "src/mat/impls/baij/mpi/mpibaij.h"    /*I "petscmat.h" I*/
4 #include "src/vec/vecimpl.h"
5 #include "mpisbaij.h"
6 #include "src/mat/impls/sbaij/seq/sbaij.h"
7 
8 extern int MatSetUpMultiply_MPISBAIJ(Mat);
9 extern int DisAssemble_MPISBAIJ(Mat);
10 extern int MatIncreaseOverlap_MPISBAIJ(Mat,int,IS *,int);
11 extern int MatGetSubMatrices_MPISBAIJ(Mat,int,IS *,IS *,MatReuse,Mat **);
12 extern int MatGetValues_SeqSBAIJ(Mat,int,int *,int,int *,Scalar *);
13 extern int MatSetValues_SeqSBAIJ(Mat,int,int *,int,int *,Scalar *,InsertMode);
14 extern int MatSetValuesBlocked_SeqSBAIJ(Mat,int,int*,int,int*,Scalar*,InsertMode);
15 extern int MatGetRow_SeqSBAIJ(Mat,int,int*,int**,Scalar**);
16 extern int MatRestoreRow_SeqSBAIJ(Mat,int,int*,int**,Scalar**);
17 extern int MatPrintHelp_SeqSBAIJ(Mat);
18 extern int MatZeroRows_SeqSBAIJ(Mat,IS,Scalar*);
19 extern int MatZeroRows_SeqBAIJ(Mat,IS,Scalar *);
20 extern int MatGetRowMax_MPISBAIJ(Mat,Vec);
21 
22 /*  UGLY, ugly, ugly
23    When MatScalar == Scalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does
24    not exist as a separate routine. Otherwise ..._MatScalar() takes matrix elements already in
25    single precision (MatScalar) and inserts them into the single precision data structure, while
26    MatSetValuesBlocked_MPISBAIJ() converts the double precision (Scalar) entries into single
27    precision and then calls ..._MatScalar() to put them into the single precision data structures.
28 */
29 #if defined(PETSC_USE_MAT_SINGLE)
30 extern int MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
31 extern int MatSetValues_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
32 extern int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
33 extern int MatSetValues_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
34 extern int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
35 #else
36 #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
37 #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
38 #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
39 #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
40 #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
41 #endif
42 
43 EXTERN_C_BEGIN
44 #undef __FUNC__
45 #define __FUNC__ "MatStoreValues_MPISBAIJ"
46 int MatStoreValues_MPISBAIJ(Mat mat)
47 {
48   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
49   int          ierr;
50 
51   PetscFunctionBegin;
52   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
53   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
54   PetscFunctionReturn(0);
55 }
56 EXTERN_C_END
57 
58 EXTERN_C_BEGIN
59 #undef __FUNC__
60 #define __FUNC__ "MatRetrieveValues_MPISBAIJ"
61 int MatRetrieveValues_MPISBAIJ(Mat mat)
62 {
63   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
64   int          ierr;
65 
66   PetscFunctionBegin;
67   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
68   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
69   PetscFunctionReturn(0);
70 }
71 EXTERN_C_END
72 
73 /*
74      Local utility routine that creates a mapping from the global column
75    number to the local number in the off-diagonal part of the local
76    storage of the matrix.  This is done in a non-scalable way since the
77    length of colmap equals the global matrix length.
78 */
79 #undef __FUNC__
80 #define __FUNC__ "CreateColmap_MPISBAIJ_Private"
81 static int CreateColmap_MPISBAIJ_Private(Mat mat)
82 {
83   PetscFunctionBegin;
84   SETERRQ(1,"Function not yet written for SBAIJ format");
85   /* PetscFunctionReturn(0); */
86 }
87 
88 #define CHUNKSIZE  10
89 
90 #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
91 { \
92  \
93     brow = row/bs;  \
94     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
95     rmax = aimax[brow]; nrow = ailen[brow]; \
96       bcol = col/bs; \
97       ridx = row % bs; cidx = col % bs; \
98       low = 0; high = nrow; \
99       while (high-low > 3) { \
100         t = (low+high)/2; \
101         if (rp[t] > bcol) high = t; \
102         else              low  = t; \
103       } \
104       for (_i=low; _i<high; _i++) { \
105         if (rp[_i] > bcol) break; \
106         if (rp[_i] == bcol) { \
107           bap  = ap +  bs2*_i + bs*cidx + ridx; \
108           if (addv == ADD_VALUES) *bap += value;  \
109           else                    *bap  = value;  \
110           goto a_noinsert; \
111         } \
112       } \
113       if (a->nonew == 1) goto a_noinsert; \
114       else if (a->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
115       if (nrow >= rmax) { \
116         /* there is no extra room in row, therefore enlarge */ \
117         int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
118         MatScalar *new_a; \
119  \
120         if (a->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
121  \
122         /* malloc new storage space */ \
123         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
124         ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
125         new_j = (int*)(new_a + bs2*new_nz); \
126         new_i = new_j + new_nz; \
127  \
128         /* copy over old data into new slots */ \
129         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
130         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
131         ierr = PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
132         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
133         ierr = PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
134         ierr = PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
135         ierr = PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
136         ierr = PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
137                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
138         /* free up old matrix storage */ \
139         ierr = PetscFree(a->a);CHKERRQ(ierr);  \
140         if (!a->singlemalloc) { \
141           ierr = PetscFree(a->i);CHKERRQ(ierr); \
142           ierr = PetscFree(a->j);CHKERRQ(ierr);\
143         } \
144         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
145         a->singlemalloc = PETSC_TRUE; \
146  \
147         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
148         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
149         PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
150         a->s_maxnz += bs2*CHUNKSIZE; \
151         a->reallocs++; \
152         a->s_nz++; \
153       } \
154       N = nrow++ - 1;  \
155       /* shift up all the later entries in this row */ \
156       for (ii=N; ii>=_i; ii--) { \
157         rp[ii+1] = rp[ii]; \
158         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
159       } \
160       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); }  \
161       rp[_i]                      = bcol;  \
162       ap[bs2*_i + bs*cidx + ridx] = value;  \
163       a_noinsert:; \
164     ailen[brow] = nrow; \
165 }
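/*
   The insertion macro above and its off-diagonal (B) counterpart below follow the same
   pattern: locate the block row, binary-search the sorted block-column indices, update
   the existing bs x bs block in place if the block column is already present, otherwise
   shift the remainder of the row and, when the row is full, grow the storage by
   CHUNKSIZE block entries.  An illustrative sketch of the lookup step only (not the
   exact macro text; names follow the macro locals):

       low = 0; high = nrow;
       while (high - low > 3) {
         t = (low + high)/2;
         if (rp[t] > bcol) high = t; else low = t;
       }
       for (_i = low; _i < high; _i++) {
         if (rp[_i] == bcol) { ap[bs2*_i + bs*cidx + ridx] = value; break; }
       }
*/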
166 #ifndef MatSetValues_SeqSBAIJ_B_Private
167 #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
168 { \
169     brow = row/bs;  \
170     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
171     rmax = bimax[brow]; nrow = bilen[brow]; \
172       bcol = col/bs; \
173       ridx = row % bs; cidx = col % bs; \
174       low = 0; high = nrow; \
175       while (high-low > 3) { \
176         t = (low+high)/2; \
177         if (rp[t] > bcol) high = t; \
178         else              low  = t; \
179       } \
180       for (_i=low; _i<high; _i++) { \
181         if (rp[_i] > bcol) break; \
182         if (rp[_i] == bcol) { \
183           bap  = ap +  bs2*_i + bs*cidx + ridx; \
184           if (addv == ADD_VALUES) *bap += value;  \
185           else                    *bap  = value;  \
186           goto b_noinsert; \
187         } \
188       } \
189       if (b->nonew == 1) goto b_noinsert; \
190       else if (b->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
191       if (nrow >= rmax) { \
192         /* there is no extra room in row, therefore enlarge */ \
193         int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
194         MatScalar *new_a; \
195  \
196         if (b->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
197  \
198         /* malloc new storage space */ \
199         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
200         ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
201         new_j = (int*)(new_a + bs2*new_nz); \
202         new_i = new_j + new_nz; \
203  \
204         /* copy over old data into new slots */ \
205         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
206         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
207         ierr = PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
208         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
209         ierr = PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
210         ierr = PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
211         ierr = PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
212         ierr = PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
213                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
214         /* free up old matrix storage */ \
215         ierr = PetscFree(b->a);CHKERRQ(ierr);  \
216         if (!b->singlemalloc) { \
217           ierr = PetscFree(b->i);CHKERRQ(ierr); \
218           ierr = PetscFree(b->j);CHKERRQ(ierr); \
219         } \
220         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
221         b->singlemalloc = PETSC_TRUE; \
222  \
223         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
224         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
225         PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
226         b->maxnz += bs2*CHUNKSIZE; \
227         b->reallocs++; \
228         b->nz++; \
229       } \
230       N = nrow++ - 1;  \
231       /* shift up all the later entries in this row */ \
232       for (ii=N; ii>=_i; ii--) { \
233         rp[ii+1] = rp[ii]; \
234         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
235       } \
236       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr);}  \
237       rp[_i]                      = bcol;  \
238       ap[bs2*_i + bs*cidx + ridx] = value;  \
239       b_noinsert:; \
240     bilen[brow] = nrow; \
241 }
242 #endif
243 
244 #if defined(PETSC_USE_MAT_SINGLE)
245 #undef __FUNC__
246 #define __FUNC__ "MatSetValues_MPISBAIJ"
247 int MatSetValues_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
248 {
249   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
250   int          ierr,i,N = m*n;
251   MatScalar    *vsingle;
252 
253   PetscFunctionBegin;
254   if (N > b->setvalueslen) {
255     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
256     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
257     b->setvalueslen  = N;
258   }
259   vsingle = b->setvaluescopy;
260 
261   for (i=0; i<N; i++) {
262     vsingle[i] = v[i];
263   }
264   ierr = MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
265   PetscFunctionReturn(0);
266 }
267 
268 #undef __FUNC__
269 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ"
270 int MatSetValuesBlocked_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
271 {
272   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
273   int          ierr,i,N = m*n*b->bs2;
274   MatScalar    *vsingle;
275 
276   PetscFunctionBegin;
277   if (N > b->setvalueslen) {
278     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
279     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
280     b->setvalueslen  = N;
281   }
282   vsingle = b->setvaluescopy;
283   for (i=0; i<N; i++) {
284     vsingle[i] = v[i];
285   }
286   ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
287   PetscFunctionReturn(0);
288 }
289 
290 #undef __FUNC__
291 #define __FUNC__ "MatSetValues_MPISBAIJ_HT"
292 int MatSetValues_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
293 {
294   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
295   int          ierr,i,N = m*n;
296   MatScalar    *vsingle;
297 
298   PetscFunctionBegin;
299   SETERRQ(1,"Function not yet written for SBAIJ format");
300   /* PetscFunctionReturn(0); */
301 }
302 
303 #undef __FUNC__
304 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ_HT"
305 int MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,Scalar *v,InsertMode addv)
306 {
307   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
308   int          ierr,i,N = m*n*b->bs2;
309   MatScalar    *vsingle;
310 
311   PetscFunctionBegin;
312   SETERRQ(1,"Function not yet written for SBAIJ format");
313   /* PetscFunctionReturn(0); */
314 }
315 #endif
316 
317 /* Only add/insert a(i,j) with i<=j (blocks).
318    Any a(i,j) with i>j input by the user is ignored.
319 */
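/* Example (illustrative): with block size bs = 2, the scalar entry (row 3, col 1) falls
   in block row 3/2 = 1 and block column 1/2 = 0; the block row exceeds the block column,
   so it belongs to the lower triangle and is silently dropped, whereas (row 1, col 3)
   falls in block (0,1) and is stored. */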
320 #undef __FUNC__
321 #define __FUNC__ "MatSetValues_MPISBAIJ_MatScalar"
322 int MatSetValues_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
323 {
324   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
325   MatScalar    value;
326   PetscTruth   roworiented = baij->roworiented;
327   int          ierr,i,j,row,col;
328   int          rstart_orig=baij->rstart_bs;
329   int          rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
330   int          cend_orig=baij->cend_bs,bs=baij->bs;
331 
332   /* Some Variables required in the macro */
333   Mat          A = baij->A;
334   Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
335   int          *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
336   MatScalar    *aa=a->a;
337 
338   Mat          B = baij->B;
339   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(B)->data;
340   int          *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
341   MatScalar    *ba=b->a;
342 
343   int          *rp,ii,nrow,_i,rmax,N,brow,bcol;
344   int          low,high,t,ridx,cidx,bs2=a->bs2;
345   MatScalar    *ap,*bap;
346 
347   /* for stash */
348   int          n_loc, *in_loc=0;
349   MatScalar    *v_loc=0;
350 
351   PetscFunctionBegin;
352 
353   if(!baij->donotstash){
354     ierr = PetscMalloc(n*sizeof(int),&in_loc);CHKERRQ(ierr);
355     ierr = PetscMalloc(n*sizeof(MatScalar),&v_loc);CHKERRQ(ierr);
356   }
357 
358   for (i=0; i<m; i++) {
359     if (im[i] < 0) continue;
360 #if defined(PETSC_USE_BOPT_g)
361     if (im[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
362 #endif
363     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
364       row = im[i] - rstart_orig;              /* local row index */
365       for (j=0; j<n; j++) {
366         if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
367         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
368           col = in[j] - cstart_orig;          /* local col index */
369           brow = row/bs; bcol = col/bs;
370           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
371           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
372           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
373           /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
374         } else if (in[j] < 0) continue;
375 #if defined(PETSC_USE_BOPT_g)
376         else if (in[j] >= mat->N) {SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Col too large");}
377 #endif
378         else {  /* off-diag entry (B) */
379           if (mat->was_assembled) {
380             if (!baij->colmap) {
381               ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
382             }
383 #if defined (PETSC_USE_CTABLE)
384             ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
385             col  = col - 1 + in[j]%bs;
386 #else
387             col = baij->colmap[in[j]/bs] - 1 + in[j]%bs;
388 #endif
389             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
390               ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
391               col =  in[j];
392               /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
393               B = baij->B;
394               b = (Mat_SeqBAIJ*)(B)->data;
395               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
396               ba=b->a;
397             }
398           } else col = in[j];
399           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
400           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
401           /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
402         }
403       }
404     } else {  /* off processor entry */
405       if (!baij->donotstash) {
406         n_loc = 0;
407         for (j=0; j<n; j++){
408           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
409           in_loc[n_loc] = in[j];
410           if (roworiented) {
411             v_loc[n_loc] = v[i*n+j];
412           } else {
413             v_loc[n_loc] = v[j*m+i];
414           }
415           n_loc++;
416         }
417         ierr = MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);CHKERRQ(ierr);
418       }
419     }
420   }
421 
422   if(!baij->donotstash){
423     ierr = PetscFree(in_loc);CHKERRQ(ierr);
424     ierr = PetscFree(v_loc);CHKERRQ(ierr);
425   }
426   PetscFunctionReturn(0);
427 }
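/*
   Usage sketch (assumed typical calling sequence, real Scalar build, error handling
   abbreviated): only upper-triangular entries need to be supplied; anything below the
   diagonal is dropped by the routine above, and off-processor rows are stashed until
   assembly.

       Mat    A;
       int    ierr,row = 5,cols[2] = {5,7};
       Scalar vals[2] = {4.0,-1.0};
       ierr = MatCreateMPISBAIJ(PETSC_COMM_WORLD,1,PETSC_DECIDE,PETSC_DECIDE,10,10,
                                3,PETSC_NULL,2,PETSC_NULL,&A);CHKERRQ(ierr);
       ierr = MatSetValues(A,1,&row,2,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
       ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
*/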
428 
429 #undef __FUNC__
430 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ_MatScalar"
431 int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
432 {
433   PetscFunctionBegin;
434   SETERRQ(1,"Function not yet written for SBAIJ format");
435   /* PetscFunctionReturn(0); */
436 }
437 
438 #define HASH_KEY 0.6180339887
439 #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
440 /* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
441 /* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
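/*
   HASH() is multiplicative (golden-ratio) hashing: multiply the key by the fractional
   golden ratio, keep the fractional part of the product, and scale it by the table size.
   Worked example (illustrative): key = 7, size = 13 gives 7*0.6180339887 = 4.3262...,
   fractional part 0.3262..., slot = (int)(13*0.3262...) = 4.  tmp must be a floating
   point temporary supplied by the caller of the macro.
*/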
442 #undef __FUNC__
443 #define __FUNC__ "MatSetValues_MPISBAIJ_HT_MatScalar"
444 int MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
445 {
446   PetscFunctionBegin;
447   SETERRQ(1,"Function not yet written for SBAIJ format");
448   /* PetscFunctionReturn(0); */
449 }
450 
451 #undef __FUNC__
452 #define __FUNC__ "MatSetValuesBlocked_MPISBAIJ_HT_MatScalar"
453 int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
454 {
455   PetscFunctionBegin;
456   SETERRQ(1,"Function not yet written for SBAIJ format");
457   /* PetscFunctionReturn(0); */
458 }
459 
460 #undef __FUNC__
461 #define __FUNC__ "MatGetValues_MPISBAIJ"
462 int MatGetValues_MPISBAIJ(Mat mat,int m,int *idxm,int n,int *idxn,Scalar *v)
463 {
464   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
465   int          bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
466   int          bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
467 
468   PetscFunctionBegin;
469   for (i=0; i<m; i++) {
470     if (idxm[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
471     if (idxm[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
472     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
473       row = idxm[i] - bsrstart;
474       for (j=0; j<n; j++) {
475         if (idxn[j] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column");
476         if (idxn[j] >= mat->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
477         if (idxn[j] >= bscstart && idxn[j] < bscend){
478           col = idxn[j] - bscstart;
479           ierr = MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
480         } else {
481           if (!baij->colmap) {
482             ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
483           }
484 #if defined (PETSC_USE_CTABLE)
485           ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
486           data --;
487 #else
488           data = baij->colmap[idxn[j]/bs]-1;
489 #endif
490           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
491           else {
492             col  = data + idxn[j]%bs;
493             ierr = MatGetValues_SeqSBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
494           }
495         }
496       }
497     } else {
498       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
499     }
500   }
501  PetscFunctionReturn(0);
502 }
503 
504 #undef __FUNC__
505 #define __FUNC__ "MatNorm_MPISBAIJ"
506 int MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
507 {
508   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
509   /* Mat_SeqSBAIJ *amat = (Mat_SeqSBAIJ*)baij->A->data; */
510   /* Mat_SeqBAIJ  *bmat = (Mat_SeqBAIJ*)baij->B->data; */
511   int        ierr;
512   PetscReal  sum[2],*lnorm2;
513 
514   PetscFunctionBegin;
515   if (baij->size == 1) {
516     ierr =  MatNorm(baij->A,type,norm);CHKERRQ(ierr);
517   } else {
518     if (type == NORM_FROBENIUS) {
519       ierr = PetscMalloc(2*sizeof(double),&lnorm2);CHKERRQ(ierr);
520       ierr =  MatNorm(baij->A,type,lnorm2);CHKERRQ(ierr);
521       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
522       ierr =  MatNorm(baij->B,type,lnorm2);CHKERRQ(ierr);
523       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
524       /*
525       ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
526       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], lnorm2=%g, %g\n",rank,lnorm2[0],lnorm2[1]);
527       */
528       ierr = MPI_Allreduce(lnorm2,sum,2,MPI_DOUBLE,MPI_SUM,mat->comm);CHKERRQ(ierr);
529       /*
530       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], sum=%g, %g\n",rank,sum[0],sum[1]);
531       PetscSynchronizedFlush(PETSC_COMM_WORLD); */
532 
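      /* Frobenius norm of the full symmetric matrix (sketch of the reasoning): sum[0]
         is the squared norm of the local diagonal block as reported by the sequential
         SBAIJ MatNorm(), while sum[1], the squared norm of the stored off-diagonal
         blocks, is counted twice because their symmetric counterparts in the lower
         triangle are not stored on any process. */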
533       *norm = sqrt(sum[0] + 2*sum[1]);
534       ierr = PetscFree(lnorm2);CHKERRQ(ierr);
535     } else {
536       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
537     }
538   }
539   PetscFunctionReturn(0);
540 }
541 
542 /*
543   Creates and initializes the hash table.
544   This table is created only once.
545   If new entries need to be added to the matrix
546   then the hash table has to be destroyed and
547   recreated.
548 */
549 #undef __FUNC__
550 #define __FUNC__ "MatCreateHashTable_MPISBAIJ_Private"
551 int MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
552 {
553   PetscFunctionBegin;
554   SETERRQ(1,"Function not yet written for SBAIJ format");
555   /* PetscFunctionReturn(0); */
556 }
557 
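/*
   Assembly proceeds in two phases: MatAssemblyBegin() below starts scattering the
   stashed off-processor entries (both the scalar stash and the block stash) to their
   owning processes, and MatAssemblyEnd() drains the incoming messages, re-inserts the
   values through MatSetValues_MPISBAIJ_MatScalar() / MatSetValuesBlocked_MPISBAIJ_MatScalar(),
   and finally assembles the local A (diagonal) and B (off-diagonal) blocks.
*/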
558 #undef __FUNC__
559 #define __FUNC__ "MatAssemblyBegin_MPISBAIJ"
560 int MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
561 {
562   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
563   int         ierr,nstash,reallocs;
564   InsertMode  addv;
565 
566   PetscFunctionBegin;
567   if (baij->donotstash) {
568     PetscFunctionReturn(0);
569   }
570 
571   /* make sure all processors are either in INSERT_VALUES or ADD_VALUES mode */
572   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);CHKERRQ(ierr);
573   if (addv == (ADD_VALUES|INSERT_VALUES)) {
574     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
575   }
576   mat->insertmode = addv; /* in case this processor had no cache */
577 
578   ierr = MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);CHKERRQ(ierr);
579   ierr = MatStashScatterBegin_Private(&mat->bstash,baij->rowners);CHKERRQ(ierr);
580   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
581   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %d entries,uses %d mallocs.\n",nstash,reallocs);
582   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
583   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
584   PetscFunctionReturn(0);
585 }
586 
587 #undef __FUNC__
588 #define __FUNC__ "MatAssemblyEnd_MPISBAIJ"
589 int MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
590 {
591   Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
592   Mat_SeqSBAIJ  *a=(Mat_SeqSBAIJ*)baij->A->data;
593   Mat_SeqBAIJ  *b=(Mat_SeqBAIJ*)baij->B->data;
594   int         i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
595   int         *row,*col,other_disassembled;
596   PetscTruth  r1,r2,r3;
597   MatScalar   *val;
598   InsertMode  addv = mat->insertmode;
599   int         rank;
600 
601   PetscFunctionBegin;
602   /* remove later: the rank obtained below is used only by the commented-out debugging prints */
603   ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr);
604 
605   if (!baij->donotstash) {
606     while (1) {
607       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
608       /*
609       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]: in AssemblyEnd, stash, flg=%d\n",rank,flg);
610       PetscSynchronizedFlush(PETSC_COMM_WORLD);
611       */
612       if (!flg) break;
613 
614       for (i=0; i<n;) {
615         /* Now identify the consecutive vals belonging to the same row */
616         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
617         if (j < n) ncols = j-i;
618         else       ncols = n-i;
619         /* Now assemble all these values with a single function call */
620         ierr = MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
621         i = j;
622       }
623     }
624     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
625     /* Now process the block-stash. Since the values are stashed column-oriented,
626        set the roworiented flags to PETSC_FALSE (column oriented), and after MatSetValues()
627        restore the original flags */
628     r1 = baij->roworiented;
629     r2 = a->roworiented;
630     r3 = b->roworiented;
631     baij->roworiented = PETSC_FALSE;
632     a->roworiented    = PETSC_FALSE;
633     b->roworiented    = PETSC_FALSE;
634     while (1) {
635       ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
636       if (!flg) break;
637 
638       for (i=0; i<n;) {
639         /* Now identify the consecutive vals belonging to the same row */
640         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
641         if (j < n) ncols = j-i;
642         else       ncols = n-i;
643         ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
644         i = j;
645       }
646     }
647     ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
648     baij->roworiented = r1;
649     a->roworiented    = r2;
650     b->roworiented    = r3;
651   }
652 
653   ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
654   ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);
655 
656   /* determine if any processor has disassembled, if so we must
657      also disassemble ourselves, in order that we may reassemble. */
658   /*
659      if nonzero structure of submatrix B cannot change then we know that
660      no processor disassembled thus we can skip this stuff
661   */
662   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
663     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);CHKERRQ(ierr);
664     if (mat->was_assembled && !other_disassembled) {
665       ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
666     }
667   }
668 
669   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
670     ierr = MatSetUpMultiply_MPISBAIJ(mat);CHKERRQ(ierr);
671   }
672   ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
673   ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);
674 
675 #if defined(PETSC_USE_BOPT_g)
676   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
677     PetscLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((double)baij->ht_total_ct)/baij->ht_insert_ct);
678     baij->ht_total_ct  = 0;
679     baij->ht_insert_ct = 0;
680   }
681 #endif
682   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
683     ierr = MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
684     mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
685     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
686   }
687 
688   if (baij->rowvalues) {
689     ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
690     baij->rowvalues = 0;
691   }
692   PetscFunctionReturn(0);
693 }
694 
695 #undef __FUNC__
696 #define __FUNC__ "MatView_MPISBAIJ_ASCIIorDraworSocket"
697 static int MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
698 {
699   Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
700   int               ierr,bs = baij->bs,size = baij->size,rank = baij->rank;
701   PetscTruth        isascii,isdraw;
702   PetscViewer       sviewer;
703   PetscViewerFormat format;
704 
705   PetscFunctionBegin;
706   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
707   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
708   if (isascii) {
709     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
710     if (format == PETSC_VIEWER_ASCII_INFO_LONG) {
711       MatInfo info;
712       ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
713       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
714       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
715               rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
716               baij->bs,(int)info.memory);CHKERRQ(ierr);
717       ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
718       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
719       ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
720       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
721       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
722       ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
723       PetscFunctionReturn(0);
724     } else if (format == PETSC_VIEWER_ASCII_INFO) {
725       ierr = PetscViewerASCIIPrintf(viewer,"  block size is %d\n",bs);CHKERRQ(ierr);
726       PetscFunctionReturn(0);
727     }
728   }
729 
730   if (isdraw) {
731     PetscDraw       draw;
732     PetscTruth isnull;
733     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
734     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
735   }
736 
737   if (size == 1) {
738     ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
739   } else {
740     /* assemble the entire matrix onto first processor. */
741     Mat         A;
742     Mat_SeqSBAIJ *Aloc;
743     Mat_SeqBAIJ *Bloc;
744     int         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
745     MatScalar   *a;
746 
747     if (!rank) {
748       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
749     } else {
750       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
751     }
752     PetscLogObjectParent(mat,A);
753 
754     /* copy over the A part */
755     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
756     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
757     ierr  = PetscMalloc(bs*sizeof(int),&rvals);CHKERRQ(ierr);
758 
759     for (i=0; i<mbs; i++) {
760       rvals[0] = bs*(baij->rstart + i);
761       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
762       for (j=ai[i]; j<ai[i+1]; j++) {
763         col = (baij->cstart+aj[j])*bs;
764         for (k=0; k<bs; k++) {
765           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
766           col++; a += bs;
767         }
768       }
769     }
770     /* copy over the B part */
771     Bloc = (Mat_SeqBAIJ*)baij->B->data;
772     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
773     for (i=0; i<mbs; i++) {
774       rvals[0] = bs*(baij->rstart + i);
775       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
776       for (j=ai[i]; j<ai[i+1]; j++) {
777         col = baij->garray[aj[j]]*bs;
778         for (k=0; k<bs; k++) {
779           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
780           col++; a += bs;
781         }
782       }
783     }
784     ierr = PetscFree(rvals);CHKERRQ(ierr);
785     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
786     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
787     /*
788        Everyone has to call MatView() to draw the matrix since the graphics waits are
789        synchronized across all processors that share the PetscDraw object
790     */
791     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
792     if (!rank) {
793       ierr = MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
794     }
795     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
796     ierr = MatDestroy(A);CHKERRQ(ierr);
797   }
798   PetscFunctionReturn(0);
799 }
800 
801 #undef __FUNC__
802 #define __FUNC__ "MatView_MPISBAIJ"
803 int MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
804 {
805   int        ierr;
806   PetscTruth isascii,isdraw,issocket,isbinary;
807 
808   PetscFunctionBegin;
809   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
810   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
811   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
812   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
813   if (isascii || isdraw || issocket || isbinary) {
814     ierr = MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
815   } else {
816     SETERRQ1(1,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
817   }
818   PetscFunctionReturn(0);
819 }
820 
821 #undef __FUNC__
822 #define __FUNC__ "MatDestroy_MPISBAIJ"
823 int MatDestroy_MPISBAIJ(Mat mat)
824 {
825   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
826   int         ierr;
827 
828   PetscFunctionBegin;
829 #if defined(PETSC_USE_LOG)
830   PetscLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
831 #endif
832   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
833   ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
834   ierr = PetscFree(baij->rowners);CHKERRQ(ierr);
835   ierr = MatDestroy(baij->A);CHKERRQ(ierr);
836   ierr = MatDestroy(baij->B);CHKERRQ(ierr);
837 #if defined (PETSC_USE_CTABLE)
838   if (baij->colmap) {ierr = PetscTableDelete(baij->colmap);CHKERRQ(ierr);}
839 #else
840   if (baij->colmap) {ierr = PetscFree(baij->colmap);CHKERRQ(ierr);}
841 #endif
842   if (baij->garray) {ierr = PetscFree(baij->garray);CHKERRQ(ierr);}
843   if (baij->lvec)   {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
844   if (baij->Mvctx)  {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
845   if (baij->rowvalues) {ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);}
846   if (baij->barray) {ierr = PetscFree(baij->barray);CHKERRQ(ierr);}
847   if (baij->hd) {ierr = PetscFree(baij->hd);CHKERRQ(ierr);}
848 #if defined(PETSC_USE_MAT_SINGLE)
849   if (baij->setvaluescopy) {ierr = PetscFree(baij->setvaluescopy);CHKERRQ(ierr);}
850 #endif
851   ierr = PetscFree(baij);CHKERRQ(ierr);
852   PetscFunctionReturn(0);
853 }
854 
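/*
   For the symmetric storage format the product y = A*x is assembled from three pieces
   (sketch of what MatMult_MPISBAIJ below does):
       y_local  = A_d * x_local        stored upper-triangular diagonal block
       y_local += B   * x_ghost        stored off-diagonal (upper triangle) blocks
       lvec     = B^T * x_local        contribution of the unstored lower triangle,
                                       scattered back into y with ADD_VALUES
   so the transpose of B stands in for the off-diagonal blocks that are never stored.
*/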
855 #undef __FUNC__
856 #define __FUNC__ "MatMult_MPISBAIJ"
857 int MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
858 {
859   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
860   int         ierr,nt;
861 
862   PetscFunctionBegin;
863   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
864   if (nt != A->n) {
865     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
866   }
867   ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
868   if (nt != A->m) {
869     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible parition of A and yy");
870   }
871 
872   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
873   /* do diagonal part */
874   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
875   /* do superdiagonal part */
876   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
877   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
878   /* do subdiagonal part */
879   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
880   ierr = VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
881   ierr = VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
882 
883   PetscFunctionReturn(0);
884 }
885 
886 #undef __FUNC__
887 #define __FUNC__ "MatMultAdd_MPISBAIJ"
888 int MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
889 {
890   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
891   int        ierr;
892 
893   PetscFunctionBegin;
894   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
895   /* do diagonal part */
896   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
897   /* do superdiagonal part */
898   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
899   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
900 
901   /* do subdiagonal part */
902   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
903   ierr = VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
904   ierr = VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
905 
906   PetscFunctionReturn(0);
907 }
908 
909 #undef __FUNC__
910 #define __FUNC__ "MatMultTranspose_MPISBAIJ"
911 int MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
912 {
913   PetscFunctionBegin;
914   SETERRQ(1,"Matrix is symmetric. Call MatMult().");
915   /* PetscFunctionReturn(0); */
916 }
917 
918 #undef __FUNC__
919 #define __FUNC__ "MatMultTransposeAdd_MPISBAIJ"
920 int MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
921 {
922   PetscFunctionBegin;
923   SETERRQ(1,"Matrix is symmetric. Call MatMultAdd().");
924   /* PetscFunctionReturn(0); */
925 }
926 
927 /*
928   This only works correctly for square matrices where the subblock A->A is the
929    diagonal block
930 */
931 #undef __FUNC__
932 #define __FUNC__ "MatGetDiagonal_MPISBAIJ"
933 int MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
934 {
935   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
936   int         ierr;
937 
938   PetscFunctionBegin;
939   /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
940   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
941   PetscFunctionReturn(0);
942 }
943 
944 #undef __FUNC__
945 #define __FUNC__ "MatScale_MPISBAIJ"
946 int MatScale_MPISBAIJ(Scalar *aa,Mat A)
947 {
948   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
949   int         ierr;
950 
951   PetscFunctionBegin;
952   ierr = MatScale(aa,a->A);CHKERRQ(ierr);
953   ierr = MatScale(aa,a->B);CHKERRQ(ierr);
954   PetscFunctionReturn(0);
955 }
956 
957 #undef __FUNC__
958 #define __FUNC__ "MatGetOwnershipRange_MPISBAIJ"
959 int MatGetOwnershipRange_MPISBAIJ(Mat matin,int *m,int *n)
960 {
961   Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
962 
963   PetscFunctionBegin;
964   if (m) *m = mat->rstart*mat->bs;
965   if (n) *n = mat->rend*mat->bs;
966   PetscFunctionReturn(0);
967 }
968 
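/*
   MatGetRow_MPISBAIJ below assembles one global row from the two local pieces: column
   indices and values are fetched from A (diagonal block, block-local numbering) and B
   (off-diagonal block), B's indices are translated to global numbering through garray,
   and the pieces are concatenated so that the returned column indices stay sorted.
*/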
969 #undef __FUNC__
970 #define __FUNC__ "MatGetRow_MPISBAIJ"
971 int MatGetRow_MPISBAIJ(Mat matin,int row,int *nz,int **idx,Scalar **v)
972 {
973   Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
974   Scalar     *vworkA,*vworkB,**pvA,**pvB,*v_p;
975   int        bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
976   int        nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
977   int        *cmap,*idx_p,cstart = mat->cstart;
978 
979   PetscFunctionBegin;
980   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
981   mat->getrowactive = PETSC_TRUE;
982 
983   if (!mat->rowvalues && (idx || v)) {
984     /*
985         allocate enough space to hold information from the longest row.
986     */
987     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
988     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
989     int     max = 1,mbs = mat->mbs,tmp;
990     for (i=0; i<mbs; i++) {
991       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
992       if (max < tmp) { max = tmp; }
993     }
994     ierr = PetscMalloc(max*bs2*(sizeof(int)+sizeof(Scalar)),&mat->rowvalues);CHKERRQ(ierr);
995     mat->rowindices = (int*)(mat->rowvalues + max*bs2);
996   }
997 
998   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
999   lrow = row - brstart;  /* local row index */
1000 
1001   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1002   if (!v)   {pvA = 0; pvB = 0;}
1003   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1004   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1005   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1006   nztot = nzA + nzB;
1007 
1008   cmap  = mat->garray;
1009   if (v  || idx) {
1010     if (nztot) {
1011       /* Sort by increasing column numbers, assuming A and B already sorted */
1012       int imark = -1;
1013       if (v) {
1014         *v = v_p = mat->rowvalues;
1015         for (i=0; i<nzB; i++) {
1016           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1017           else break;
1018         }
1019         imark = i;
1020         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1021         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1022       }
1023       if (idx) {
1024         *idx = idx_p = mat->rowindices;
1025         if (imark > -1) {
1026           for (i=0; i<imark; i++) {
1027             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1028           }
1029         } else {
1030           for (i=0; i<nzB; i++) {
1031             if (cmap[cworkB[i]/bs] < cstart)
1032               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1033             else break;
1034           }
1035           imark = i;
1036         }
1037         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1038         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1039       }
1040     } else {
1041       if (idx) *idx = 0;
1042       if (v)   *v   = 0;
1043     }
1044   }
1045   *nz = nztot;
1046   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1047   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1048   PetscFunctionReturn(0);
1049 }
1050 
1051 #undef __FUNC__
1052 #define __FUNC__ "MatRestoreRow_MPISBAIJ"
1053 int MatRestoreRow_MPISBAIJ(Mat mat,int row,int *nz,int **idx,Scalar **v)
1054 {
1055   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1056 
1057   PetscFunctionBegin;
1058   if (baij->getrowactive == PETSC_FALSE) {
1059     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1060   }
1061   baij->getrowactive = PETSC_FALSE;
1062   PetscFunctionReturn(0);
1063 }
1064 
1065 #undef __FUNC__
1066 #define __FUNC__ "MatGetBlockSize_MPISBAIJ"
1067 int MatGetBlockSize_MPISBAIJ(Mat mat,int *bs)
1068 {
1069   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1070 
1071   PetscFunctionBegin;
1072   *bs = baij->bs;
1073   PetscFunctionReturn(0);
1074 }
1075 
1076 #undef __FUNC__
1077 #define __FUNC__ "MatZeroEntries_MPISBAIJ"
1078 int MatZeroEntries_MPISBAIJ(Mat A)
1079 {
1080   Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1081   int         ierr;
1082 
1083   PetscFunctionBegin;
1084   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
1085   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
1086   PetscFunctionReturn(0);
1087 }
1088 
1089 #undef __FUNC__
1090 #define __FUNC__ "MatGetInfo_MPISBAIJ"
1091 int MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1092 {
1093   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1094   Mat         A = a->A,B = a->B;
1095   int         ierr;
1096   PetscReal   isend[5],irecv[5];
1097 
1098   PetscFunctionBegin;
1099   info->block_size     = (double)a->bs;
1100   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1101   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1102   isend[3] = info->memory;  isend[4] = info->mallocs;
1103   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1104   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1105   isend[3] += info->memory;  isend[4] += info->mallocs;
1106   if (flag == MAT_LOCAL) {
1107     info->nz_used      = isend[0];
1108     info->nz_allocated = isend[1];
1109     info->nz_unneeded  = isend[2];
1110     info->memory       = isend[3];
1111     info->mallocs      = isend[4];
1112   } else if (flag == MAT_GLOBAL_MAX) {
1113     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_MAX,matin->comm);CHKERRQ(ierr);
1114     info->nz_used      = irecv[0];
1115     info->nz_allocated = irecv[1];
1116     info->nz_unneeded  = irecv[2];
1117     info->memory       = irecv[3];
1118     info->mallocs      = irecv[4];
1119   } else if (flag == MAT_GLOBAL_SUM) {
1120     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_SUM,matin->comm);CHKERRQ(ierr);
1121     info->nz_used      = irecv[0];
1122     info->nz_allocated = irecv[1];
1123     info->nz_unneeded  = irecv[2];
1124     info->memory       = irecv[3];
1125     info->mallocs      = irecv[4];
1126   } else {
1127     SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
1128   }
1129   info->rows_global       = (double)matin->M;
1130   info->columns_global    = (double)matin->N;
1131   info->rows_local        = (double)matin->m;
1132   info->columns_local     = (double)matin->N;
1133   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1134   info->fill_ratio_needed = 0;
1135   info->factor_mallocs    = 0;
1136   PetscFunctionReturn(0);
1137 }
1138 
1139 #undef __FUNC__
1140 #define __FUNC__ "MatSetOption_MPISBAIJ"
1141 int MatSetOption_MPISBAIJ(Mat A,MatOption op)
1142 {
1143   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1144   int         ierr;
1145 
1146   PetscFunctionBegin;
1147   if (op == MAT_NO_NEW_NONZERO_LOCATIONS ||
1148       op == MAT_YES_NEW_NONZERO_LOCATIONS ||
1149       op == MAT_COLUMNS_UNSORTED ||
1150       op == MAT_COLUMNS_SORTED ||
1151       op == MAT_NEW_NONZERO_ALLOCATION_ERR ||
1152       op == MAT_KEEP_ZEROED_ROWS ||
1153       op == MAT_NEW_NONZERO_LOCATION_ERR) {
1154         ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1155         ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1156   } else if (op == MAT_ROW_ORIENTED) {
1157         a->roworiented = PETSC_TRUE;
1158         ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1159         ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1160   } else if (op == MAT_ROWS_SORTED ||
1161              op == MAT_ROWS_UNSORTED ||
1162              op == MAT_SYMMETRIC ||
1163              op == MAT_STRUCTURALLY_SYMMETRIC ||
1164              op == MAT_YES_NEW_DIAGONALS ||
1165              op == MAT_USE_HASH_TABLE) {
1166     PetscLogInfo(A,"Info:MatSetOption_MPISBAIJ:Option ignored\n");
1167   } else if (op == MAT_COLUMN_ORIENTED) {
1168     a->roworiented = PETSC_FALSE;
1169     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1170     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1171   } else if (op == MAT_IGNORE_OFF_PROC_ENTRIES) {
1172     a->donotstash = PETSC_TRUE;
1173   } else if (op == MAT_NO_NEW_DIAGONALS) {
1174     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1175   } else if (op == MAT_USE_HASH_TABLE) { /* currently unreachable: caught by the ignored-options list above */
1176     a->ht_flag = PETSC_TRUE;
1177   } else {
1178     SETERRQ(PETSC_ERR_SUP,"unknown option");
1179   }
1180   PetscFunctionReturn(0);
1181 }
1182 
1183 #undef __FUNC__
1184 #define __FUNC__ "MatTranspose_MPISBAIJ"
1185 int MatTranspose_MPISBAIJ(Mat A,Mat *matout)
1186 {
1187   PetscFunctionBegin;
1188   SETERRQ(1,"Matrix is symmetric. MatTranspose() should not be called");
1189   /* PetscFunctionReturn(0); */
1190 }
1191 
1192 #undef __FUNC__
1193 #define __FUNC__ "MatDiagonalScale_MPISBAIJ"
1194 int MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1195 {
1196   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1197   Mat         a = baij->A,b = baij->B;
1198   int         ierr,s1,s2,s3;
1199 
1200   PetscFunctionBegin;
1201   if (ll != rr) {
1202     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be same\n");
1203   }
1204   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1205   if (rr) {
1206     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1207     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1208     /* Overlap communication with computation. */
1209     ierr = VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1210     /*} if (ll) { */
1211     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1212     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1213     ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
1214     /* } */
1215   /* scale  the diagonal block */
1216   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1217 
1218   /* if (rr) { */
1219     /* Do a scatter end and then right scale the off-diagonal block */
1220     ierr = VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1221     ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
1222   }
1223 
1224   PetscFunctionReturn(0);
1225 }
1226 
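/*
   MatZeroRows_MPISBAIJ below works in four steps: determine the owning process of every
   global row index in 'is', exchange the message counts and maximum lengths with an
   MPI_Allreduce (PetscMaxSum_Op), ship the row indices with nonblocking sends/receives,
   and finally zero the received local rows, optionally re-inserting the diagonal value.
*/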
1227 #undef __FUNC__
1228 #define __FUNC__ "MatZeroRows_MPISBAIJ"
1229 int MatZeroRows_MPISBAIJ(Mat A,IS is,Scalar *diag)
1230 {
1231   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;
1232   int            i,ierr,N,*rows,*owners = l->rowners,size = l->size;
1233   int            *procs,*nprocs,j,idx,nsends,*work,row;
1234   int            nmax,*svalues,*starts,*owner,nrecvs,rank = l->rank;
1235   int            *rvalues,tag = A->tag,count,base,slen,n,*source;
1236   int            *lens,imdex,*lrows,*values,bs=l->bs,rstart_bs=l->rstart_bs;
1237   MPI_Comm       comm = A->comm;
1238   MPI_Request    *send_waits,*recv_waits;
1239   MPI_Status     recv_status,*send_status;
1240   IS             istmp;
1241   PetscTruth     found;
1242 
1243   PetscFunctionBegin;
1244   ierr = ISGetSize(is,&N);CHKERRQ(ierr);
1245   ierr = ISGetIndices(is,&rows);CHKERRQ(ierr);
1246 
1247   /*  first count number of contributors to each processor */
1248   ierr  = PetscMalloc(2*size*sizeof(int),&nprocs);CHKERRQ(ierr);
1249   ierr  = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);
1250   procs = nprocs + size;
1251   ierr  = PetscMalloc((N+1)*sizeof(int),&owner);CHKERRQ(ierr); /* see note*/
1252   for (i=0; i<N; i++) {
1253     idx   = rows[i];
1254     found = PETSC_FALSE;
1255     for (j=0; j<size; j++) {
1256       if (idx >= owners[j]*bs && idx < owners[j+1]*bs) {
1257         nprocs[j]++; procs[j] = 1; owner[i] = j; found = PETSC_TRUE; break;
1258       }
1259     }
1260     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1261   }
1262   nsends = 0;  for (i=0; i<size; i++) { nsends += procs[i];}
1263 
1264   /* inform other processors of number of messages and max length*/
1265   ierr   = PetscMalloc(2*size*sizeof(int),&work);CHKERRQ(ierr);
1266   ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
1267   nmax   = work[rank];
1268   nrecvs = work[size+rank];
1269   ierr   = PetscFree(work);CHKERRQ(ierr);
1270 
1271   /* post receives:   */
1272   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(int),&rvalues);CHKERRQ(ierr);
1273   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
1274   for (i=0; i<nrecvs; i++) {
1275     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
1276   }
1277 
1278   /* do sends:
1279      1) starts[i] gives the starting index in svalues for stuff going to
1280      the ith processor
1281   */
1282   ierr = PetscMalloc((N+1)*sizeof(int),&svalues);CHKERRQ(ierr);
1283   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
1284   ierr = PetscMalloc((size+1)*sizeof(int),&starts);CHKERRQ(ierr);
1285   starts[0]  = 0;
1286   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1287   for (i=0; i<N; i++) {
1288     svalues[starts[owner[i]]++] = rows[i];
1289   }
1290   ierr = ISRestoreIndices(is,&rows);CHKERRQ(ierr);
1291 
1292   starts[0] = 0;
1293   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1294   count = 0;
1295   for (i=0; i<size; i++) {
1296     if (procs[i]) {
1297       ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPI_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
1298     }
1299   }
1300   ierr = PetscFree(starts);CHKERRQ(ierr);
1301 
1302   base = owners[rank]*bs;
1303 
1304   /*  wait on receives */
1305   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(int),&lens);CHKERRQ(ierr);
1306   source = lens + nrecvs;
1307   count  = nrecvs; slen = 0;
1308   while (count) {
1309     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
1310     /* unpack receives into our local space */
1311     ierr = MPI_Get_count(&recv_status,MPI_INT,&n);CHKERRQ(ierr);
1312     source[imdex]  = recv_status.MPI_SOURCE;
1313     lens[imdex]    = n;
1314     slen          += n;
1315     count--;
1316   }
1317   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
1318 
1319   /* move the data into the send scatter */
1320   ierr = PetscMalloc((slen+1)*sizeof(int),&lrows);CHKERRQ(ierr);
1321   count = 0;
1322   for (i=0; i<nrecvs; i++) {
1323     values = rvalues + i*nmax;
1324     for (j=0; j<lens[i]; j++) {
1325       lrows[count++] = values[j] - base;
1326     }
1327   }
1328   ierr = PetscFree(rvalues);CHKERRQ(ierr);
1329   ierr = PetscFree(lens);CHKERRQ(ierr);
1330   ierr = PetscFree(owner);CHKERRQ(ierr);
1331   ierr = PetscFree(nprocs);CHKERRQ(ierr);
1332 
1333   /* actually zap the local rows */
1334   ierr = ISCreateGeneral(PETSC_COMM_SELF,slen,lrows,&istmp);CHKERRQ(ierr);
1335   PetscLogObjectParent(A,istmp);
1336 
1337   /*
1338         Zero the required rows. If the "diagonal block" of the matrix
1339      is square and the user wishes to set the diagonal we use separate
1340      code so that MatSetValues() is not called for each diagonal entry,
1341      which would allocate new memory, causing many mallocs and slowing things down.
1342 
1343        Contributed by: Mathew Knepley
1344   */
1345   /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1346   ierr = MatZeroRows_SeqBAIJ(l->B,istmp,0);CHKERRQ(ierr);
1347   if (diag && (l->A->M == l->A->N)) {
1348     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,diag);CHKERRQ(ierr);
1349   } else if (diag) {
1350     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1351     if (((Mat_SeqSBAIJ*)l->A->data)->nonew) {
1352       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1353 MAT_NO_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1354     }
1355     for (i=0; i<slen; i++) {
1356       row  = lrows[i] + rstart_bs;
1357       ierr = MatSetValues(A,1,&row,1,&row,diag,INSERT_VALUES);CHKERRQ(ierr);
1358     }
1359     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1360     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1361   } else {
1362     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1363   }
1364 
1365   ierr = ISDestroy(istmp);CHKERRQ(ierr);
1366   ierr = PetscFree(lrows);CHKERRQ(ierr);
1367 
1368   /* wait on sends */
1369   if (nsends) {
1370     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
1371     ierr        = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
1372     ierr        = PetscFree(send_status);CHKERRQ(ierr);
1373   }
1374   ierr = PetscFree(send_waits);CHKERRQ(ierr);
1375   ierr = PetscFree(svalues);CHKERRQ(ierr);
1376 
1377   PetscFunctionReturn(0);
1378 }
1379 
1380 #undef __FUNC__
1381 #define __FUNC__ "MatPrintHelp_MPISBAIJ"
1382 int MatPrintHelp_MPISBAIJ(Mat A)
1383 {
1384   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1385   MPI_Comm    comm = A->comm;
1386   static int  called = 0;
1387   int         ierr;
1388 
1389   PetscFunctionBegin;
1390   if (!a->rank) {
1391     ierr = MatPrintHelp_SeqSBAIJ(a->A);CHKERRQ(ierr);
1392   }
1393   if (called) {PetscFunctionReturn(0);} else called = 1;
1394   ierr = (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");CHKERRQ(ierr);
1395   ierr = (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");CHKERRQ(ierr);
1396   PetscFunctionReturn(0);
1397 }
1398 
1399 #undef __FUNC__
1400 #define __FUNC__ "MatSetUnfactored_MPISBAIJ"
1401 int MatSetUnfactored_MPISBAIJ(Mat A)
1402 {
1403   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1404   int         ierr;
1405 
1406   PetscFunctionBegin;
1407   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1408   PetscFunctionReturn(0);
1409 }
1410 
1411 static int MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1412 
1413 #undef __FUNC__
1414 #define __FUNC__ "MatEqual_MPISBAIJ"
1415 int MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1416 {
1417   Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1418   Mat         a,b,c,d;
1419   PetscTruth  flg;
1420   int         ierr;
1421 
1422   PetscFunctionBegin;
1423   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg);CHKERRQ(ierr);
1424   if (!flg) SETERRQ(PETSC_ERR_ARG_INCOMP,"Matrices must be same type");
1425   a = matA->A; b = matA->B;
1426   c = matB->A; d = matB->B;
1427 
1428   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1429   if (flg == PETSC_TRUE) {
1430     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1431   }
1432   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);CHKERRQ(ierr);
1433   PetscFunctionReturn(0);
1434 }
1435 
1436 #undef __FUNC__
1437 #define __FUNC__ "MatSetUpPreallocation_MPISBAIJ"
1438 int MatSetUpPreallocation_MPISBAIJ(Mat A)
1439 {
1440   int        ierr;
1441 
1442   PetscFunctionBegin;
1443   ierr = MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1444   PetscFunctionReturn(0);
1445 }
1446 /* -------------------------------------------------------------------*/
1447 static struct _MatOps MatOps_Values = {
1448   MatSetValues_MPISBAIJ,
1449   MatGetRow_MPISBAIJ,
1450   MatRestoreRow_MPISBAIJ,
1451   MatMult_MPISBAIJ,
1452   MatMultAdd_MPISBAIJ,
1453   MatMultTranspose_MPISBAIJ,
1454   MatMultTransposeAdd_MPISBAIJ,
1455   0,
1456   0,
1457   0,
1458   0,
1459   0,
1460   0,
1461   0,
1462   MatTranspose_MPISBAIJ,
1463   MatGetInfo_MPISBAIJ,
1464   MatEqual_MPISBAIJ,
1465   MatGetDiagonal_MPISBAIJ,
1466   MatDiagonalScale_MPISBAIJ,
1467   MatNorm_MPISBAIJ,
1468   MatAssemblyBegin_MPISBAIJ,
1469   MatAssemblyEnd_MPISBAIJ,
1470   0,
1471   MatSetOption_MPISBAIJ,
1472   MatZeroEntries_MPISBAIJ,
1473   MatZeroRows_MPISBAIJ,
1474   0,
1475   0,
1476   0,
1477   0,
1478   MatSetUpPreallocation_MPISBAIJ,
1479   0,
1480   MatGetOwnershipRange_MPISBAIJ,
1481   0,
1482   0,
1483   0,
1484   0,
1485   MatDuplicate_MPISBAIJ,
1486   0,
1487   0,
1488   0,
1489   0,
1490   0,
1491   MatGetSubMatrices_MPISBAIJ,
1492   MatIncreaseOverlap_MPISBAIJ,
1493   MatGetValues_MPISBAIJ,
1494   0,
1495   MatPrintHelp_MPISBAIJ,
1496   MatScale_MPISBAIJ,
1497   0,
1498   0,
1499   0,
1500   MatGetBlockSize_MPISBAIJ,
1501   0,
1502   0,
1503   0,
1504   0,
1505   0,
1506   0,
1507   MatSetUnfactored_MPISBAIJ,
1508   0,
1509   MatSetValuesBlocked_MPISBAIJ,
1510   0,
1511   0,
1512   0,
1513   MatGetMaps_Petsc,
1514   0,
1515   0,
1516   0,
1517   0,
1518   0,
1519   0,
1520   MatGetRowMax_MPISBAIJ};
1521 
1522 
1523 EXTERN_C_BEGIN
1524 #undef __FUNC__
1525 #define __FUNC__ "MatGetDiagonalBlock_MPISBAIJ"
1526 int MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1527 {
1528   PetscFunctionBegin;
1529   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1530   *iscopy = PETSC_FALSE;
1531   PetscFunctionReturn(0);
1532 }
1533 EXTERN_C_END
1534 
1535 EXTERN_C_BEGIN
1536 #undef __FUNC__
1537 #define __FUNC__ "MatCreate_MPISBAIJ"
1538 int MatCreate_MPISBAIJ(Mat B)
1539 {
1540   Mat_MPISBAIJ *b;
1541   int          ierr;
1542   PetscTruth   flg;
1543 
1544   PetscFunctionBegin;
1545 
1546   ierr    = PetscNew(Mat_MPISBAIJ,&b);CHKERRQ(ierr);
1547   B->data = (void*)b;
1548   ierr    = PetscMemzero(b,sizeof(Mat_MPISBAIJ));CHKERRQ(ierr);
1549   ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1550 
1551   B->ops->destroy    = MatDestroy_MPISBAIJ;
1552   B->ops->view       = MatView_MPISBAIJ;
1553   B->mapping    = 0;
1554   B->factor     = 0;
1555   B->assembled  = PETSC_FALSE;
1556 
1557   B->insertmode = NOT_SET_VALUES;
1558   ierr = MPI_Comm_rank(B->comm,&b->rank);CHKERRQ(ierr);
1559   ierr = MPI_Comm_size(B->comm,&b->size);CHKERRQ(ierr);
1560 
1561   /* build local table of row and column ownerships */
1562   ierr          = PetscMalloc(3*(b->size+2)*sizeof(int),&b->rowners);CHKERRQ(ierr);
1563   b->cowners    = b->rowners + b->size + 2;
1564   b->rowners_bs = b->cowners + b->size + 2;
1565   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1566 
1567   /* build cache for off array entries formed */
1568   ierr = MatStashCreate_Private(B->comm,1,&B->stash);CHKERRQ(ierr);
1569   b->donotstash  = PETSC_FALSE;
1570   b->colmap      = PETSC_NULL;
1571   b->garray      = PETSC_NULL;
1572   b->roworiented = PETSC_TRUE;
1573 
1574 #if defined(PETSC_USE_MAT_SINGLE)
1575   /* stuff for MatSetValues_XXX in single precision */
1576   b->lensetvalues     = 0;
1577   b->setvaluescopy    = PETSC_NULL;
1578 #endif
1579 
1580   /* stuff used in block assembly */
1581   b->barray       = 0;
1582 
1583   /* stuff used for matrix vector multiply */
1584   b->lvec         = 0;
1585   b->Mvctx        = 0;
1586 
1587   /* stuff for MatGetRow() */
1588   b->rowindices   = 0;
1589   b->rowvalues    = 0;
1590   b->getrowactive = PETSC_FALSE;
1591 
1592   /* hash table stuff */
1593   b->ht           = 0;
1594   b->hd           = 0;
1595   b->ht_size      = 0;
1596   b->ht_flag      = PETSC_FALSE;
1597   b->ht_fact      = 0;
1598   b->ht_total_ct  = 0;
1599   b->ht_insert_ct = 0;
1600 
1601   ierr = PetscOptionsHasName(PETSC_NULL,"-mat_use_hash_table",&flg);CHKERRQ(ierr);
1602   if (flg) {
1603     double fact = 1.39;
1604     ierr = MatSetOption(B,MAT_USE_HASH_TABLE);CHKERRQ(ierr);
1605     ierr = PetscOptionsGetDouble(PETSC_NULL,"-mat_use_hash_table",&fact,PETSC_NULL);CHKERRQ(ierr);
1606     if (fact <= 1.0) fact = 1.39;
1607     ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
1608     PetscLogInfo(0,"MatCreate_MPISBAIJ:Hash table Factor used %5.2f\n",fact);
1609   }
1610   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1611                                      "MatStoreValues_MPISBAIJ",
1612                                      MatStoreValues_MPISBAIJ);CHKERRQ(ierr);
1613   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1614                                      "MatRetrieveValues_MPISBAIJ",
1615                                      MatRetrieveValues_MPISBAIJ);CHKERRQ(ierr);
1616   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1617                                      "MatGetDiagonalBlock_MPISBAIJ",
1618                                      MatGetDiagonalBlock_MPISBAIJ);CHKERRQ(ierr);
1619   PetscFunctionReturn(0);
1620 }
1621 EXTERN_C_END
1622 
1623 #undef __FUNC__
1624 #define __FUNC__ "MatMPISBAIJSetPreallocation"
1625 /*@C
1626    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1627    the user should preallocate the matrix storage by setting the parameters
1628    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1629    performance can be increased by more than a factor of 50.
1630 
1631    Collective on Mat
1632 
1633    Input Parameters:
1634 +  A - the matrix
1635 .  bs   - size of block
1636 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1637            submatrix  (same for all local rows)
1638 .  d_nnz - array containing the number of block nonzeros in the various block rows
1639            in the diagonal portion of the local submatrix (possibly different for each block
1640            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1641 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1642            submatrix (same for all local rows).
1643 -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1644            off-diagonal portion of the local submatrix (possibly different for
1645            each block row) or PETSC_NULL.
1646 
1647 
1648    Options Database Keys:
1649 +   -mat_no_unroll - uses code that does not unroll the loops in the
1650                      block calculations (much slower)
1651 -   -mat_block_size - size of the blocks to use
1652 
1653    Notes:
1654 
1655    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1656    then it must be used on all processors that share the object for that argument.
1657 
1658    Storage Information:
1659    For a square global matrix we define each processor's diagonal portion
1660    to be its local rows and the corresponding columns (a square submatrix);
1661    each processor's off-diagonal portion encompasses the remainder of the
1662    local matrix (a rectangular submatrix).
1663 
1664    The user can specify preallocated storage for the diagonal part of
1665    the local submatrix with either d_nz or d_nnz (not both).  Set
1666    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1667    memory allocation.  Likewise, specify preallocated storage for the
1668    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1669 
1670    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1671    the figure below we depict these three local rows and all columns (0-11).
1672 
1673 .vb
1674            0 1 2 3 4 5 6 7 8 9 10 11
1675           -------------------
1676    row 3  |  o o o d d d o o o o o o
1677    row 4  |  o o o d d d o o o o o o
1678    row 5  |  o o o d d d o o o o o o
1679           -------------------
1680 .ve
1681 
1682    Thus, any entries in the d locations are stored in the d (diagonal)
1683    submatrix, and any entries in the o locations are stored in the
1684    o (off-diagonal) submatrix.  Note that the d submatrix is stored in the
1685    MATSEQSBAIJ format and the o submatrix in the MATSEQBAIJ format for compressed row storage.
1686 
1687    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1688    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1689    In general, for PDE problems in which most nonzeros are near the diagonal,
1690    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1691    or you will get TERRIBLE performance; see the users' manual chapter on
1692    matrices.
1693 
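   Example:
   A minimal calling sketch (comm, m, M, bs, d_nz, and o_nz below are placeholder
   values for illustration, not values taken from this file):
.vb
      Mat A;
      ierr = MatCreate(comm,m,m,M,M,&A);CHKERRQ(ierr);
      ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
      ierr = MatMPISBAIJSetPreallocation(A,bs,d_nz,PETSC_NULL,o_nz,PETSC_NULL);CHKERRQ(ierr);
.ve
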
1694    Level: intermediate
1695 
1696 .keywords: matrix, block, aij, compressed row, sparse, parallel
1697 
1698 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1699 @*/
1700 
1701 int MatMPISBAIJSetPreallocation(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1702 {
1703   Mat_MPISBAIJ *b;
1704   int          ierr,i,mbs,Mbs=PETSC_DECIDE;
1705   PetscTruth   flg2;
1706 
1707   PetscFunctionBegin;
1708   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg2);CHKERRQ(ierr);
1709   if (!flg2) PetscFunctionReturn(0);
1710 
1711   ierr = PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1712 
1713   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1714   if (d_nz < -2) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than -2: value %d",d_nz);
1715   if (o_nz < -2) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than -2: value %d",o_nz);
1716   if (d_nnz) {
1717     for (i=0; i<B->m/bs; i++) {
1718       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than -1: local row %d value %d",i,d_nnz[i]);
1719     }
1720   }
1721   if (o_nnz) {
1722     for (i=0; i<B->m/bs; i++) {
1723       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than -1: local row %d value %d",i,o_nnz[i]);
1724     }
1725   }
1726   B->preallocated = PETSC_TRUE;
1727   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);CHKERRQ(ierr);
1728   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);CHKERRQ(ierr);
1729   ierr = MapCreateMPI(B->comm,B->m,B->M,&B->rmap);CHKERRQ(ierr);
1730   ierr = MapCreateMPI(B->comm,B->m,B->M,&B->cmap);CHKERRQ(ierr);
1731 
1732   b   = (Mat_MPISBAIJ*)B->data;
1733   mbs = B->m/bs;
1734   Mbs = B->M/bs;
1735   if (mbs*bs != B->m) {
1736     SETERRQ2(PETSC_ERR_ARG_SIZ,"No of local rows %d must be divisible by blocksize %d",B->m,bs);
1737   }
1738 
1739   b->bs  = bs;
1740   b->bs2 = bs*bs;
1741   b->mbs = mbs;
1742   b->nbs = mbs;
1743   b->Mbs = Mbs;
1744   b->Nbs = Mbs;
1745 
1746   ierr = MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
1747   b->rowners[0]    = 0;
1748   for (i=2; i<=b->size; i++) {
1749     b->rowners[i] += b->rowners[i-1];
1750   }
1751   b->rstart    = b->rowners[b->rank];
1752   b->rend      = b->rowners[b->rank+1];
1753   b->cstart    = b->rstart;
1754   b->cend      = b->rend;
1755   for (i=0; i<=b->size; i++) {
1756     b->rowners_bs[i] = b->rowners[i]*bs;
1757   }
1758   b->rstart_bs = b->rstart*bs;
1759   b->rend_bs   = b->rend*bs;
1760 
1761   b->cstart_bs = b->cstart*bs;
1762   b->cend_bs   = b->cend*bs;
1763 
1764 
1765   if (d_nz == PETSC_DEFAULT) d_nz = 5;
1766   ierr = MatCreateSeqSBAIJ(PETSC_COMM_SELF,bs,B->m,B->m,d_nz,d_nnz,&b->A);CHKERRQ(ierr);
1767   PetscLogObjectParent(B,b->A);
1768   if (o_nz == PETSC_DEFAULT) o_nz = 0;
1769   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->M,o_nz,o_nnz,&b->B);CHKERRQ(ierr);
1770   PetscLogObjectParent(B,b->B);
1771 
1772   /* build cache for off array entries formed */
1773   ierr = MatStashCreate_Private(B->comm,bs,&B->bstash);CHKERRQ(ierr);
1774 
1775   PetscFunctionReturn(0);
1776 }
1777 
1778 #undef __FUNC__
1779 #define __FUNC__ "MatCreateMPISBAIJ"
1780 /*@C
1781    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1782    (block compressed row).  For good matrix assembly performance
1783    the user should preallocate the matrix storage by setting the parameters
1784    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1785    performance can be increased by more than a factor of 50.
1786 
1787    Collective on MPI_Comm
1788 
1789    Input Parameters:
1790 +  comm - MPI communicator
1791 .  bs   - size of block
1792 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1793            This value should be the same as the local size used in creating the
1794            y vector for the matrix-vector product y = Ax.
1795 .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1796            This value should be the same as the local size used in creating the
1797            x vector for the matrix-vector product y = Ax.
1798 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1799 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1800 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1801            submatrix  (same for all local rows)
1802 .  d_nnz - array containing the number of block nonzeros in the various block rows
1803            in the diagonal portion of the local submatrix (possibly different for each block
1804            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1805 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1806            submatrix (same for all local rows).
1807 -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1808            off-diagonal portion of the local submatrix (possibly different for
1809            each block row) or PETSC_NULL.
1810 
1811    Output Parameter:
1812 .  A - the matrix
1813 
1814    Options Database Keys:
1815 +   -mat_no_unroll - uses code that does not unroll the loops in the
1816                      block calculations (much slower)
1817 .   -mat_block_size - size of the blocks to use
1818 -   -mat_mpi - use the parallel matrix data structures even on one processor
1819                (defaults to using SeqSBAIJ format on one processor)
1820 
1821    Notes:
1822    The user MUST specify either the local or global matrix dimensions
1823    (possibly both).
1824 
1825    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1826    then it must be used on all processors that share the object for that argument.
1827 
1828    Storage Information:
1829    For a square global matrix we define each processor's diagonal portion
1830    to be its local rows and the corresponding columns (a square submatrix);
1831    each processor's off-diagonal portion encompasses the remainder of the
1832    local matrix (a rectangular submatrix).
1833 
1834    The user can specify preallocated storage for the diagonal part of
1835    the local submatrix with either d_nz or d_nnz (not both).  Set
1836    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1837    memory allocation.  Likewise, specify preallocated storage for the
1838    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1839 
1840    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1841    the figure below we depict these three local rows and all columns (0-11).
1842 
1843 .vb
1844            0 1 2 3 4 5 6 7 8 9 10 11
1845           -------------------
1846    row 3  |  o o o d d d o o o o o o
1847    row 4  |  o o o d d d o o o o o o
1848    row 5  |  o o o d d d o o o o o o
1849           -------------------
1850 .ve
1851 
1852    Thus, any entries in the d locations are stored in the d (diagonal)
1853    submatrix, and any entries in the o locations are stored in the
1854    o (off-diagonal) submatrix.  Note that the d submatrix is stored in the
1855    MATSEQSBAIJ format and the o submatrix in the MATSEQBAIJ format for compressed row storage.
1856 
1857    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1858    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1859    In general, for PDE problems in which most nonzeros are near the diagonal,
1860    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1861    or you will get TERRIBLE performance; see the users' manual chapter on
1862    matrices.
1863 
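   Example:
   A minimal usage sketch (comm, M, bs, d_nz, and o_nz are placeholders for
   illustration, not values taken from this file); the local sizes are left to
   PETSc with PETSC_DECIDE:
.vb
      Mat A;
      ierr = MatCreateMPISBAIJ(comm,bs,PETSC_DECIDE,PETSC_DECIDE,M,M,d_nz,PETSC_NULL,o_nz,PETSC_NULL,&A);CHKERRQ(ierr);
.ve
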
1864    Level: intermediate
1865 
1866 .keywords: matrix, block, aij, compressed row, sparse, parallel
1867 
1868 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1869 @*/
1870 
1871 int MatCreateMPISBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,int *d_nnz,int o_nz,int *o_nnz,Mat *A)
1872 {
1873   int ierr,size;
1874 
1875   PetscFunctionBegin;
1876   ierr = MatCreate(comm,m,n,M,N,A);CHKERRQ(ierr);
1877   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1878   if (size > 1) {
1879     ierr = MatSetType(*A,MATMPISBAIJ);CHKERRQ(ierr);
1880     ierr = MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
1881   } else {
1882     ierr = MatSetType(*A,MATSEQSBAIJ);CHKERRQ(ierr);
1883     ierr = MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
1884   }
1885   PetscFunctionReturn(0);
1886 }
1887 
1888 
1889 #undef __FUNC__
1890 #define __FUNC__ "MatDuplicate_MPISBAIJ"
1891 static int MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1892 {
1893   Mat          mat;
1894   Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1895   int          ierr,len=0;
1896 
1897   PetscFunctionBegin;
1898   *newmat       = 0;
1899   ierr = MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);CHKERRQ(ierr);
1900   ierr = MatSetType(mat,MATMPISBAIJ);CHKERRQ(ierr);
1901   mat->preallocated = PETSC_TRUE;
1902   a = (Mat_MPISBAIJ*)mat->data;
1903   a->bs  = oldmat->bs;
1904   a->bs2 = oldmat->bs2;
1905   a->mbs = oldmat->mbs;
1906   a->nbs = oldmat->nbs;
1907   a->Mbs = oldmat->Mbs;
1908   a->Nbs = oldmat->Nbs;
1909 
1910   a->rstart       = oldmat->rstart;
1911   a->rend         = oldmat->rend;
1912   a->cstart       = oldmat->cstart;
1913   a->cend         = oldmat->cend;
1914   a->size         = oldmat->size;
1915   a->rank         = oldmat->rank;
1916   a->donotstash   = oldmat->donotstash;
1917   a->roworiented  = oldmat->roworiented;
1918   a->rowindices   = 0;
1919   a->rowvalues    = 0;
1920   a->getrowactive = PETSC_FALSE;
1921   a->barray       = 0;
1922   a->rstart_bs    = oldmat->rstart_bs;
1923   a->rend_bs      = oldmat->rend_bs;
1924   a->cstart_bs    = oldmat->cstart_bs;
1925   a->cend_bs      = oldmat->cend_bs;
1926 
1927   /* hash table stuff */
1928   a->ht           = 0;
1929   a->hd           = 0;
1930   a->ht_size      = 0;
1931   a->ht_flag      = oldmat->ht_flag;
1932   a->ht_fact      = oldmat->ht_fact;
1933   a->ht_total_ct  = 0;
1934   a->ht_insert_ct = 0;
1935 
1936   ierr = PetscMalloc(3*(a->size+2)*sizeof(int),&a->rowners);CHKERRQ(ierr);
1937   PetscLogObjectMemory(mat,3*(a->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1938   a->cowners    = a->rowners + a->size + 2;
1939   a->rowners_bs = a->cowners + a->size + 2;
1940   ierr = PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
1941   ierr = MatStashCreate_Private(matin->comm,1,&mat->stash);CHKERRQ(ierr);
1942   ierr = MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);CHKERRQ(ierr);
1943   if (oldmat->colmap) {
1944 #if defined (PETSC_USE_CTABLE)
1945     ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
1946 #else
1947     ierr = PetscMalloc((a->Nbs)*sizeof(int),&a->colmap);CHKERRQ(ierr);
1948     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(int));
1949     ierr = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));CHKERRQ(ierr);
1950 #endif
1951   } else a->colmap = 0;
1952   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
1953     ierr = PetscMalloc(len*sizeof(int),&a->garray);CHKERRQ(ierr);
1954     PetscLogObjectMemory(mat,len*sizeof(int));
1955     ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));CHKERRQ(ierr);
1956   } else a->garray = 0;
1957 
1958   ierr =  VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
1959   PetscLogObjectParent(mat,a->lvec);
1960   ierr =  VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
1961 
1962   PetscLogObjectParent(mat,a->Mvctx);
1963   ierr =  MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
1964   PetscLogObjectParent(mat,a->A);
1965   ierr =  MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
1966   PetscLogObjectParent(mat,a->B);
1967   ierr = PetscFListDuplicate(matin->qlist,&mat->qlist);CHKERRQ(ierr);
1968   *newmat = mat;
1969   PetscFunctionReturn(0);
1970 }
1971 
1972 #include "petscsys.h"
1973 
1974 EXTERN_C_BEGIN
1975 #undef __FUNC__
1976 #define __FUNC__ "MatLoad_MPISBAIJ"
1977 int MatLoad_MPISBAIJ(PetscViewer viewer,MatType type,Mat *newmat)
1978 {
1979   Mat          A;
1980   int          i,nz,ierr,j,rstart,rend,fd;
1981   Scalar       *vals,*buf;
1982   MPI_Comm     comm = ((PetscObject)viewer)->comm;
1983   MPI_Status   status;
1984   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
1985   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
1986   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
1987   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
1988   int          dcount,kmax,k,nzcount,tmp;
1989 
1990   PetscFunctionBegin;
1991   ierr = PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1992 
1993   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1994   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1995   if (!rank) {
1996     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
1997     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
1998     if (header[0] != MAT_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
1999     if (header[3] < 0) {
2000       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2001     }
2002   }
2003 
2004   ierr = MPI_Bcast(header+1,3,MPI_INT,0,comm);CHKERRQ(ierr);
2005   M = header[1]; N = header[2];
2006 
2007   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2008 
2009   /*
2010      This code adds extra rows to make sure the number of rows is
2011      divisible by the blocksize
2012   */
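  /*
     Worked example (hypothetical sizes, for illustration only): with M = 10 and
     bs = 3, Mbs = 10/3 = 3 and extra_rows = 3 - 10 + 3*3 = 2, so Mbs is bumped
     to 4 and the padded matrix has 12 rows; with M = 12 and bs = 3 the formula
     gives extra_rows = 3, which is reset to 0 below and no padding is added.
  */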
2013   Mbs        = M/bs;
2014   extra_rows = bs - M + bs*(Mbs);
2015   if (extra_rows == bs) extra_rows = 0;
2016   else                  Mbs++;
2017   if (extra_rows && !rank) {
2018     PetscLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2019   }
2020 
2021   /* determine ownership of all rows */
2022   mbs        = Mbs/size + ((Mbs % size) > rank);
2023   m          = mbs*bs;
2024   ierr       = PetscMalloc(2*(size+2)*sizeof(int),&rowners);CHKERRQ(ierr);
2025   browners   = rowners + size + 1;
2026   ierr       = MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
2027   rowners[0] = 0;
2028   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2029   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2030   rstart = rowners[rank];
2031   rend   = rowners[rank+1];
2032 
2033   /* distribute row lengths to all processors */
2034   ierr = PetscMalloc((rend-rstart)*bs*sizeof(int),&locrowlens);CHKERRQ(ierr);
2035   if (!rank) {
2036     ierr = PetscMalloc((M+extra_rows)*sizeof(int),&rowlengths);CHKERRQ(ierr);
2037     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
2038     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2039     ierr = PetscMalloc(size*sizeof(int),&sndcounts);CHKERRQ(ierr);
2040     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2041     ierr = MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2042     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
2043   } else {
2044     ierr = MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2045   }
2046 
2047   if (!rank) {   /* procs[0] */
2048     /* calculate the number of nonzeros on each processor */
2049     ierr = PetscMalloc(size*sizeof(int),&procsnz);CHKERRQ(ierr);
2050     ierr = PetscMemzero(procsnz,size*sizeof(int));CHKERRQ(ierr);
2051     for (i=0; i<size; i++) {
2052       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2053         procsnz[i] += rowlengths[j];
2054       }
2055     }
2056     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2057 
2058     /* determine max buffer needed and allocate it */
2059     maxnz = 0;
2060     for (i=0; i<size; i++) {
2061       maxnz = PetscMax(maxnz,procsnz[i]);
2062     }
2063     ierr = PetscMalloc(maxnz*sizeof(int),&cols);CHKERRQ(ierr);
2064 
2065     /* read in my part of the matrix column indices  */
2066     nz     = procsnz[0];
2067     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2068     mycols = ibuf;
2069     if (size == 1)  nz -= extra_rows;
2070     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2071     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2072 
2073     /* read in everyone else's rows (except the last processor's) and ship off */
2074     for (i=1; i<size-1; i++) {
2075       nz   = procsnz[i];
2076       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2077       ierr = MPI_Send(cols,nz,MPI_INT,i,tag,comm);CHKERRQ(ierr);
2078     }
2079     /* read in the stuff for the last proc */
2080     if (size != 1) {
2081       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2082       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2083       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2084       ierr = MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);CHKERRQ(ierr);
2085     }
2086     ierr = PetscFree(cols);CHKERRQ(ierr);
2087   } else {  /* procs[i], i>0 */
2088     /* determine buffer space needed for message */
2089     nz = 0;
2090     for (i=0; i<m; i++) {
2091       nz += locrowlens[i];
2092     }
2093     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2094     mycols = ibuf;
2095     /* receive message of column indices*/
2096     ierr = MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
2097     ierr = MPI_Get_count(&status,MPI_INT,&maxnz);CHKERRQ(ierr);
2098     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2099   }
2100 
2101   /* loop over local rows, determining number of diagonal and off-diagonal entries */
2102   ierr     = PetscMalloc(2*(rend-rstart+1)*sizeof(int),&dlens);CHKERRQ(ierr);
2103   odlens   = dlens + (rend-rstart);
2104   ierr     = PetscMalloc(3*Mbs*sizeof(int),&mask);CHKERRQ(ierr);
2105   ierr     = PetscMemzero(mask,3*Mbs*sizeof(int));CHKERRQ(ierr);
2106   masked1  = mask    + Mbs;
2107   masked2  = masked1 + Mbs;
2108   rowcount = 0; nzcount = 0;
2109   for (i=0; i<mbs; i++) {
2110     dcount  = 0;
2111     odcount = 0;
2112     for (j=0; j<bs; j++) {
2113       kmax = locrowlens[rowcount];
2114       for (k=0; k<kmax; k++) {
2115         tmp = mycols[nzcount++]/bs; /* block col. index */
2116         if (!mask[tmp]) {
2117           mask[tmp] = 1;
2118           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2119           else masked1[dcount++] = tmp; /* entry in diag portion */
2120         }
2121       }
2122       rowcount++;
2123     }
2124 
2125     dlens[i]  = dcount;  /* d_nzz[i] */
2126     odlens[i] = odcount; /* o_nzz[i] */
2127 
2128     /* zero out the mask elements we set */
2129     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2130     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2131   }
2132 
2133   /* create our matrix */
2134   ierr = MatCreateMPISBAIJ(comm,bs,m,m,PETSC_DETERMINE,PETSC_DETERMINE,0,dlens,0,odlens,newmat);
2135   CHKERRQ(ierr);
2136   A = *newmat;
2137   ierr = MatSetOption(A,MAT_COLUMNS_SORTED);CHKERRQ(ierr);
2138 
2139   if (!rank) {
2140     ierr = PetscMalloc(maxnz*sizeof(Scalar),&buf);CHKERRQ(ierr);
2141     /* read in my part of the matrix numerical values  */
2142     nz = procsnz[0];
2143     vals = buf;
2144     mycols = ibuf;
2145     if (size == 1)  nz -= extra_rows;
2146     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2147     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2148 
2149     /* insert into matrix */
2150     jj      = rstart*bs;
2151     for (i=0; i<m; i++) {
2152       ierr = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2153       mycols += locrowlens[i];
2154       vals   += locrowlens[i];
2155       jj++;
2156     }
2157 
2158     /* read in other processors (except the last one) and ship out */
2159     for (i=1; i<size-1; i++) {
2160       nz   = procsnz[i];
2161       vals = buf;
2162       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2163       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);CHKERRQ(ierr);
2164     }
2165     /* the last proc */
2166     if (size != 1){
2167       nz   = procsnz[i] - extra_rows;
2168       vals = buf;
2169       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2170       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2171       ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);CHKERRQ(ierr);
2172     }
2173     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2174 
2175   } else {
2176     /* receive numeric values */
2177     ierr = PetscMalloc(nz*sizeof(Scalar),&buf);CHKERRQ(ierr);
2178 
2179     /* receive message of values*/
2180     vals   = buf;
2181     mycols = ibuf;
2182     ierr   = MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);CHKERRQ(ierr);
2183     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2184     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2185 
2186     /* insert into matrix */
2187     jj      = rstart*bs;
2188     for (i=0; i<m; i++) {
2189       ierr    = MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2190       mycols += locrowlens[i];
2191       vals   += locrowlens[i];
2192       jj++;
2193     }
2194   }
2195 
2196   ierr = PetscFree(locrowlens);CHKERRQ(ierr);
2197   ierr = PetscFree(buf);CHKERRQ(ierr);
2198   ierr = PetscFree(ibuf);CHKERRQ(ierr);
2199   ierr = PetscFree(rowners);CHKERRQ(ierr);
2200   ierr = PetscFree(dlens);CHKERRQ(ierr);
2201   ierr = PetscFree(mask);CHKERRQ(ierr);
2202   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2203   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2204   PetscFunctionReturn(0);
2205 }
2206 EXTERN_C_END
2207 
2208 #undef __FUNC__
2209 #define __FUNC__ "MatMPISBAIJSetHashTableFactor"
2210 /*@
2211    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2212 
2213    Input Parameters:
2214 .  mat  - the matrix
2215 .  fact - factor
2216 
2217    Collective on Mat
2218 
2219    Level: advanced
2220 
2221   Notes:
2222    This can also be set by the command line option: -mat_use_hash_table fact
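   For example, a hypothetical run-time option (the value 1.5 is only an
   illustration) selecting hash-table assembly with a size factor of 1.5:
.vb
   -mat_use_hash_table 1.5
.ve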
2223 
2224 .keywords: matrix, hashtable, factor, HT
2225 
2226 .seealso: MatSetOption()
2227 @*/
2228 int MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2229 {
2230   PetscFunctionBegin;
2231   SETERRQ(1,"Function not yet written for SBAIJ format");
2232   /* PetscFunctionReturn(0); */
2233 }
2234 
2235 #undef __FUNC__
2236 #define __FUNC__ "MatGetRowMax_MPISBAIJ"
2237 int MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2238 {
2239   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2240   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(a->B)->data;
2241   PetscReal    atmp;
2242   double       *work,*svalues,*rvalues;
2243   int          ierr,i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2244   int          rank,size,*rowners_bs,dest,count,source;
2245   Scalar       *ba,*va;
2246   MPI_Status   stat;
2247 
2248   PetscFunctionBegin;
2249   ierr = MatGetRowMax(a->A,v);CHKERRQ(ierr);
2250   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2251 
2252   ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
2253   ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
2254 
2255   bs   = a->bs;
2256   mbs  = a->mbs;
2257   Mbs  = a->Mbs;
2258   ba   = b->a;
2259   bi   = b->i;
2260   bj   = b->j;
2261   /*
2262   PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] M: %d, bs: %d, mbs: %d \n",rank,bs*Mbs,bs,mbs);
2263   PetscSynchronizedFlush(PETSC_COMM_WORLD);
2264   */
2265 
2266   /* find ownerships */
2267   rowners_bs = a->rowners_bs;
2268   /*
2269   if (!rank){
2270     for (i=0; i<size+1; i++) PetscPrintf(PETSC_COMM_SELF," rowners_bs[%d]: %d\n",i,rowners_bs[i]);
2271   }
2272   */
2273 
2274   /* each proc creates an array to be distributed */
2275   ierr = PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);CHKERRQ(ierr);
2276   ierr = PetscMemzero(work,bs*Mbs*sizeof(PetscReal));CHKERRQ(ierr);
2277 
2278   /* row_max for B */
2279   if (rank != size-1){
2280     for (i=0; i<mbs; i++) {
2281       ncols = bi[1] - bi[0]; bi++;
2282       brow  = bs*i;
2283       for (j=0; j<ncols; j++){
2284         bcol = bs*(*bj);
2285         for (kcol=0; kcol<bs; kcol++){
2286           col = bcol + kcol;                 /* local col index */
2287           col += rowners_bs[rank+1];      /* global col index */
2288           /* PetscPrintf(PETSC_COMM_SELF,"[%d], col: %d\n",rank,col); */
2289           for (krow=0; krow<bs; krow++){
2290             atmp = PetscAbsScalar(*ba); ba++;
2291             row = brow + krow;    /* local row index */
2292             /* printf("val[%d,%d]: %g\n",row,col,atmp); */
2293             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2294             if (work[col] < atmp) work[col] = atmp;
2295           }
2296         }
2297         bj++;
2298       }
2299     }
2300     /*
2301       PetscPrintf(PETSC_COMM_SELF,"[%d], work: ",rank);
2302       for (i=0; i<bs*Mbs; i++) PetscPrintf(PETSC_COMM_SELF,"%g ",work[i]);
2303       PetscPrintf(PETSC_COMM_SELF,"[%d]: \n");
2304       */
2305 
2306     /* send values to its owners */
2307     for (dest=rank+1; dest<size; dest++){
2308       svalues = work + rowners_bs[dest];
2309       count = rowners_bs[dest+1]-rowners_bs[dest];
2310       ierr = MPI_Send(svalues,count,MPI_DOUBLE,dest,rank,PETSC_COMM_WORLD);CHKERRQ(ierr);
2311       /*
2312       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] sends %d values to [%d]: %g, %g, %g, %g\n",rank,count,dest,svalues[0],svalues[1],svalues[2],svalues[3]);
2313       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2314       */
2315     }
2316   }
2317 
2318   /* receive values */
2319   if (rank){
2320     rvalues = work;
2321     count = rowners_bs[rank+1]-rowners_bs[rank];
2322     for (source=0; source<rank; source++){
2323       ierr = MPI_Recv(rvalues,count,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,PETSC_COMM_WORLD,&stat);CHKERRQ(ierr);
2324       /* process values */
2325       for (i=0; i<count; i++){
2326         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2327       }
2328       /*
2329       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] received %d values from [%d]: %g, %g, %g, %g \n",rank,count,stat.MPI_SOURCE,rvalues[0],rvalues[1],rvalues[2],rvalues[3]);
2330       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2331       */
2332     }
2333   }
2334 
2335   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2336   ierr = PetscFree(work);CHKERRQ(ierr);
2337   PetscFunctionReturn(0);
2338 }
2339