xref: /petsc/src/mat/impls/sbaij/mpi/mpisbaij.c (revision ea709b57ddb8cc304f11df2e9a39d6a8fdb28b53)
1 /*$Id: mpisbaij.c,v 1.58 2001/08/06 21:15:59 bsmith Exp balay $*/
2 
3 #include "src/mat/impls/baij/mpi/mpibaij.h"    /*I "petscmat.h" I*/
4 #include "src/vec/vecimpl.h"
5 #include "mpisbaij.h"
6 #include "src/mat/impls/sbaij/seq/sbaij.h"
7 
8 extern int MatSetUpMultiply_MPISBAIJ(Mat);
9 extern int DisAssemble_MPISBAIJ(Mat);
10 extern int MatIncreaseOverlap_MPISBAIJ(Mat,int,IS *,int);
11 extern int MatGetSubMatrices_MPISBAIJ(Mat,int,IS *,IS *,MatReuse,Mat **);
12 extern int MatGetValues_SeqSBAIJ(Mat,int,int *,int,int *,PetscScalar *);
13 extern int MatSetValues_SeqSBAIJ(Mat,int,int *,int,int *,PetscScalar *,InsertMode);
14 extern int MatSetValuesBlocked_SeqSBAIJ(Mat,int,int*,int,int*,PetscScalar*,InsertMode);
15 extern int MatGetRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
16 extern int MatRestoreRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
17 extern int MatPrintHelp_SeqSBAIJ(Mat);
18 extern int MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
19 extern int MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
20 extern int MatGetRowMax_MPISBAIJ(Mat,Vec);
21 
22 /*  UGLY, ugly, ugly
23    When MatScalar == PetscScalar, a separate MatSetValuesBlocked_MPISBAIJ_MatScalar() does not
24    exist; the ..._MatScalar() names below are #defined to the regular routines. Otherwise
25    ..._MatScalar() takes matrix elements in single precision and inserts them into the
26    single-precision data structure, while MatSetValuesBlocked_MPISBAIJ() converts the entries
27    into single precision and then calls ..._MatScalar() to put them into the single-precision data structures.
28 */
29 #if defined(PETSC_USE_MAT_SINGLE)
30 extern int MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
31 extern int MatSetValues_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
32 extern int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
33 extern int MatSetValues_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
34 extern int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
35 #else
36 #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
37 #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
38 #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
39 #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
40 #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
41 #endif
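/*
   Note: when PETSC_USE_MAT_SINGLE is not defined, MatScalar and PetscScalar are the same
   type, so the ..._MatScalar() names above are plain aliases and no copy or conversion is
   performed.  When it is defined, the wrapper routines further below copy the PetscScalar
   input into a MatScalar (single precision) work array before insertion.
*/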
42 
43 EXTERN_C_BEGIN
44 #undef __FUNCT__
45 #define __FUNCT__ "MatStoreValues_MPISBAIJ"
46 int MatStoreValues_MPISBAIJ(Mat mat)
47 {
48   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
49   int          ierr;
50 
51   PetscFunctionBegin;
52   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
53   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
54   PetscFunctionReturn(0);
55 }
56 EXTERN_C_END
57 
58 EXTERN_C_BEGIN
59 #undef __FUNCT__
60 #define __FUNCT__ "MatRetrieveValues_MPISBAIJ"
61 int MatRetrieveValues_MPISBAIJ(Mat mat)
62 {
63   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
64   int          ierr;
65 
66   PetscFunctionBegin;
67   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
68   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
69   PetscFunctionReturn(0);
70 }
71 EXTERN_C_END
72 
73 /*
74      Local utility routine that creates a mapping from the global column
75    number to the local number in the off-diagonal part of the local
76    storage of the matrix.  This is done in a non-scalable way since the
77    length of colmap equals the global matrix length.
78 */
79 #undef __FUNCT__
80 #define __FUNCT__ "CreateColmap_MPISBAIJ_Private"
81 static int CreateColmap_MPISBAIJ_Private(Mat mat)
82 {
83   PetscFunctionBegin;
84   SETERRQ(1,"Function not yet written for SBAIJ format");
85   /* PetscFunctionReturn(0); */
86 }
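/*
   A minimal sketch of what this routine could look like, modeled on its MPIBAIJ
   counterpart CreateColmap_MPIBAIJ_Private(); the fields used (Nbs, garray, and the
   number of off-diagonal block columns nbs) are assumptions based on the BAIJ code,
   not a definitive implementation:

     Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
     Mat_SeqBAIJ  *B    = (Mat_SeqBAIJ*)baij->B->data;
     int          ierr,i,nbs = B->nbs,bs = baij->bs;

     ierr = PetscMalloc((baij->Nbs+1)*sizeof(int),&baij->colmap);CHKERRQ(ierr);
     ierr = PetscMemzero(baij->colmap,baij->Nbs*sizeof(int));CHKERRQ(ierr);
     for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs + 1;

   The stored values are shifted by one so that zero means "not a local off-diagonal
   block column" and a lookup of colmap[...]-1 recovers the local column offset, which
   is how colmap is read in MatSetValues_MPISBAIJ_MatScalar() below.  With
   PETSC_USE_CTABLE a PetscTable would be used instead of the full-length array.
*/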
87 
88 #define CHUNKSIZE  10
89 
90 #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
91 { \
92  \
93     brow = row/bs;  \
94     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
95     rmax = aimax[brow]; nrow = ailen[brow]; \
96       bcol = col/bs; \
97       ridx = row % bs; cidx = col % bs; \
98       low = 0; high = nrow; \
99       while (high-low > 3) { \
100         t = (low+high)/2; \
101         if (rp[t] > bcol) high = t; \
102         else              low  = t; \
103       } \
104       for (_i=low; _i<high; _i++) { \
105         if (rp[_i] > bcol) break; \
106         if (rp[_i] == bcol) { \
107           bap  = ap +  bs2*_i + bs*cidx + ridx; \
108           if (addv == ADD_VALUES) *bap += value;  \
109           else                    *bap  = value;  \
110           goto a_noinsert; \
111         } \
112       } \
113       if (a->nonew == 1) goto a_noinsert; \
114       else if (a->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
115       if (nrow >= rmax) { \
116         /* there is no extra room in row, therefore enlarge */ \
117         int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
118         MatScalar *new_a; \
119  \
120         if (a->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
121  \
122         /* malloc new storage space */ \
123         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
124         ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
125         new_j = (int*)(new_a + bs2*new_nz); \
126         new_i = new_j + new_nz; \
127  \
128         /* copy over old data into new slots */ \
129         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
130         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
131         ierr = PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
132         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
133         ierr = PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
134         ierr = PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
135         ierr = PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
136         ierr = PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
137                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
138         /* free up old matrix storage */ \
139         ierr = PetscFree(a->a);CHKERRQ(ierr);  \
140         if (!a->singlemalloc) { \
141           ierr = PetscFree(a->i);CHKERRQ(ierr); \
142           ierr = PetscFree(a->j);CHKERRQ(ierr);\
143         } \
144         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
145         a->singlemalloc = PETSC_TRUE; \
146  \
147         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
148         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
149         PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
150         a->s_maxnz += bs2*CHUNKSIZE; \
151         a->reallocs++; \
152         a->s_nz++; \
153       } \
154       N = nrow++ - 1;  \
155       /* shift up all the later entries in this row */ \
156       for (ii=N; ii>=_i; ii--) { \
157         rp[ii+1] = rp[ii]; \
158         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
159       } \
160       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); }  \
161       rp[_i]                      = bcol;  \
162       ap[bs2*_i + bs*cidx + ridx] = value;  \
163       a_noinsert:; \
164     ailen[brow] = nrow; \
165 }
166 #ifndef MatSetValues_SeqBAIJ_B_Private
167 #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
168 { \
169     brow = row/bs;  \
170     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
171     rmax = bimax[brow]; nrow = bilen[brow]; \
172       bcol = col/bs; \
173       ridx = row % bs; cidx = col % bs; \
174       low = 0; high = nrow; \
175       while (high-low > 3) { \
176         t = (low+high)/2; \
177         if (rp[t] > bcol) high = t; \
178         else              low  = t; \
179       } \
180       for (_i=low; _i<high; _i++) { \
181         if (rp[_i] > bcol) break; \
182         if (rp[_i] == bcol) { \
183           bap  = ap +  bs2*_i + bs*cidx + ridx; \
184           if (addv == ADD_VALUES) *bap += value;  \
185           else                    *bap  = value;  \
186           goto b_noinsert; \
187         } \
188       } \
189       if (b->nonew == 1) goto b_noinsert; \
190       else if (b->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
191       if (nrow >= rmax) { \
192         /* there is no extra room in row, therefore enlarge */ \
193         int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
194         MatScalar *new_a; \
195  \
196         if (b->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
197  \
198         /* malloc new storage space */ \
199         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
200         ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
201         new_j = (int*)(new_a + bs2*new_nz); \
202         new_i = new_j + new_nz; \
203  \
204         /* copy over old data into new slots */ \
205         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
206         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
207         ierr = PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
208         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
209         ierr = PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
210         ierr = PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
211         ierr = PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
212         ierr = PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
213                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
214         /* free up old matrix storage */ \
215         ierr = PetscFree(b->a);CHKERRQ(ierr);  \
216         if (!b->singlemalloc) { \
217           ierr = PetscFree(b->i);CHKERRQ(ierr); \
218           ierr = PetscFree(b->j);CHKERRQ(ierr); \
219         } \
220         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
221         b->singlemalloc = PETSC_TRUE; \
222  \
223         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
224         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
225         PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
226         b->maxnz += bs2*CHUNKSIZE; \
227         b->reallocs++; \
228         b->nz++; \
229       } \
230       N = nrow++ - 1;  \
231       /* shift up all the later entries in this row */ \
232       for (ii=N; ii>=_i; ii--) { \
233         rp[ii+1] = rp[ii]; \
234         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
235       } \
236       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr);}  \
237       rp[_i]                      = bcol;  \
238       ap[bs2*_i + bs*cidx + ridx] = value;  \
239       b_noinsert:; \
240     bilen[brow] = nrow; \
241 }
242 #endif
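/*
   Both macros above use the same insertion scheme: a short binary search narrows the
   range of block-column indices stored for the row, a linear scan locates the exact
   slot, and a new block column is inserted by shifting the tail of the row up by one
   block (enlarging the row by CHUNKSIZE blocks first when it is full).  A stand-alone
   sketch of the index-array part of that idea, for illustration only (the value blocks
   are shifted analogously; a return of -1 means the row must be enlarged first):

     static int InsertBlockCol(int *cols,int *nrow,int rmax,int bcol)
     {
       int low = 0,high = *nrow,t,i,ii;
       while (high-low > 3) {
         t = (low+high)/2;
         if (cols[t] > bcol) high = t; else low = t;
       }
       for (i=low; i<high; i++) {
         if (cols[i] == bcol) return i;
         if (cols[i] > bcol) break;
       }
       if (*nrow >= rmax) return -1;
       for (ii=*nrow-1; ii>=i; ii--) cols[ii+1] = cols[ii];
       cols[i] = bcol; (*nrow)++;
       return i;
     }
*/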
243 
244 #if defined(PETSC_USE_MAT_SINGLE)
245 #undef __FUNCT__
246 #define __FUNCT__ "MatSetValues_MPISBAIJ"
247 int MatSetValues_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
248 {
249   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
250   int          ierr,i,N = m*n;
251   MatScalar    *vsingle;
252 
253   PetscFunctionBegin;
254   if (N > b->setvalueslen) {
255     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
256     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
257     b->setvalueslen  = N;
258   }
259   vsingle = b->setvaluescopy;
260 
261   for (i=0; i<N; i++) {
262     vsingle[i] = v[i];
263   }
264   ierr = MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
265   PetscFunctionReturn(0);
266 }
267 
268 #undef __FUNCT__
269 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ"
270 int MatSetValuesBlocked_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
271 {
272   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
273   int         ierr,i,N = m*n*b->bs2;
274   MatScalar   *vsingle;
275 
276   PetscFunctionBegin;
277   if (N > b->setvalueslen) {
278     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
279     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
280     b->setvalueslen  = N;
281   }
282   vsingle = b->setvaluescopy;
283   for (i=0; i<N; i++) {
284     vsingle[i] = v[i];
285   }
286   ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
287   PetscFunctionReturn(0);
288 }
289 
290 #undef __FUNCT__
291 #define __FUNCT__ "MatSetValues_MPISBAIJ_HT"
292 int MatSetValues_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
293 {
294   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
295   int         ierr,i,N = m*n;
296   MatScalar   *vsingle;
297 
298   PetscFunctionBegin;
299   SETERRQ(1,"Function not yet written for SBAIJ format");
300   /* PetscFunctionReturn(0); */
301 }
302 
303 #undef __FUNCT__
304 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_HT"
305 int MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
306 {
307   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
308   int         ierr,i,N = m*n*b->bs2;
309   MatScalar   *vsingle;
310 
311   PetscFunctionBegin;
312   SETERRQ(1,"Function not yet written for SBAIJ format");
313   /* PetscFunctionReturn(0); */
314 }
315 #endif
316 
317 /* Only add/insert a(i,j) with i<=j (blocks).
318    Any a(i,j) with i>j input by the user is ignored.
319 */
320 #undef __FUNCT__
321 #define __FUNCT__ "MatSetValues_MPISBAIJ_MatScalar"
322 int MatSetValues_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
323 {
324   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
325   MatScalar    value;
326   PetscTruth   roworiented = baij->roworiented;
327   int          ierr,i,j,row,col;
328   int          rstart_orig=baij->rstart_bs;
329   int          rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
330   int          cend_orig=baij->cend_bs,bs=baij->bs;
331 
332   /* Some Variables required in the macro */
333   Mat          A = baij->A;
334   Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
335   int          *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
336   MatScalar    *aa=a->a;
337 
338   Mat          B = baij->B;
339   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(B)->data;
340   int          *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
341   MatScalar    *ba=b->a;
342 
343   int          *rp,ii,nrow,_i,rmax,N,brow,bcol;
344   int          low,high,t,ridx,cidx,bs2=a->bs2;
345   MatScalar    *ap,*bap;
346 
347   /* for stash */
348   int          n_loc, *in_loc=0;
349   MatScalar    *v_loc=0;
350 
351   PetscFunctionBegin;
352 
353   if(!baij->donotstash){
354     ierr = PetscMalloc(n*sizeof(int),&in_loc);CHKERRQ(ierr);
355     ierr = PetscMalloc(n*sizeof(MatScalar),&v_loc);CHKERRQ(ierr);
356   }
357 
358   for (i=0; i<m; i++) {
359     if (im[i] < 0) continue;
360 #if defined(PETSC_USE_BOPT_g)
361     if (im[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
362 #endif
363     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
364       row = im[i] - rstart_orig;              /* local row index */
365       for (j=0; j<n; j++) {
366         if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
367         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
368           col = in[j] - cstart_orig;          /* local col index */
369           brow = row/bs; bcol = col/bs;
370           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
371           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
372           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
373           /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
374         } else if (in[j] < 0) continue;
375 #if defined(PETSC_USE_BOPT_g)
376         else if (in[j] >= mat->N) {SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Col too large");}
377 #endif
378         else {  /* off-diag entry (B) */
379           if (mat->was_assembled) {
380             if (!baij->colmap) {
381               ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
382             }
383 #if defined (PETSC_USE_CTABLE)
384             ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
385             col  = col - 1 + in[j]%bs;
386 #else
387             col = baij->colmap[in[j]/bs] - 1 + in[j]%bs;
388 #endif
389             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
390               ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
391               col =  in[j];
392               /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
393               B = baij->B;
394               b = (Mat_SeqBAIJ*)(B)->data;
395               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
396               ba=b->a;
397             }
398           } else col = in[j];
399           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
400           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
401           /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
402         }
403       }
404     } else {  /* off processor entry */
405       if (!baij->donotstash) {
406         n_loc = 0;
407         for (j=0; j<n; j++){
408           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
409           in_loc[n_loc] = in[j];
410           if (roworiented) {
411             v_loc[n_loc] = v[i*n+j];
412           } else {
413             v_loc[n_loc] = v[j*m+i];
414           }
415           n_loc++;
416         }
417         ierr = MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);CHKERRQ(ierr);
418       }
419     }
420   }
421 
422   if(!baij->donotstash){
423     ierr = PetscFree(in_loc);CHKERRQ(ierr);
424     ierr = PetscFree(v_loc);CHKERRQ(ierr);
425   }
426   PetscFunctionReturn(0);
427 }
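/*
   Usage note (illustrative sketch, not taken from the PETSc examples): because entries
   whose block row exceeds their block column are silently dropped above, a caller only
   needs to provide the upper triangle.  For block size 1, with mat any MATMPISBAIJ
   matrix being assembled and ierr an int:

     int          row = 0, cols[2] = {0,1};
     PetscScalar  vals[2] = {4.0,-1.0};
     ierr = MatSetValues(mat,1,&row,2,cols,vals,INSERT_VALUES);CHKERRQ(ierr);

   Supplying the mirrored entry a(1,0) as well would be harmless here; it is ignored
   rather than added twice.
*/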
428 
429 #undef __FUNCT__
430 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_MatScalar"
431 int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
432 {
433   PetscFunctionBegin;
434   SETERRQ(1,"Function not yet written for SBAIJ format");
435   /* PetscFunctionReturn(0); */
436 }
437 
438 #define HASH_KEY 0.6180339887
439 #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
440 /* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
441 /* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
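/*
   HASH() is multiplicative hashing with the golden-ratio constant: the key is scaled by
   0.6180339887, only the fractional part is kept, and the result is mapped onto the
   table size, giving a slot in [0,size).  Illustrative use (tmp must be a double):

     double tmp;
     int    size = 97, key = 12345;
     int    slot = HASH(size,key,tmp);
*/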
442 #undef __FUNCT__
443 #define __FUNCT__ "MatSetValues_MPISBAIJ_HT_MatScalar"
444 int MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
445 {
446   PetscFunctionBegin;
447   SETERRQ(1,"Function not yet written for SBAIJ format");
448   /* PetscFunctionReturn(0); */
449 }
450 
451 #undef __FUNCT__
452 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_HT_MatScalar"
453 int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
454 {
455   PetscFunctionBegin;
456   SETERRQ(1,"Function not yet written for SBAIJ format");
457   /* PetscFunctionReturn(0); */
458 }
459 
460 #undef __FUNCT__
461 #define __FUNCT__ "MatGetValues_MPISBAIJ"
462 int MatGetValues_MPISBAIJ(Mat mat,int m,int *idxm,int n,int *idxn,PetscScalar *v)
463 {
464   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
465   int          bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
466   int          bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
467 
468   PetscFunctionBegin;
469   for (i=0; i<m; i++) {
470     if (idxm[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
471     if (idxm[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
472     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
473       row = idxm[i] - bsrstart;
474       for (j=0; j<n; j++) {
475         if (idxn[j] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column");
476         if (idxn[j] >= mat->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
477         if (idxn[j] >= bscstart && idxn[j] < bscend){
478           col = idxn[j] - bscstart;
479           ierr = MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
480         } else {
481           if (!baij->colmap) {
482             ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
483           }
484 #if defined (PETSC_USE_CTABLE)
485           ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
486           data --;
487 #else
488           data = baij->colmap[idxn[j]/bs]-1;
489 #endif
490           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
491           else {
492             col  = data + idxn[j]%bs;
493             ierr = MatGetValues_SeqSBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
494           }
495         }
496       }
497     } else {
498       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
499     }
500   }
501  PetscFunctionReturn(0);
502 }
503 
504 #undef __FUNCT__
505 #define __FUNCT__ "MatNorm_MPISBAIJ"
506 int MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
507 {
508   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
509   /* Mat_SeqSBAIJ *amat = (Mat_SeqSBAIJ*)baij->A->data; */
510   /* Mat_SeqBAIJ  *bmat = (Mat_SeqBAIJ*)baij->B->data; */
511   int        ierr;
512   PetscReal  sum[2],*lnorm2;
513 
514   PetscFunctionBegin;
515   if (baij->size == 1) {
516     ierr =  MatNorm(baij->A,type,norm);CHKERRQ(ierr);
517   } else {
518     if (type == NORM_FROBENIUS) {
519       ierr = PetscMalloc(2*sizeof(double),&lnorm2);CHKERRQ(ierr);
520       ierr =  MatNorm(baij->A,type,lnorm2);CHKERRQ(ierr);
521       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of the norm of A */
522       ierr =  MatNorm(baij->B,type,lnorm2);CHKERRQ(ierr);
523       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of the norm of B */
524       /*
525       ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
526       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], lnorm2=%g, %g\n",rank,lnorm2[0],lnorm2[1]);
527       */
528       ierr = MPI_Allreduce(lnorm2,&sum,2,MPI_DOUBLE,MPI_SUM,mat->comm);CHKERRQ(ierr);
529       /*
530       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], sum=%g, %g\n",rank,sum[0],sum[1]);
531       PetscSynchronizedFlush(PETSC_COMM_WORLD); */
532 
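      /* B stores each off-diagonal block only once (its symmetric counterpart is implicit),
         so its contribution is counted twice: ||A||_F^2 = sum ||A_d||_F^2 + 2*sum ||B||_F^2 */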
533       *norm = sqrt(sum[0] + 2*sum[1]);
534       ierr = PetscFree(lnorm2);CHKERRQ(ierr);
535     } else {
536       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
537     }
538   }
539   PetscFunctionReturn(0);
540 }
541 
542 /*
543   Creates the hash table, and sets the table
544   This table is created only once.
545   If new entries need to be added to the matrix
546   then the hash table has to be destroyed and
547   recreated.
548 */
549 #undef __FUNCT__
550 #define __FUNCT__ "MatCreateHashTable_MPISBAIJ_Private"
551 int MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
552 {
553   PetscFunctionBegin;
554   SETERRQ(1,"Function not yet written for SBAIJ format");
555   /* PetscFunctionReturn(0); */
556 }
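/*
   When the MAT_USE_HASH_TABLE option (-mat_use_hash_table) is set, this routine is called
   from MatAssemblyEnd_MPISBAIJ() and the setvalues/setvaluesblocked function pointers are
   switched to the ..._HT variants, so subsequent insertions would go through the table
   rather than the searches in MatSetValues_SeqSBAIJ_A/B_Private().
*/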
557 
558 #undef __FUNCT__
559 #define __FUNCT__ "MatAssemblyBegin_MPISBAIJ"
560 int MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
561 {
562   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
563   int         ierr,nstash,reallocs;
564   InsertMode  addv;
565 
566   PetscFunctionBegin;
567   if (baij->donotstash) {
568     PetscFunctionReturn(0);
569   }
570 
571   /* make sure all processors are either in INSERTMODE or ADDMODE */
572   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);CHKERRQ(ierr);
573   if (addv == (ADD_VALUES|INSERT_VALUES)) {
574     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted, others added");
575   }
576   mat->insertmode = addv; /* in case this processor had no cache */
577 
578   ierr = MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);CHKERRQ(ierr);
579   ierr = MatStashScatterBegin_Private(&mat->bstash,baij->rowners);CHKERRQ(ierr);
580   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
581   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %d entries,uses %d mallocs.\n",nstash,reallocs);
582   ierr = MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);CHKERRQ(ierr);
583   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
584   PetscFunctionReturn(0);
585 }
586 
587 #undef __FUNCT__
588 #define __FUNCT__ "MatAssemblyEnd_MPISBAIJ"
589 int MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
590 {
591   Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
592   Mat_SeqSBAIJ  *a=(Mat_SeqSBAIJ*)baij->A->data;
593   Mat_SeqBAIJ  *b=(Mat_SeqBAIJ*)baij->B->data;
594   int         i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
595   int         *row,*col,other_disassembled;
596   PetscTruth  r1,r2,r3;
597   MatScalar   *val;
598   InsertMode  addv = mat->insertmode;
599   /* int         rank;*/
600 
601   PetscFunctionBegin;
602   /* remove the 2 lines below later */
603   /*ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr); */
604 
605   if (!baij->donotstash) {
606     while (1) {
607       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
608       /*
609       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]: in AssemblyEnd, stash, flg=%d\n",rank,flg);
610       PetscSynchronizedFlush(PETSC_COMM_WORLD);
611       */
612       if (!flg) break;
613 
614       for (i=0; i<n;) {
615         /* Now identify the consecutive vals belonging to the same row */
616         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
617         if (j < n) ncols = j-i;
618         else       ncols = n-i;
619         /* Now assemble all these values with a single function call */
620         ierr = MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
621         i = j;
622       }
623     }
624     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
625     /* Now process the block-stash. Since the values are stashed column-oriented,
626        set the roworiented flag to column oriented, and after MatSetValues()
627        restore the original flags */
628     r1 = baij->roworiented;
629     r2 = a->roworiented;
630     r3 = b->roworiented;
631     baij->roworiented = PETSC_FALSE;
632     a->roworiented    = PETSC_FALSE;
633     b->roworiented    = PETSC_FALSE;
634     while (1) {
635       ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
636       if (!flg) break;
637 
638       for (i=0; i<n;) {
639         /* Now identify the consecutive vals belonging to the same row */
640         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
641         if (j < n) ncols = j-i;
642         else       ncols = n-i;
643         ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
644         i = j;
645       }
646     }
647     ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
648     baij->roworiented = r1;
649     a->roworiented    = r2;
650     b->roworiented    = r3;
651   }
652 
653   ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
654   ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);
655 
656   /* determine if any processor has disassembled, if so we must
657      also disassemble ourselves, in order that we may reassemble. */
658   /*
659      if nonzero structure of submatrix B cannot change then we know that
660      no processor disassembled thus we can skip this stuff
661   */
662   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
663     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);CHKERRQ(ierr);
664     if (mat->was_assembled && !other_disassembled) {
665       ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
666     }
667   }
668 
669   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
670     ierr = MatSetUpMultiply_MPISBAIJ(mat);CHKERRQ(ierr);
671   }
672   ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
673   ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);
674 
675 #if defined(PETSC_USE_BOPT_g)
676   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
677     PetscLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((double)baij->ht_total_ct)/baij->ht_insert_ct);
678     baij->ht_total_ct  = 0;
679     baij->ht_insert_ct = 0;
680   }
681 #endif
682   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
683     ierr = MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
684     mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
685     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
686   }
687 
688   if (baij->rowvalues) {
689     ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
690     baij->rowvalues = 0;
691   }
692   PetscFunctionReturn(0);
693 }
694 
695 #undef __FUNCT__
696 #define __FUNCT__ "MatView_MPISBAIJ_ASCIIorDraworSocket"
697 static int MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
698 {
699   Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
700   int               ierr,bs = baij->bs,size = baij->size,rank = baij->rank;
701   PetscTruth        isascii,isdraw;
702   PetscViewer       sviewer;
703   PetscViewerFormat format;
704 
705   PetscFunctionBegin;
706   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
707   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
708   if (isascii) {
709     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
710     if (format == PETSC_VIEWER_ASCII_INFO_LONG) {
711       MatInfo info;
712       ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
713       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
714       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
715               rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
716               baij->bs,(int)info.memory);CHKERRQ(ierr);
717       ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
718       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
719       ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
720       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
721       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
722       ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
723       PetscFunctionReturn(0);
724     } else if (format == PETSC_VIEWER_ASCII_INFO) {
725       ierr = PetscViewerASCIIPrintf(viewer,"  block size is %d\n",bs);CHKERRQ(ierr);
726       PetscFunctionReturn(0);
727     }
728   }
729 
730   if (isdraw) {
731     PetscDraw       draw;
732     PetscTruth isnull;
733     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
734     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
735   }
736 
737   if (size == 1) {
738     ierr = PetscObjectSetName((PetscObject)baij->A,mat->name);CHKERRQ(ierr);
739     ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
740   } else {
741     /* assemble the entire matrix onto first processor. */
742     Mat         A;
743     Mat_SeqSBAIJ *Aloc;
744     Mat_SeqBAIJ *Bloc;
745     int         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
746     MatScalar   *a;
747 
748     if (!rank) {
749       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
750     } else {
751       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
752     }
753     PetscLogObjectParent(mat,A);
754 
755     /* copy over the A part */
756     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
757     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
758     ierr  = PetscMalloc(bs*sizeof(int),&rvals);CHKERRQ(ierr);
759 
760     for (i=0; i<mbs; i++) {
761       rvals[0] = bs*(baij->rstart + i);
762       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
763       for (j=ai[i]; j<ai[i+1]; j++) {
764         col = (baij->cstart+aj[j])*bs;
765         for (k=0; k<bs; k++) {
766           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
767           col++; a += bs;
768         }
769       }
770     }
771     /* copy over the B part */
772     Bloc = (Mat_SeqBAIJ*)baij->B->data;
773     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
774     for (i=0; i<mbs; i++) {
775       rvals[0] = bs*(baij->rstart + i);
776       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
777       for (j=ai[i]; j<ai[i+1]; j++) {
778         col = baij->garray[aj[j]]*bs;
779         for (k=0; k<bs; k++) {
780           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
781           col++; a += bs;
782         }
783       }
784     }
785     ierr = PetscFree(rvals);CHKERRQ(ierr);
786     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
787     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
788     /*
789        Everyone has to call to draw the matrix since the graphics waits are
790        synchronized across all processors that share the PetscDraw object
791     */
792     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
793     if (!rank) {
794       ierr = PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,mat->name);CHKERRQ(ierr);
795       ierr = MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
796     }
797     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
798     ierr = MatDestroy(A);CHKERRQ(ierr);
799   }
800   PetscFunctionReturn(0);
801 }
802 
803 #undef __FUNCT__
804 #define __FUNCT__ "MatView_MPISBAIJ"
805 int MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
806 {
807   int        ierr;
808   PetscTruth isascii,isdraw,issocket,isbinary;
809 
810   PetscFunctionBegin;
811   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
812   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
813   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
814   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
815   if (isascii || isdraw || issocket || isbinary) {
816     ierr = MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
817   } else {
818     SETERRQ1(1,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
819   }
820   PetscFunctionReturn(0);
821 }
822 
823 #undef __FUNCT__
824 #define __FUNCT__ "MatDestroy_MPISBAIJ"
825 int MatDestroy_MPISBAIJ(Mat mat)
826 {
827   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
828   int         ierr;
829 
830   PetscFunctionBegin;
831 #if defined(PETSC_USE_LOG)
832   PetscLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
833 #endif
834   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
835   ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
836   ierr = PetscFree(baij->rowners);CHKERRQ(ierr);
837   ierr = MatDestroy(baij->A);CHKERRQ(ierr);
838   ierr = MatDestroy(baij->B);CHKERRQ(ierr);
839 #if defined (PETSC_USE_CTABLE)
840   if (baij->colmap) {ierr = PetscTableDelete(baij->colmap);CHKERRQ(ierr);}
841 #else
842   if (baij->colmap) {ierr = PetscFree(baij->colmap);CHKERRQ(ierr);}
843 #endif
844   if (baij->garray) {ierr = PetscFree(baij->garray);CHKERRQ(ierr);}
845   if (baij->lvec)   {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
846   if (baij->Mvctx)  {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
847   if (baij->rowvalues) {ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);}
848   if (baij->barray) {ierr = PetscFree(baij->barray);CHKERRQ(ierr);}
849   if (baij->hd) {ierr = PetscFree(baij->hd);CHKERRQ(ierr);}
850 #if defined(PETSC_USE_MAT_SINGLE)
851   if (baij->setvaluescopy) {ierr = PetscFree(baij->setvaluescopy);CHKERRQ(ierr);}
852 #endif
853   ierr = PetscFree(baij);CHKERRQ(ierr);
854   PetscFunctionReturn(0);
855 }
856 
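/*
   For the symmetric parallel format the product y = A*x is assembled from three pieces:
     y  = A_d * x_loc                     (local diagonal block, itself symmetric)
     y += B * x_ghost                     (stored upper off-diagonal blocks, using scattered ghost values)
     y += reverse-scatter of B^T * x_loc  (the unstored lower off-diagonal blocks, applied as the
                                           transpose of B and accumulated with ADD_VALUES)
   The forward scatter of x is overlapped with the local diagonal multiply.
*/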
857 #undef __FUNCT__
858 #define __FUNCT__ "MatMult_MPISBAIJ"
859 int MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
860 {
861   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
862   int         ierr,nt;
863 
864   PetscFunctionBegin;
865   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
866   if (nt != A->n) {
867     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
868   }
869   ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
870   if (nt != A->m) {
871     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
872   }
873 
874   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
875   /* do diagonal part */
876   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
877   /* do superdiagonal part */
878   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
879   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
880   /* do subdiagonal part */
881   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
882   ierr = VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
883   ierr = VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
884 
885   PetscFunctionReturn(0);
886 }
887 
888 #undef __FUNCT__
889 #define __FUNCT__ "MatMultAdd_MPISBAIJ"
890 int MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
891 {
892   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
893   int        ierr;
894 
895   PetscFunctionBegin;
896   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
897   /* do diagonal part */
898   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
899   /* do superdiagonal part */
900   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
901   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
902 
903   /* do subdiagonal part */
904   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
905   ierr = VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
906   ierr = VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
907 
908   PetscFunctionReturn(0);
909 }
910 
911 #undef __FUNCT__
912 #define __FUNCT__ "MatMultTranspose_MPISBAIJ"
913 int MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
914 {
915   PetscFunctionBegin;
916   SETERRQ(1,"Matrix is symmetric. Call MatMult().");
917   /* PetscFunctionReturn(0); */
918 }
919 
920 #undef __FUNCT__
921 #define __FUNCT__ "MatMultTransposeAdd_MPISBAIJ"
922 int MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
923 {
924   PetscFunctionBegin;
925   SETERRQ(1,"Matrix is symmetric. Call MatMultAdd().");
926   /* PetscFunctionReturn(0); */
927 }
928 
929 /*
930   This only works correctly for square matrices where the subblock A->A is the
931    diagonal block
932 */
933 #undef __FUNCT__
934 #define __FUNCT__ "MatGetDiagonal_MPISBAIJ"
935 int MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
936 {
937   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
938   int         ierr;
939 
940   PetscFunctionBegin;
941   /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
942   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
943   PetscFunctionReturn(0);
944 }
945 
946 #undef __FUNCT__
947 #define __FUNCT__ "MatScale_MPISBAIJ"
948 int MatScale_MPISBAIJ(PetscScalar *aa,Mat A)
949 {
950   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
951   int         ierr;
952 
953   PetscFunctionBegin;
954   ierr = MatScale(aa,a->A);CHKERRQ(ierr);
955   ierr = MatScale(aa,a->B);CHKERRQ(ierr);
956   PetscFunctionReturn(0);
957 }
958 
959 #undef __FUNCT__
960 #define __FUNCT__ "MatGetOwnershipRange_MPISBAIJ"
961 int MatGetOwnershipRange_MPISBAIJ(Mat matin,int *m,int *n)
962 {
963   Mat_MPISBAIJ *mat = (Mat_MPISBAIJ*)matin->data;
964 
965   PetscFunctionBegin;
966   if (m) *m = mat->rstart*mat->bs;
967   if (n) *n = mat->rend*mat->bs;
968   PetscFunctionReturn(0);
969 }
970 
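/*
   MatGetRow returns the requested local (point) row with the columns of the two sequential
   pieces merged in increasing global column order: first any off-diagonal (B) columns that
   lie before this process's diagonal block, then the diagonal-block (A) columns, then the
   remaining B columns.  The rowvalues/rowindices workspace is allocated once, sized for the
   longest local row.
*/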
971 #undef __FUNCT__
972 #define __FUNCT__ "MatGetRow_MPISBAIJ"
973 int MatGetRow_MPISBAIJ(Mat matin,int row,int *nz,int **idx,PetscScalar **v)
974 {
975   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
976   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
977   int            bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
978   int            nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
979   int            *cmap,*idx_p,cstart = mat->cstart;
980 
981   PetscFunctionBegin;
982   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
983   mat->getrowactive = PETSC_TRUE;
984 
985   if (!mat->rowvalues && (idx || v)) {
986     /*
987         allocate enough space to hold information from the longest row.
988     */
989     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
990     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
991     int     max = 1,mbs = mat->mbs,tmp;
992     for (i=0; i<mbs; i++) {
993       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
994       if (max < tmp) { max = tmp; }
995     }
996     ierr = PetscMalloc(max*bs2*(sizeof(int)+sizeof(PetscScalar)),&mat->rowvalues);CHKERRQ(ierr);
997     mat->rowindices = (int*)(mat->rowvalues + max*bs2);
998   }
999 
1000   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows")
1001   lrow = row - brstart;  /* local row index */
1002 
1003   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1004   if (!v)   {pvA = 0; pvB = 0;}
1005   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1006   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1007   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1008   nztot = nzA + nzB;
1009 
1010   cmap  = mat->garray;
1011   if (v  || idx) {
1012     if (nztot) {
1013       /* Sort by increasing column numbers, assuming A and B already sorted */
1014       int imark = -1;
1015       if (v) {
1016         *v = v_p = mat->rowvalues;
1017         for (i=0; i<nzB; i++) {
1018           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1019           else break;
1020         }
1021         imark = i;
1022         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1023         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1024       }
1025       if (idx) {
1026         *idx = idx_p = mat->rowindices;
1027         if (imark > -1) {
1028           for (i=0; i<imark; i++) {
1029             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1030           }
1031         } else {
1032           for (i=0; i<nzB; i++) {
1033             if (cmap[cworkB[i]/bs] < cstart)
1034               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1035             else break;
1036           }
1037           imark = i;
1038         }
1039         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1040         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1041       }
1042     } else {
1043       if (idx) *idx = 0;
1044       if (v)   *v   = 0;
1045     }
1046   }
1047   *nz = nztot;
1048   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1049   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1050   PetscFunctionReturn(0);
1051 }
1052 
1053 #undef __FUNCT__
1054 #define __FUNCT__ "MatRestoreRow_MPISBAIJ"
1055 int MatRestoreRow_MPISBAIJ(Mat mat,int row,int *nz,int **idx,PetscScalar **v)
1056 {
1057   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1058 
1059   PetscFunctionBegin;
1060   if (baij->getrowactive == PETSC_FALSE) {
1061     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1062   }
1063   baij->getrowactive = PETSC_FALSE;
1064   PetscFunctionReturn(0);
1065 }
1066 
1067 #undef __FUNCT__
1068 #define __FUNCT__ "MatGetBlockSize_MPISBAIJ"
1069 int MatGetBlockSize_MPISBAIJ(Mat mat,int *bs)
1070 {
1071   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1072 
1073   PetscFunctionBegin;
1074   *bs = baij->bs;
1075   PetscFunctionReturn(0);
1076 }
1077 
1078 #undef __FUNCT__
1079 #define __FUNCT__ "MatZeroEntries_MPISBAIJ"
1080 int MatZeroEntries_MPISBAIJ(Mat A)
1081 {
1082   Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1083   int         ierr;
1084 
1085   PetscFunctionBegin;
1086   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
1087   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
1088   PetscFunctionReturn(0);
1089 }
1090 
1091 #undef __FUNCT__
1092 #define __FUNCT__ "MatGetInfo_MPISBAIJ"
1093 int MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1094 {
1095   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1096   Mat         A = a->A,B = a->B;
1097   int         ierr;
1098   PetscReal   isend[5],irecv[5];
1099 
1100   PetscFunctionBegin;
1101   info->block_size     = (double)a->bs;
1102   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1103   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1104   isend[3] = info->memory;  isend[4] = info->mallocs;
1105   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1106   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1107   isend[3] += info->memory;  isend[4] += info->mallocs;
1108   if (flag == MAT_LOCAL) {
1109     info->nz_used      = isend[0];
1110     info->nz_allocated = isend[1];
1111     info->nz_unneeded  = isend[2];
1112     info->memory       = isend[3];
1113     info->mallocs      = isend[4];
1114   } else if (flag == MAT_GLOBAL_MAX) {
1115     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_MAX,matin->comm);CHKERRQ(ierr);
1116     info->nz_used      = irecv[0];
1117     info->nz_allocated = irecv[1];
1118     info->nz_unneeded  = irecv[2];
1119     info->memory       = irecv[3];
1120     info->mallocs      = irecv[4];
1121   } else if (flag == MAT_GLOBAL_SUM) {
1122     ierr = MPI_Allreduce(isend,irecv,5,MPI_DOUBLE,MPI_SUM,matin->comm);CHKERRQ(ierr);
1123     info->nz_used      = irecv[0];
1124     info->nz_allocated = irecv[1];
1125     info->nz_unneeded  = irecv[2];
1126     info->memory       = irecv[3];
1127     info->mallocs      = irecv[4];
1128   } else {
1129     SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
1130   }
1131   info->rows_global       = (double)A->M;
1132   info->columns_global    = (double)A->N;
1133   info->rows_local        = (double)A->m;
1134   info->columns_local     = (double)A->N;
1135   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1136   info->fill_ratio_needed = 0;
1137   info->factor_mallocs    = 0;
1138   PetscFunctionReturn(0);
1139 }
1140 
1141 #undef __FUNCT__
1142 #define __FUNCT__ "MatSetOption_MPISBAIJ"
1143 int MatSetOption_MPISBAIJ(Mat A,MatOption op)
1144 {
1145   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1146   int         ierr;
1147 
1148   PetscFunctionBegin;
1149   switch (op) {
1150   case MAT_NO_NEW_NONZERO_LOCATIONS:
1151   case MAT_YES_NEW_NONZERO_LOCATIONS:
1152   case MAT_COLUMNS_UNSORTED:
1153   case MAT_COLUMNS_SORTED:
1154   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1155   case MAT_KEEP_ZEROED_ROWS:
1156   case MAT_NEW_NONZERO_LOCATION_ERR:
1157     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1158     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1159     break;
1160   case MAT_ROW_ORIENTED:
1161     a->roworiented = PETSC_TRUE;
1162     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1163     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1164     break;
1165   case MAT_ROWS_SORTED:
1166   case MAT_ROWS_UNSORTED:
1167   case MAT_YES_NEW_DIAGONALS:
1168   case MAT_USE_SINGLE_PRECISION_SOLVES:
1169     PetscLogInfo(A,"Info:MatSetOption_MPISBAIJ:Option ignored\n");
1170     break;
1171   case MAT_COLUMN_ORIENTED:
1172     a->roworiented = PETSC_FALSE;
1173     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1174     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1175     break;
1176   case MAT_IGNORE_OFF_PROC_ENTRIES:
1177     a->donotstash = PETSC_TRUE;
1178     break;
1179   case MAT_NO_NEW_DIAGONALS:
1180     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1181     break;
1182   case MAT_USE_HASH_TABLE:
1183     a->ht_flag = PETSC_TRUE;
1184     break;
1185   default:
1186     SETERRQ(PETSC_ERR_SUP,"unknown option");
1187     break;
1188   }
1189   PetscFunctionReturn(0);
1190 }
1191 
1192 #undef __FUNCT__
1193 #define __FUNCT__ "MatTranspose_MPISBAIJ"
1194 int MatTranspose_MPISBAIJ(Mat A,Mat *matout)
1195 {
1196   PetscFunctionBegin;
1197   SETERRQ(1,"Matrix is symmetric. MatTranspose() should not be called");
1198   /* PetscFunctionReturn(0); */
1199 }
1200 
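/*
   Only symmetric scaling is supported by this format: ll and rr must be the same vector,
   so the result is D*A*D.  The diagonal block is scaled on both sides locally; the
   off-diagonal block B is scaled by ll on the left locally and by the scattered ghost
   values of rr on the right, with the scatter overlapped against the local work.
*/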
1201 #undef __FUNCT__
1202 #define __FUNCT__ "MatDiagonalScale_MPISBAIJ"
1203 int MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1204 {
1205   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1206   Mat         a = baij->A,b = baij->B;
1207   int         ierr,s1,s2,s3;
1208 
1209   PetscFunctionBegin;
1210   if (ll != rr) {
1211     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same\n");
1212   }
1213   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1214   if (rr) {
1215     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1216     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1217     /* Overlap communication with computation. */
1218     ierr = VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1219     /*} if (ll) { */
1220     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1221     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1222     ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
1223     /* } */
1224   /* scale  the diagonal block */
1225   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1226 
1227   /* if (rr) { */
1228     /* Do a scatter end and then right scale the off-diagonal block */
1229     ierr = VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1230     ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
1231   }
1232 
1233   PetscFunctionReturn(0);
1234 }
1235 
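/*
   MatZeroRows_MPISBAIJ: each process determines which process owns each requested (point)
   row, exchanges the message counts with one MPI_Allreduce (PetscMaxSum_Op), ships the row
   indices with nonblocking sends/receives, and finally zeros the rows it owns using the
   sequential MatZeroRows routines (B first, since setting the diagonal may insert into B
   after a disassembly).
*/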
1236 #undef __FUNCT__
1237 #define __FUNCT__ "MatZeroRows_MPISBAIJ"
1238 int MatZeroRows_MPISBAIJ(Mat A,IS is,PetscScalar *diag)
1239 {
1240   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;
1241   int            i,ierr,N,*rows,*owners = l->rowners,size = l->size;
1242   int            *procs,*nprocs,j,idx,nsends,*work,row;
1243   int            nmax,*svalues,*starts,*owner,nrecvs,rank = l->rank;
1244   int            *rvalues,tag = A->tag,count,base,slen,n,*source;
1245   int            *lens,imdex,*lrows,*values,bs=l->bs,rstart_bs=l->rstart_bs;
1246   MPI_Comm       comm = A->comm;
1247   MPI_Request    *send_waits,*recv_waits;
1248   MPI_Status     recv_status,*send_status;
1249   IS             istmp;
1250   PetscTruth     found;
1251 
1252   PetscFunctionBegin;
1253   ierr = ISGetSize(is,&N);CHKERRQ(ierr);
1254   ierr = ISGetIndices(is,&rows);CHKERRQ(ierr);
1255 
1256   /*  first count number of contributors to each processor */
1257   ierr  = PetscMalloc(2*size*sizeof(int),&nprocs);CHKERRQ(ierr);
1258   ierr  = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);
1259   procs = nprocs + size;
1260   ierr  = PetscMalloc((N+1)*sizeof(int),&owner);CHKERRQ(ierr); /* see note*/
1261   for (i=0; i<N; i++) {
1262     idx   = rows[i];
1263     found = PETSC_FALSE;
1264     for (j=0; j<size; j++) {
1265       if (idx >= owners[j]*bs && idx < owners[j+1]*bs) {
1266         nprocs[j]++; procs[j] = 1; owner[i] = j; found = PETSC_TRUE; break;
1267       }
1268     }
1269     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1270   }
1271   nsends = 0;  for (i=0; i<size; i++) { nsends += procs[i];}
1272 
1273   /* inform other processors of number of messages and max length*/
1274   ierr   = PetscMalloc(2*size*sizeof(int),&work);CHKERRQ(ierr);
1275   ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
1276   nmax   = work[rank];
1277   nrecvs = work[size+rank];
1278   ierr   = PetscFree(work);CHKERRQ(ierr);
1279 
1280   /* post receives:   */
1281   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(int),&rvalues);CHKERRQ(ierr);
1282   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
1283   for (i=0; i<nrecvs; i++) {
1284     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
1285   }
1286 
1287   /* do sends:
1288      1) starts[i] gives the starting index in svalues for stuff going to
1289      the ith processor
1290   */
1291   ierr = PetscMalloc((N+1)*sizeof(int),&svalues);CHKERRQ(ierr);
1292   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
1293   ierr = PetscMalloc((size+1)*sizeof(int),&starts);CHKERRQ(ierr);
1294   starts[0]  = 0;
1295   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1296   for (i=0; i<N; i++) {
1297     svalues[starts[owner[i]]++] = rows[i];
1298   }
1299   ierr = ISRestoreIndices(is,&rows);CHKERRQ(ierr);
1300 
1301   starts[0] = 0;
1302   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1303   count = 0;
1304   for (i=0; i<size; i++) {
1305     if (procs[i]) {
1306       ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPI_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
1307     }
1308   }
1309   ierr = PetscFree(starts);CHKERRQ(ierr);
1310 
1311   base = owners[rank]*bs;
1312 
1313   /*  wait on receives */
1314   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(int),&lens);CHKERRQ(ierr);
1315   source = lens + nrecvs;
1316   count  = nrecvs; slen = 0;
1317   while (count) {
1318     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
1319     /* unpack receives into our local space */
1320     ierr = MPI_Get_count(&recv_status,MPI_INT,&n);CHKERRQ(ierr);
1321     source[imdex]  = recv_status.MPI_SOURCE;
1322     lens[imdex]    = n;
1323     slen          += n;
1324     count--;
1325   }
1326   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
1327 
1328   /* move the data into the send scatter */
1329   ierr = PetscMalloc((slen+1)*sizeof(int),&lrows);CHKERRQ(ierr);
1330   count = 0;
1331   for (i=0; i<nrecvs; i++) {
1332     values = rvalues + i*nmax;
1333     for (j=0; j<lens[i]; j++) {
1334       lrows[count++] = values[j] - base;
1335     }
1336   }
1337   ierr = PetscFree(rvalues);CHKERRQ(ierr);
1338   ierr = PetscFree(lens);CHKERRQ(ierr);
1339   ierr = PetscFree(owner);CHKERRQ(ierr);
1340   ierr = PetscFree(nprocs);CHKERRQ(ierr);
1341 
1342   /* actually zap the local rows */
1343   ierr = ISCreateGeneral(PETSC_COMM_SELF,slen,lrows,&istmp);CHKERRQ(ierr);
1344   PetscLogObjectParent(A,istmp);
1345 
1346   /*
1347         Zero the required rows. If the "diagonal block" of the matrix
1348      is square and the user wishes to set the diagonal, we use separate
1349      code so that MatSetValues() is not called for each diagonal entry, which would
1350      allocate new memory, causing many mallocs and slowing things down.
1351 
1352        Contributed by: Mathew Knepley
1353   */
1354   /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1355   ierr = MatZeroRows_SeqBAIJ(l->B,istmp,0);CHKERRQ(ierr);
1356   if (diag && (l->A->M == l->A->N)) {
1357     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,diag);CHKERRQ(ierr);
1358   } else if (diag) {
1359     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1360     if (((Mat_SeqSBAIJ*)l->A->data)->nonew) {
1361       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1362 MAT_NO_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1363     }
1364     for (i=0; i<slen; i++) {
1365       row  = lrows[i] + rstart_bs;
1366       ierr = MatSetValues(A,1,&row,1,&row,diag,INSERT_VALUES);CHKERRQ(ierr);
1367     }
1368     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1369     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1370   } else {
1371     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1372   }
1373 
1374   ierr = ISDestroy(istmp);CHKERRQ(ierr);
1375   ierr = PetscFree(lrows);CHKERRQ(ierr);
1376 
1377   /* wait on sends */
1378   if (nsends) {
1379     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
1380     ierr        = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
1381     ierr        = PetscFree(send_status);CHKERRQ(ierr);
1382   }
1383   ierr = PetscFree(send_waits);CHKERRQ(ierr);
1384   ierr = PetscFree(svalues);CHKERRQ(ierr);
1385 
1386   PetscFunctionReturn(0);
1387 }
1388 
1389 #undef __FUNCT__
1390 #define __FUNCT__ "MatPrintHelp_MPISBAIJ"
1391 int MatPrintHelp_MPISBAIJ(Mat A)
1392 {
1393   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1394   MPI_Comm    comm = A->comm;
1395   static int  called = 0;
1396   int         ierr;
1397 
1398   PetscFunctionBegin;
1399   if (!a->rank) {
1400     ierr = MatPrintHelp_SeqSBAIJ(a->A);CHKERRQ(ierr);
1401   }
1402   if (called) {PetscFunctionReturn(0);} else called = 1;
1403   ierr = (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");CHKERRQ(ierr);
1404   ierr = (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");CHKERRQ(ierr);
1405   PetscFunctionReturn(0);
1406 }
1407 
1408 #undef __FUNCT__
1409 #define __FUNCT__ "MatSetUnfactored_MPISBAIJ"
1410 int MatSetUnfactored_MPISBAIJ(Mat A)
1411 {
1412   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1413   int         ierr;
1414 
1415   PetscFunctionBegin;
1416   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1417   PetscFunctionReturn(0);
1418 }
1419 
1420 static int MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1421 
1422 #undef __FUNCT__
1423 #define __FUNCT__ "MatEqual_MPISBAIJ"
1424 int MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1425 {
1426   Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1427   Mat         a,b,c,d;
1428   PetscTruth  flg;
1429   int         ierr;
1430 
1431   PetscFunctionBegin;
1432   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg);CHKERRQ(ierr);
1433   if (!flg) SETERRQ(PETSC_ERR_ARG_INCOMP,"Matrices must be same type");
1434   a = matA->A; b = matA->B;
1435   c = matB->A; d = matB->B;
1436 
1437   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1438   if (flg == PETSC_TRUE) {
1439     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1440   }
1441   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);CHKERRQ(ierr);
1442   PetscFunctionReturn(0);
1443 }
1444 
1445 #undef __FUNCT__
1446 #define __FUNCT__ "MatSetUpPreallocation_MPISBAIJ"
1447 int MatSetUpPreallocation_MPISBAIJ(Mat A)
1448 {
1449   int        ierr;
1450 
1451   PetscFunctionBegin;
1452   ierr = MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1453   PetscFunctionReturn(0);
1454 }
1455 /* -------------------------------------------------------------------*/
1456 static struct _MatOps MatOps_Values = {
1457   MatSetValues_MPISBAIJ,
1458   MatGetRow_MPISBAIJ,
1459   MatRestoreRow_MPISBAIJ,
1460   MatMult_MPISBAIJ,
1461   MatMultAdd_MPISBAIJ,
1462   MatMultTranspose_MPISBAIJ,
1463   MatMultTransposeAdd_MPISBAIJ,
1464   0,
1465   0,
1466   0,
1467   0,
1468   0,
1469   0,
1470   0,
1471   MatTranspose_MPISBAIJ,
1472   MatGetInfo_MPISBAIJ,
1473   MatEqual_MPISBAIJ,
1474   MatGetDiagonal_MPISBAIJ,
1475   MatDiagonalScale_MPISBAIJ,
1476   MatNorm_MPISBAIJ,
1477   MatAssemblyBegin_MPISBAIJ,
1478   MatAssemblyEnd_MPISBAIJ,
1479   0,
1480   MatSetOption_MPISBAIJ,
1481   MatZeroEntries_MPISBAIJ,
1482   MatZeroRows_MPISBAIJ,
1483   0,
1484   0,
1485   0,
1486   0,
1487   MatSetUpPreallocation_MPISBAIJ,
1488   0,
1489   MatGetOwnershipRange_MPISBAIJ,
1490   0,
1491   0,
1492   0,
1493   0,
1494   MatDuplicate_MPISBAIJ,
1495   0,
1496   0,
1497   0,
1498   0,
1499   0,
1500   MatGetSubMatrices_MPISBAIJ,
1501   MatIncreaseOverlap_MPISBAIJ,
1502   MatGetValues_MPISBAIJ,
1503   0,
1504   MatPrintHelp_MPISBAIJ,
1505   MatScale_MPISBAIJ,
1506   0,
1507   0,
1508   0,
1509   MatGetBlockSize_MPISBAIJ,
1510   0,
1511   0,
1512   0,
1513   0,
1514   0,
1515   0,
1516   MatSetUnfactored_MPISBAIJ,
1517   0,
1518   MatSetValuesBlocked_MPISBAIJ,
1519   0,
1520   0,
1521   0,
1522   MatGetPetscMaps_Petsc,
1523   0,
1524   0,
1525   0,
1526   0,
1527   0,
1528   0,
1529   MatGetRowMax_MPISBAIJ};
1530 
1531 
1532 EXTERN_C_BEGIN
1533 #undef __FUNCT__
1534 #define __FUNCT__ "MatGetDiagonalBlock_MPISBAIJ"
1535 int MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1536 {
1537   PetscFunctionBegin;
1538   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1539   *iscopy = PETSC_FALSE;
1540   PetscFunctionReturn(0);
1541 }
1542 EXTERN_C_END
1543 
1544 EXTERN_C_BEGIN
1545 #undef __FUNCT__
1546 #define __FUNCT__ "MatCreate_MPISBAIJ"
1547 int MatCreate_MPISBAIJ(Mat B)
1548 {
1549   Mat_MPISBAIJ *b;
1550   int          ierr;
1551   PetscTruth   flg;
1552 
1553   PetscFunctionBegin;
1554 
1555   ierr    = PetscNew(Mat_MPISBAIJ,&b);CHKERRQ(ierr);
1556   B->data = (void*)b;
1557   ierr    = PetscMemzero(b,sizeof(Mat_MPISBAIJ));CHKERRQ(ierr);
1558   ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1559 
1560   B->ops->destroy    = MatDestroy_MPISBAIJ;
1561   B->ops->view       = MatView_MPISBAIJ;
1562   B->mapping    = 0;
1563   B->factor     = 0;
1564   B->assembled  = PETSC_FALSE;
1565 
1566   B->insertmode = NOT_SET_VALUES;
1567   ierr = MPI_Comm_rank(B->comm,&b->rank);CHKERRQ(ierr);
1568   ierr = MPI_Comm_size(B->comm,&b->size);CHKERRQ(ierr);
1569 
1570   /* build local table of row and column ownerships */
1571   ierr          = PetscMalloc(3*(b->size+2)*sizeof(int),&b->rowners);CHKERRQ(ierr);
1572   b->cowners    = b->rowners + b->size + 2;
1573   b->rowners_bs = b->cowners + b->size + 2;
1574   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1575 
1576   /* build cache for off array entries formed */
1577   ierr = MatStashCreate_Private(B->comm,1,&B->stash);CHKERRQ(ierr);
1578   b->donotstash  = PETSC_FALSE;
1579   b->colmap      = PETSC_NULL;
1580   b->garray      = PETSC_NULL;
1581   b->roworiented = PETSC_TRUE;
1582 
1583 #if defined(PETSC_USE_MAT_SINGLE)
1584   /* stuff for MatSetValues_XXX in single precision */
1585   b->setvalueslen     = 0;
1586   b->setvaluescopy    = PETSC_NULL;
1587 #endif
1588 
1589   /* stuff used in block assembly */
1590   b->barray       = 0;
1591 
1592   /* stuff used for matrix vector multiply */
1593   b->lvec         = 0;
1594   b->Mvctx        = 0;
1595 
1596   /* stuff for MatGetRow() */
1597   b->rowindices   = 0;
1598   b->rowvalues    = 0;
1599   b->getrowactive = PETSC_FALSE;
1600 
1601   /* hash table stuff */
1602   b->ht           = 0;
1603   b->hd           = 0;
1604   b->ht_size      = 0;
1605   b->ht_flag      = PETSC_FALSE;
1606   b->ht_fact      = 0;
1607   b->ht_total_ct  = 0;
1608   b->ht_insert_ct = 0;
1609 
1610   ierr = PetscOptionsHasName(PETSC_NULL,"-mat_use_hash_table",&flg);CHKERRQ(ierr);
1611   if (flg) {
1612     double fact = 1.39;
1613     ierr = MatSetOption(B,MAT_USE_HASH_TABLE);CHKERRQ(ierr);
1614     ierr = PetscOptionsGetReal(PETSC_NULL,"-mat_use_hash_table",&fact,PETSC_NULL);CHKERRQ(ierr);
1615     if (fact <= 1.0) fact = 1.39;
1616     ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
1617     PetscLogInfo(0,"MatCreateMPISBAIJ:Hash table Factor used %5.2f\n",fact);
1618   }
1619   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1620                                      "MatStoreValues_MPISBAIJ",
1621                                      MatStoreValues_MPISBAIJ);CHKERRQ(ierr);
1622   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1623                                      "MatRetrieveValues_MPISBAIJ",
1624                                      MatRetrieveValues_MPISBAIJ);CHKERRQ(ierr);
1625   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1626                                      "MatGetDiagonalBlock_MPISBAIJ",
1627                                      MatGetDiagonalBlock_MPISBAIJ);CHKERRQ(ierr);
1628   PetscFunctionReturn(0);
1629 }
1630 EXTERN_C_END
1631 
1632 #undef __FUNCT__
1633 #define __FUNCT__ "MatMPISBAIJSetPreallocation"
1634 /*@C
1635    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1636    the user should preallocate the matrix storage by setting the parameters
1637    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1638    performance can be increased by more than a factor of 50.
1639 
1640    Collective on Mat
1641 
1642    Input Parameters:
1643 +  A - the matrix
1644 .  bs   - size of block
1645 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1646            submatrix  (same for all local rows)
1647 .  d_nnz - array containing the number of block nonzeros in the various block rows
1648            in the diagonal portion of the local submatrix (possibly different for each block
1649            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1650 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1651            submatrix (same for all local rows).
1652 -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1653            off-diagonal portion of the local submatrix (possibly different for
1654            each block row) or PETSC_NULL.
1655 
1656 
1657    Options Database Keys:
1658 .   -mat_no_unroll - uses code that does not unroll the loops in the
1659                      block calculations (much slower)
1660 .   -mat_block_size - size of the blocks to use
1661 
1662    Notes:
1663 
1664    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1665    then it must be used on all processors that share the object for that argument.
1666 
1667    Storage Information:
1668    For a square global matrix we define each processor's diagonal portion
1669    to be its local rows and the corresponding columns (a square submatrix);
1670    each processor's off-diagonal portion encompasses the remainder of the
1671    local matrix (a rectangular submatrix).
1672 
1673    The user can specify preallocated storage for the diagonal part of
1674    the local submatrix with either d_nz or d_nnz (not both).  Set
1675    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1676    memory allocation.  Likewise, specify preallocated storage for the
1677    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1678 
1679    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1680    the figure below we depict these three local rows and all columns (0-11).
1681 
1682 .vb
1683            0 1 2 3 4 5 6 7 8 9 10 11
1684           -------------------
1685    row 3  |  o o o d d d o o o o o o
1686    row 4  |  o o o d d d o o o o o o
1687    row 5  |  o o o d d d o o o o o o
1688           -------------------
1689 .ve
1690 
1691    Thus, any entries in the d locations are stored in the d (diagonal)
1692    submatrix, and any entries in the o locations are stored in the
1693    o (off-diagonal) submatrix.  Note that the d submatrix is stored in the
1694    MATSEQSBAIJ format and the o submatrix in the MATSEQBAIJ (compressed row) format.
1695 
1696    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1697    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1698    In general, for PDE problems in which most nonzeros are near the diagonal,
1699    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1700    or you will get TERRIBLE performance; see the users' manual chapter on
1701    matrices.
1702 
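   Example usage (an illustrative sketch only; comm, M, and ierr are assumed to be
   declared elsewhere, and the block size and nonzero counts below are made up):
.vb
   Mat A;
   ierr = MatCreate(comm,PETSC_DECIDE,PETSC_DECIDE,M,M,&A);CHKERRQ(ierr);
   ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
   ierr = MatMPISBAIJSetPreallocation(A,2,5,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
.ve
   Entries can then be inserted with MatSetValues() or MatSetValuesBlocked() and the
   matrix assembled with MatAssemblyBegin()/MatAssemblyEnd().
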
1703    Level: intermediate
1704 
1705 .keywords: matrix, block, aij, compressed row, sparse, parallel
1706 
1707 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1708 @*/
1709 
1710 int MatMPISBAIJSetPreallocation(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1711 {
1712   Mat_MPISBAIJ *b;
1713   int          ierr,i,mbs,Mbs;
1714   PetscTruth   flg2;
1715 
1716   PetscFunctionBegin;
1717   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg2);CHKERRQ(ierr);
1718   if (!flg2) PetscFunctionReturn(0);
1719 
1720   ierr = PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1721 
1722   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1723   if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1724   if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1725   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %d",d_nz);
1726   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %d",o_nz);
1727   if (d_nnz) {
1728     for (i=0; i<B->m/bs; i++) {
1729       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %d value %d",i,d_nnz[i]);
1730     }
1731   }
1732   if (o_nnz) {
1733     for (i=0; i<B->m/bs; i++) {
1734       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %d value %d",i,o_nnz[i]);
1735     }
1736   }
1737   B->preallocated = PETSC_TRUE;
1738   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);CHKERRQ(ierr);
1739   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);CHKERRQ(ierr);
1740   ierr = PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);CHKERRQ(ierr);
1741   ierr = PetscMapCreateMPI(B->comm,B->m,B->M,&B->cmap);CHKERRQ(ierr);
1742 
1743   b   = (Mat_MPISBAIJ*)B->data;
1744   mbs = B->m/bs;
1745   Mbs = B->M/bs;
1746   if (mbs*bs != B->m) {
1747     SETERRQ2(PETSC_ERR_ARG_SIZ,"No of local rows %d must be divisible by blocksize %d",B->m,bs);
1748   }
1749 
1750   b->bs  = bs;
1751   b->bs2 = bs*bs;
1752   b->mbs = mbs;
1753   b->nbs = mbs;
1754   b->Mbs = Mbs;
1755   b->Nbs = Mbs;
1756 
1757   ierr = MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
1758   b->rowners[0]    = 0;
1759   for (i=2; i<=b->size; i++) {
1760     b->rowners[i] += b->rowners[i-1];
1761   }
1762   b->rstart    = b->rowners[b->rank];
1763   b->rend      = b->rowners[b->rank+1];
1764   b->cstart    = b->rstart;
1765   b->cend      = b->rend;
1766   for (i=0; i<=b->size; i++) {
1767     b->rowners_bs[i] = b->rowners[i]*bs;
1768   }
1769   b->rstart_bs = b->rstart*bs;
1770   b->rend_bs   = b->rend*bs;
1771 
1772   b->cstart_bs = b->cstart*bs;
1773   b->cend_bs   = b->cend*bs;
1774 
1775 
1776   ierr = MatCreateSeqSBAIJ(PETSC_COMM_SELF,bs,B->m,B->m,d_nz,d_nnz,&b->A);CHKERRQ(ierr);
1777   PetscLogObjectParent(B,b->A);
1778   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->M,o_nz,o_nnz,&b->B);CHKERRQ(ierr);
1779   PetscLogObjectParent(B,b->B);
1780 
1781   /* build cache for off array entries formed */
1782   ierr = MatStashCreate_Private(B->comm,bs,&B->bstash);CHKERRQ(ierr);
1783 
1784   PetscFunctionReturn(0);
1785 }
1786 
1787 #undef __FUNCT__
1788 #define __FUNCT__ "MatCreateMPISBAIJ"
1789 /*@C
1790    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1791    (block compressed row).  For good matrix assembly performance
1792    the user should preallocate the matrix storage by setting the parameters
1793    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1794    performance can be increased by more than a factor of 50.
1795 
1796    Collective on MPI_Comm
1797 
1798    Input Parameters:
1799 +  comm - MPI communicator
1800 .  bs   - size of block
1801 .  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
1802            This value should be the same as the local size used in creating the
1803            y vector for the matrix-vector product y = Ax.
1804 .  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
1805            This value should be the same as the local size used in creating the
1806            x vector for the matrix-vector product y = Ax.
1807 .  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
1808 .  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
1809 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1810            submatrix  (same for all local rows)
1811 .  d_nnz - array containing the number of block nonzeros in the various block rows
1812            in the diagonal portion of the local submatrix (possibly different for each block
1813            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1814 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1815            submatrix (same for all local rows).
1816 -  o_nnz - array containing the number of block nonzeros in the various block rows of the
1817            off-diagonal portion of the local submatrix (possibly different for
1818            each block row) or PETSC_NULL.
1819 
1820    Output Parameter:
1821 .  A - the matrix
1822 
1823    Options Database Keys:
1824 .   -mat_no_unroll - uses code that does not unroll the loops in the
1825                      block calculations (much slower)
1826 .   -mat_block_size - size of the blocks to use
1827 .   -mat_mpi - use the parallel matrix data structures even on one processor
1828                (defaults to using SeqSBAIJ format on one processor)
1829 
1830    Notes:
1831    The user MUST specify either the local or global matrix dimensions
1832    (possibly both).
1833 
1834    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1835    then it must be used on all processors that share the object for that argument.
1836 
1837    Storage Information:
1838    For a square global matrix we define each processor's diagonal portion
1839    to be its local rows and the corresponding columns (a square submatrix);
1840    each processor's off-diagonal portion encompasses the remainder of the
1841    local matrix (a rectangular submatrix).
1842 
1843    The user can specify preallocated storage for the diagonal part of
1844    the local submatrix with either d_nz or d_nnz (not both).  Set
1845    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1846    memory allocation.  Likewise, specify preallocated storage for the
1847    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1848 
1849    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1850    the figure below we depict these three local rows and all columns (0-11).
1851 
1852 .vb
1853            0 1 2 3 4 5 6 7 8 9 10 11
1854           -------------------
1855    row 3  |  o o o d d d o o o o o o
1856    row 4  |  o o o d d d o o o o o o
1857    row 5  |  o o o d d d o o o o o o
1858           -------------------
1859 .ve
1860 
1861    Thus, any entries in the d locations are stored in the d (diagonal)
1862    submatrix, and any entries in the o locations are stored in the
1863    o (off-diagonal) submatrix.  Note that the d submatrix is stored in the
1864    MATSEQSBAIJ format and the o submatrix in the MATSEQBAIJ (compressed row) format.
1865 
1866    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1867    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1868    In general, for PDE problems in which most nonzeros are near the diagonal,
1869    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1870    or you will get TERRIBLE performance; see the users' manual chapter on
1871    matrices.
1872 
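   Example usage (an illustrative sketch only; comm, M, and ierr are assumed to be
   declared elsewhere, and the block size and nonzero counts below are made up):
.vb
   Mat A;
   ierr = MatCreateMPISBAIJ(comm,2,PETSC_DECIDE,PETSC_DECIDE,M,M,5,PETSC_NULL,2,PETSC_NULL,&A);CHKERRQ(ierr);
.ve
   This requests block size 2 and lets PETSc choose the local sizes; roughly 5 diagonal
   and 2 off-diagonal block nonzeros per block row are preallocated on every process.
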
1873    Level: intermediate
1874 
1875 .keywords: matrix, block, aij, compressed row, sparse, parallel
1876 
1877 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1878 @*/
1879 
1880 int MatCreateMPISBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,int *d_nnz,int o_nz,int *o_nnz,Mat *A)
1881 {
1882   int ierr,size;
1883 
1884   PetscFunctionBegin;
1885   ierr = MatCreate(comm,m,n,M,N,A);CHKERRQ(ierr);
1886   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1887   if (size > 1) {
1888     ierr = MatSetType(*A,MATMPISBAIJ);CHKERRQ(ierr);
1889     ierr = MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
1890   } else {
1891     ierr = MatSetType(*A,MATSEQSBAIJ);CHKERRQ(ierr);
1892     ierr = MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
1893   }
1894   PetscFunctionReturn(0);
1895 }
1896 
1897 
1898 #undef __FUNCT__
1899 #define __FUNCT__ "MatDuplicate_MPISBAIJ"
1900 static int MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1901 {
1902   Mat          mat;
1903   Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1904   int          ierr,len=0;
1905 
1906   PetscFunctionBegin;
1907   *newmat       = 0;
1908   ierr = MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);CHKERRQ(ierr);
1909   ierr = MatSetType(mat,MATMPISBAIJ);CHKERRQ(ierr);
1910   mat->preallocated = PETSC_TRUE;
1911   a = (Mat_MPISBAIJ*)mat->data;
1912   a->bs  = oldmat->bs;
1913   a->bs2 = oldmat->bs2;
1914   a->mbs = oldmat->mbs;
1915   a->nbs = oldmat->nbs;
1916   a->Mbs = oldmat->Mbs;
1917   a->Nbs = oldmat->Nbs;
1918 
1919   a->rstart       = oldmat->rstart;
1920   a->rend         = oldmat->rend;
1921   a->cstart       = oldmat->cstart;
1922   a->cend         = oldmat->cend;
1923   a->size         = oldmat->size;
1924   a->rank         = oldmat->rank;
1925   a->donotstash   = oldmat->donotstash;
1926   a->roworiented  = oldmat->roworiented;
1927   a->rowindices   = 0;
1928   a->rowvalues    = 0;
1929   a->getrowactive = PETSC_FALSE;
1930   a->barray       = 0;
1931   a->rstart_bs    = oldmat->rstart_bs;
1932   a->rend_bs      = oldmat->rend_bs;
1933   a->cstart_bs    = oldmat->cstart_bs;
1934   a->cend_bs      = oldmat->cend_bs;
1935 
1936   /* hash table stuff */
1937   a->ht           = 0;
1938   a->hd           = 0;
1939   a->ht_size      = 0;
1940   a->ht_flag      = oldmat->ht_flag;
1941   a->ht_fact      = oldmat->ht_fact;
1942   a->ht_total_ct  = 0;
1943   a->ht_insert_ct = 0;
1944 
1945   ierr = PetscMalloc(3*(a->size+2)*sizeof(int),&a->rowners);CHKERRQ(ierr);
1946   PetscLogObjectMemory(mat,3*(a->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1947   a->cowners    = a->rowners + a->size + 2;
1948   a->rowners_bs = a->cowners + a->size + 2;
1949   ierr = PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
1950   ierr = MatStashCreate_Private(matin->comm,1,&mat->stash);CHKERRQ(ierr);
1951   ierr = MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);CHKERRQ(ierr);
1952   if (oldmat->colmap) {
1953 #if defined (PETSC_USE_CTABLE)
1954     ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
1955 #else
1956     ierr = PetscMalloc((a->Nbs)*sizeof(int),&a->colmap);CHKERRQ(ierr);
1957     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(int));
1958     ierr = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));CHKERRQ(ierr);
1959 #endif
1960   } else a->colmap = 0;
1961   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
1962     ierr = PetscMalloc(len*sizeof(int),&a->garray);CHKERRQ(ierr);
1963     PetscLogObjectMemory(mat,len*sizeof(int));
1964     ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));CHKERRQ(ierr);
1965   } else a->garray = 0;
1966 
1967   ierr =  VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
1968   PetscLogObjectParent(mat,a->lvec);
1969   ierr =  VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
1970 
1971   PetscLogObjectParent(mat,a->Mvctx);
1972   ierr =  MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
1973   PetscLogObjectParent(mat,a->A);
1974   ierr =  MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
1975   PetscLogObjectParent(mat,a->B);
1976   ierr = PetscFListDuplicate(matin->qlist,&mat->qlist);CHKERRQ(ierr);
1977   *newmat = mat;
1978   PetscFunctionReturn(0);
1979 }
1980 
1981 #include "petscsys.h"
1982 
1983 EXTERN_C_BEGIN
1984 #undef __FUNCT__
1985 #define __FUNCT__ "MatLoad_MPISBAIJ"
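/*
   Overview of the code below: process 0 reads the PETSc binary header, the row lengths,
   the column indices, and finally the numerical values from the viewer, keeping its own
   share and shipping each remaining process its portion with MPI_Send(); the other
   processes size their receive buffers from the scattered row lengths.  Every process
   then counts diagonal/off-diagonal block nonzeros per block row, creates the matrix
   with MatCreateMPISBAIJ(), and inserts its rows before the final assembly.
*/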
1986 int MatLoad_MPISBAIJ(PetscViewer viewer,MatType type,Mat *newmat)
1987 {
1988   Mat          A;
1989   int          i,nz,ierr,j,rstart,rend,fd;
1990   PetscScalar  *vals,*buf;
1991   MPI_Comm     comm = ((PetscObject)viewer)->comm;
1992   MPI_Status   status;
1993   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
1994   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
1995   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
1996   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
1997   int          dcount,kmax,k,nzcount,tmp;
1998 
1999   PetscFunctionBegin;
2000   ierr = PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
2001 
2002   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2003   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2004   if (!rank) {
2005     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
2006     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
2007     if (header[0] != MAT_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2008     if (header[3] < 0) {
2009       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2010     }
2011   }
2012 
2013   ierr = MPI_Bcast(header+1,3,MPI_INT,0,comm);CHKERRQ(ierr);
2014   M = header[1]; N = header[2];
2015 
2016   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2017 
2018   /*
2019      This code adds extra rows to make sure the number of rows is
2020      divisible by the blocksize
2021   */
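  /*
     A hypothetical worked example: with M = 10 and bs = 4, Mbs = 10/4 = 2 and
     extra_rows = 4 - 10 + 4*2 = 2, so Mbs is bumped to 3 and two padding rows are
     appended; further below each padded row receives a single entry of 1.0 on its
     diagonal so the padding does not leave empty rows.
  */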
2022   Mbs        = M/bs;
2023   extra_rows = bs - M + bs*(Mbs);
2024   if (extra_rows == bs) extra_rows = 0;
2025   else                  Mbs++;
2026   if (extra_rows && !rank) {
2027     PetscLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2028   }
2029 
2030   /* determine ownership of all rows */
2031   mbs        = Mbs/size + ((Mbs % size) > rank);
2032   m          = mbs*bs;
2033   ierr       = PetscMalloc(2*(size+2)*sizeof(int),&rowners);CHKERRQ(ierr);
2034   browners   = rowners + size + 1;
2035   ierr       = MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
2036   rowners[0] = 0;
2037   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2038   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2039   rstart = rowners[rank];
2040   rend   = rowners[rank+1];
2041 
2042   /* distribute row lengths to all processors */
2043   ierr = PetscMalloc((rend-rstart)*bs*sizeof(int),&locrowlens);CHKERRQ(ierr);
2044   if (!rank) {
2045     ierr = PetscMalloc((M+extra_rows)*sizeof(int),&rowlengths);CHKERRQ(ierr);
2046     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
2047     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2048     ierr = PetscMalloc(size*sizeof(int),&sndcounts);CHKERRQ(ierr);
2049     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2050     ierr = MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2051     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
2052   } else {
2053     ierr = MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2054   }
2055 
2056   if (!rank) {   /* procs[0] */
2057     /* calculate the number of nonzeros on each processor */
2058     ierr = PetscMalloc(size*sizeof(int),&procsnz);CHKERRQ(ierr);
2059     ierr = PetscMemzero(procsnz,size*sizeof(int));CHKERRQ(ierr);
2060     for (i=0; i<size; i++) {
2061       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2062         procsnz[i] += rowlengths[j];
2063       }
2064     }
2065     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2066 
2067     /* determine max buffer needed and allocate it */
2068     maxnz = 0;
2069     for (i=0; i<size; i++) {
2070       maxnz = PetscMax(maxnz,procsnz[i]);
2071     }
2072     ierr = PetscMalloc(maxnz*sizeof(int),&cols);CHKERRQ(ierr);
2073 
2074     /* read in my part of the matrix column indices  */
2075     nz     = procsnz[0];
2076     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2077     mycols = ibuf;
2078     if (size == 1)  nz -= extra_rows;
2079     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2080     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2081 
2082     /* read in every ones (except the last) and ship off */
2083     for (i=1; i<size-1; i++) {
2084       nz   = procsnz[i];
2085       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2086       ierr = MPI_Send(cols,nz,MPI_INT,i,tag,comm);CHKERRQ(ierr);
2087     }
2088     /* read in the stuff for the last proc */
2089     if (size != 1) {
2090       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2091       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2092       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2093       ierr = MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);CHKERRQ(ierr);
2094     }
2095     ierr = PetscFree(cols);CHKERRQ(ierr);
2096   } else {  /* procs[i], i>0 */
2097     /* determine buffer space needed for message */
2098     nz = 0;
2099     for (i=0; i<m; i++) {
2100       nz += locrowlens[i];
2101     }
2102     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2103     mycols = ibuf;
2104     /* receive message of column indices*/
2105     ierr = MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
2106     ierr = MPI_Get_count(&status,MPI_INT,&maxnz);CHKERRQ(ierr);
2107     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2108   }
2109 
2110   /* loop over local rows, determining number of off diagonal entries */
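  /*
     mask[] flags which block columns have already been seen in the current block row,
     masked1[] collects the distinct block columns that fall inside this process's
     block-row range [rstart,rend) and masked2[] collects those that do not; dlens[i]
     and odlens[i] therefore hold the d_nnz/o_nnz counts passed to MatCreateMPISBAIJ()
     below.
  */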
2111   ierr     = PetscMalloc(2*(rend-rstart+1)*sizeof(int),&dlens);CHKERRQ(ierr);
2112   odlens   = dlens + (rend-rstart);
2113   ierr     = PetscMalloc(3*Mbs*sizeof(int),&mask);CHKERRQ(ierr);
2114   ierr     = PetscMemzero(mask,3*Mbs*sizeof(int));CHKERRQ(ierr);
2115   masked1  = mask    + Mbs;
2116   masked2  = masked1 + Mbs;
2117   rowcount = 0; nzcount = 0;
2118   for (i=0; i<mbs; i++) {
2119     dcount  = 0;
2120     odcount = 0;
2121     for (j=0; j<bs; j++) {
2122       kmax = locrowlens[rowcount];
2123       for (k=0; k<kmax; k++) {
2124         tmp = mycols[nzcount++]/bs; /* block col. index */
2125         if (!mask[tmp]) {
2126           mask[tmp] = 1;
2127           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2128           else masked1[dcount++] = tmp; /* entry in diag portion */
2129         }
2130       }
2131       rowcount++;
2132     }
2133 
2134     dlens[i]  = dcount;  /* d_nzz[i] */
2135     odlens[i] = odcount; /* o_nzz[i] */
2136 
2137     /* zero out the mask elements we set */
2138     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2139     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2140   }
2141 
2142   /* create our matrix */
2143   ierr = MatCreateMPISBAIJ(comm,bs,m,m,PETSC_DETERMINE,PETSC_DETERMINE,0,dlens,0,odlens,newmat);
2144   CHKERRQ(ierr);
2145   A = *newmat;
2146   ierr = MatSetOption(A,MAT_COLUMNS_SORTED);CHKERRQ(ierr);
2147 
2148   if (!rank) {
2149     ierr = PetscMalloc(maxnz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2150     /* read in my part of the matrix numerical values  */
2151     nz = procsnz[0];
2152     vals = buf;
2153     mycols = ibuf;
2154     if (size == 1)  nz -= extra_rows;
2155     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2156     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2157 
2158     /* insert into matrix */
2159     jj      = rstart*bs;
2160     for (i=0; i<m; i++) {
2161       ierr = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2162       mycols += locrowlens[i];
2163       vals   += locrowlens[i];
2164       jj++;
2165     }
2166 
2167     /* read in other processors (except the last one) and ship out */
2168     for (i=1; i<size-1; i++) {
2169       nz   = procsnz[i];
2170       vals = buf;
2171       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2172       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);CHKERRQ(ierr);
2173     }
2174     /* the last proc */
2175     if (size != 1){
2176       nz   = procsnz[i] - extra_rows;
2177       vals = buf;
2178       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2179       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2180       ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);CHKERRQ(ierr);
2181     }
2182     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2183 
2184   } else {
2185     /* receive numeric values */
2186     ierr = PetscMalloc(nz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2187 
2188     /* receive message of values*/
2189     vals   = buf;
2190     mycols = ibuf;
2191     ierr   = MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);CHKERRQ(ierr);
2192     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2193     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2194 
2195     /* insert into matrix */
2196     jj      = rstart*bs;
2197     for (i=0; i<m; i++) {
2198       ierr    = MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2199       mycols += locrowlens[i];
2200       vals   += locrowlens[i];
2201       jj++;
2202     }
2203   }
2204 
2205   ierr = PetscFree(locrowlens);CHKERRQ(ierr);
2206   ierr = PetscFree(buf);CHKERRQ(ierr);
2207   ierr = PetscFree(ibuf);CHKERRQ(ierr);
2208   ierr = PetscFree(rowners);CHKERRQ(ierr);
2209   ierr = PetscFree(dlens);CHKERRQ(ierr);
2210   ierr = PetscFree(mask);CHKERRQ(ierr);
2211   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2212   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2213   PetscFunctionReturn(0);
2214 }
2215 EXTERN_C_END
2216 
2217 #undef __FUNCT__
2218 #define __FUNCT__ "MatMPISBAIJSetHashTableFactor"
2219 /*@
2220    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2221 
2222    Input Parameters:
2223 .  mat  - the matrix
2224 .  fact - factor
2225 
2226    Collective on Mat
2227 
2228    Level: advanced
2229 
2230   Notes:
2231    This can also be set by the command line option: -mat_use_hash_table fact
2232 
2233 .keywords: matrix, hashtable, factor, HT
2234 
2235 .seealso: MatSetOption()
2236 @*/
2237 int MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2238 {
2239   PetscFunctionBegin;
2240   SETERRQ(1,"Function not yet written for SBAIJ format");
2241   /* PetscFunctionReturn(0); */
2242 }
2243 
2244 #undef __FUNCT__
2245 #define __FUNCT__ "MatGetRowMax_MPISBAIJ"
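/*
   Sketch of the approach used below: MatGetRowMax() on the diagonal block a->A handles
   the locally owned square part; because SBAIJ stores only the upper triangle, each
   entry of the off-diagonal block a->B at (row,col) contributes to the local row
   maximum va[row] and, by symmetry, to the row maximum of global row col owned by a
   higher-ranked process.  Those remote contributions are accumulated in work[] and
   shipped with MPI_Send() to the owning processes, which fold them into their local
   result with a running maximum.
*/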
2246 int MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2247 {
2248   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2249   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(a->B)->data;
2250   PetscReal    atmp;
2251   PetscReal    *work,*svalues,*rvalues;
2252   int          ierr,i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2253   int          rank,size,*rowners_bs,dest,count,source;
2254   PetscScalar  *va;
2255   MatScalar    *ba;
2256   MPI_Status   stat;
2257 
2258   PetscFunctionBegin;
2259   ierr = MatGetRowMax(a->A,v);CHKERRQ(ierr);
2260   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2261 
2262   ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
2263   ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
2264 
2265   bs   = a->bs;
2266   mbs  = a->mbs;
2267   Mbs  = a->Mbs;
2268   ba   = b->a;
2269   bi   = b->i;
2270   bj   = b->j;
2271   /*
2272   PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] M: %d, bs: %d, mbs: %d \n",rank,bs*Mbs,bs,mbs);
2273   PetscSynchronizedFlush(PETSC_COMM_WORLD);
2274   */
2275 
2276   /* find ownerships */
2277   rowners_bs = a->rowners_bs;
2278   /*
2279   if (!rank){
2280     for (i=0; i<size+1; i++) PetscPrintf(PETSC_COMM_SELF," rowners_bs[%d]: %d\n",i,rowners_bs[i]);
2281   }
2282   */
2283 
2284   /* each proc creates an array to be distributed */
2285   ierr = PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);CHKERRQ(ierr);
2286   ierr = PetscMemzero(work,bs*Mbs*sizeof(PetscReal));CHKERRQ(ierr);
2287 
2288   /* row_max for B */
2289   if (rank != size-1){
2290     for (i=0; i<mbs; i++) {
2291       ncols = bi[1] - bi[0]; bi++;
2292       brow  = bs*i;
2293       for (j=0; j<ncols; j++){
2294         bcol = bs*(*bj);
2295         for (kcol=0; kcol<bs; kcol++){
2296           col = bcol + kcol;                 /* local col index */
2297           col += rowners_bs[rank+1];      /* global col index */
2298           /* PetscPrintf(PETSC_COMM_SELF,"[%d], col: %d\n",rank,col); */
2299           for (krow=0; krow<bs; krow++){
2300             atmp = PetscAbsScalar(*ba); ba++;
2301             row = brow + krow;    /* local row index */
2302             /* printf("val[%d,%d]: %g\n",row,col,atmp); */
2303             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2304             if (work[col] < atmp) work[col] = atmp;
2305           }
2306         }
2307         bj++;
2308       }
2309     }
2310     /*
2311       PetscPrintf(PETSC_COMM_SELF,"[%d], work: ",rank);
2312       for (i=0; i<bs*Mbs; i++) PetscPrintf(PETSC_COMM_SELF,"%g ",work[i]);
2313       PetscPrintf(PETSC_COMM_SELF,"[%d]: \n");
2314       */
2315 
2316     /* send values to its owners */
2317     for (dest=rank+1; dest<size; dest++){
2318       svalues = work + rowners_bs[dest];
2319       count   = rowners_bs[dest+1]-rowners_bs[dest];
2320       ierr    = MPI_Send(svalues,count,MPI_DOUBLE,dest,rank,PETSC_COMM_WORLD);CHKERRQ(ierr);
2321       /*
2322       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] sends %d values to [%d]: %g, %g, %g, %g\n",rank,count,dest,svalues[0],svalues[1],svalues[2],svalues[3]);
2323       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2324       */
2325     }
2326   }
2327 
2328   /* receive values */
2329   if (rank){
2330     rvalues = work;
2331     count   = rowners_bs[rank+1]-rowners_bs[rank];
2332     for (source=0; source<rank; source++){
2333       ierr = MPI_Recv(rvalues,count,MPI_DOUBLE,MPI_ANY_SOURCE,MPI_ANY_TAG,PETSC_COMM_WORLD,&stat);CHKERRQ(ierr);
2334       /* process values */
2335       for (i=0; i<count; i++){
2336         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2337       }
2338       /*
2339       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] received %d values from [%d]: %g, %g, %g, %g \n",rank,count,stat.MPI_SOURCE,rvalues[0],rvalues[1],rvalues[2],rvalues[3]);
2340       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2341       */
2342     }
2343   }
2344 
2345   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2346   ierr = PetscFree(work);CHKERRQ(ierr);
2347   PetscFunctionReturn(0);
2348 }
2349