1 /*$Id: mpisbaij.c,v 1.61 2001/08/10 03:31:37 bsmith Exp $*/
2 
3 #include "src/mat/impls/baij/mpi/mpibaij.h"    /*I "petscmat.h" I*/
4 #include "src/vec/vecimpl.h"
5 #include "mpisbaij.h"
6 #include "src/mat/impls/sbaij/seq/sbaij.h"
7 
8 extern int MatSetUpMultiply_MPISBAIJ(Mat);
9 extern int DisAssemble_MPISBAIJ(Mat);
10 extern int MatIncreaseOverlap_MPISBAIJ(Mat,int,IS *,int);
11 extern int MatGetSubMatrices_MPISBAIJ(Mat,int,IS *,IS *,MatReuse,Mat **);
12 extern int MatGetValues_SeqSBAIJ(Mat,int,int *,int,int *,PetscScalar *);
13 extern int MatSetValues_SeqSBAIJ(Mat,int,int *,int,int *,PetscScalar *,InsertMode);
14 extern int MatSetValuesBlocked_SeqSBAIJ(Mat,int,int*,int,int*,PetscScalar*,InsertMode);
15 extern int MatGetRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
16 extern int MatRestoreRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
17 extern int MatPrintHelp_SeqSBAIJ(Mat);
18 extern int MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
19 extern int MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
20 extern int MatGetRowMax_MPISBAIJ(Mat,Vec);
21 extern int MatRelax_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,int,int,Vec);
22 
23 /*  UGLY, ugly, ugly
24    When MatScalar == PetscScalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does
25    not exist. Otherwise ..._MatScalar() takes matrix entries already in single precision and
26    inserts them directly into the single precision data structures, while MatSetValuesBlocked_MPISBAIJ()
27    converts the (double precision) entries into single precision and then calls ..._MatScalar() to put
28    them into the single precision data structures.
29 */
30 #if defined(PETSC_USE_MAT_SINGLE)
31 extern int MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
32 extern int MatSetValues_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
33 extern int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
34 extern int MatSetValues_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
35 extern int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,int,int*,int,int*,MatScalar*,InsertMode);
36 #else
37 #define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
38 #define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
39 #define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
40 #define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
41 #define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
42 #endif
43 
44 EXTERN_C_BEGIN
45 #undef __FUNCT__
46 #define __FUNCT__ "MatStoreValues_MPISBAIJ"
47 int MatStoreValues_MPISBAIJ(Mat mat)
48 {
49   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
50   int          ierr;
51 
52   PetscFunctionBegin;
53   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
54   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
55   PetscFunctionReturn(0);
56 }
57 EXTERN_C_END
58 
59 EXTERN_C_BEGIN
60 #undef __FUNCT__
61 #define __FUNCT__ "MatRetrieveValues_MPISBAIJ"
62 int MatRetrieveValues_MPISBAIJ(Mat mat)
63 {
64   Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
65   int          ierr;
66 
67   PetscFunctionBegin;
68   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
69   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
70   PetscFunctionReturn(0);
71 }
72 EXTERN_C_END
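/*
   Usage sketch (hypothetical caller-side code, not compiled here): these two routines are
   normally driven from application code once the nonzero pattern is frozen, e.g.

       ierr = MatSetOption(mat,MAT_NO_NEW_NONZERO_LOCATIONS);CHKERRQ(ierr);
       ierr = MatStoreValues(mat);CHKERRQ(ierr);
           ... modify the numerical values of mat ...
       ierr = MatRetrieveValues(mat);CHKERRQ(ierr);

   For this MPI format both calls simply forward to the sequential diagonal block (aij->A)
   and off-diagonal block (aij->B), as above.
*/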
73 
74 /*
75      Local utility routine that creates a mapping from the global column
76    number to the local number in the off-diagonal part of the local
77    storage of the matrix.  This is done in a non-scalable way since the
78    length of colmap equals the global matrix size.
79 */
80 #undef __FUNCT__
81 #define __FUNCT__ "CreateColmap_MPISBAIJ_Private"
82 static int CreateColmap_MPISBAIJ_Private(Mat mat)
83 {
84   PetscFunctionBegin;
85   SETERRQ(1,"Function not yet written for SBAIJ format");
86   /* PetscFunctionReturn(0); */
87 }
88 
89 #define CHUNKSIZE  10
90 
91 #define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
92 { \
93  \
94     brow = row/bs;  \
95     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
96     rmax = aimax[brow]; nrow = ailen[brow]; \
97       bcol = col/bs; \
98       ridx = row % bs; cidx = col % bs; \
99       low = 0; high = nrow; \
100       while (high-low > 3) { \
101         t = (low+high)/2; \
102         if (rp[t] > bcol) high = t; \
103         else              low  = t; \
104       } \
105       for (_i=low; _i<high; _i++) { \
106         if (rp[_i] > bcol) break; \
107         if (rp[_i] == bcol) { \
108           bap  = ap +  bs2*_i + bs*cidx + ridx; \
109           if (addv == ADD_VALUES) *bap += value;  \
110           else                    *bap  = value;  \
111           goto a_noinsert; \
112         } \
113       } \
114       if (a->nonew == 1) goto a_noinsert; \
115       else if (a->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
116       if (nrow >= rmax) { \
117         /* there is no extra room in row, therefore enlarge */ \
118         int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
119         MatScalar *new_a; \
120  \
121         if (a->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
122  \
123         /* malloc new storage space */ \
124         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
125         ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
126         new_j = (int*)(new_a + bs2*new_nz); \
127         new_i = new_j + new_nz; \
128  \
129         /* copy over old data into new slots */ \
130         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
131         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
132         ierr = PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
133         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
134         ierr = PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
135         ierr = PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
136         ierr = PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(PetscScalar));CHKERRQ(ierr); \
137         ierr = PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
138                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
139         /* free up old matrix storage */ \
140         ierr = PetscFree(a->a);CHKERRQ(ierr);  \
141         if (!a->singlemalloc) { \
142           ierr = PetscFree(a->i);CHKERRQ(ierr); \
143           ierr = PetscFree(a->j);CHKERRQ(ierr);\
144         } \
145         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
146         a->singlemalloc = PETSC_TRUE; \
147  \
148         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
149         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
150         PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
151         a->s_maxnz += bs2*CHUNKSIZE; \
152         a->reallocs++; \
153         a->s_nz++; \
154       } \
155       N = nrow++ - 1;  \
156       /* shift up all the later entries in this row */ \
157       for (ii=N; ii>=_i; ii--) { \
158         rp[ii+1] = rp[ii]; \
159         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
160       } \
161       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); }  \
162       rp[_i]                      = bcol;  \
163       ap[bs2*_i + bs*cidx + ridx] = value;  \
164       a_noinsert:; \
165     ailen[brow] = nrow; \
166 }
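/*
   Layout notes for the macro above (informal sketch): the diagonal block A is kept in
   block compressed row form, so for block row brow the block column indices live in
   aj[ai[brow] .. ai[brow]+ailen[brow]-1] and the corresponding bs x bs blocks in aa,
   bs2 = bs*bs scalars per block stored column-major.  The scalar entry (row,col) of
   stored block _i is therefore addressed as

       aa[bs2*_i + bs*(col%bs) + (row%bs)]

   The macro narrows the search for the block column with a short bisection, then scans
   linearly; if the block is missing and insertions are allowed, the row is enlarged in
   chunks of CHUNKSIZE blocks, which is what the reallocation branch above does.
*/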
167 #ifndef MatSetValues_SeqBAIJ_B_Private
168 #define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
169 { \
170     brow = row/bs;  \
171     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
172     rmax = bimax[brow]; nrow = bilen[brow]; \
173       bcol = col/bs; \
174       ridx = row % bs; cidx = col % bs; \
175       low = 0; high = nrow; \
176       while (high-low > 3) { \
177         t = (low+high)/2; \
178         if (rp[t] > bcol) high = t; \
179         else              low  = t; \
180       } \
181       for (_i=low; _i<high; _i++) { \
182         if (rp[_i] > bcol) break; \
183         if (rp[_i] == bcol) { \
184           bap  = ap +  bs2*_i + bs*cidx + ridx; \
185           if (addv == ADD_VALUES) *bap += value;  \
186           else                    *bap  = value;  \
187           goto b_noinsert; \
188         } \
189       } \
190       if (b->nonew == 1) goto b_noinsert; \
191       else if (b->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
192       if (nrow >= rmax) { \
193         /* there is no extra room in row, therefore enlarge */ \
194         int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
195         MatScalar *new_a; \
196  \
197         if (b->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
198  \
199         /* malloc new storage space */ \
200         len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
201         ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
202         new_j = (int*)(new_a + bs2*new_nz); \
203         new_i = new_j + new_nz; \
204  \
205         /* copy over old data into new slots */ \
206         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
207         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
208         ierr = PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
209         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
210         ierr = PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
211         ierr = PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
212         ierr = PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
213         ierr = PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
214                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
215         /* free up old matrix storage */ \
216         ierr = PetscFree(b->a);CHKERRQ(ierr);  \
217         if (!b->singlemalloc) { \
218           ierr = PetscFree(b->i);CHKERRQ(ierr); \
219           ierr = PetscFree(b->j);CHKERRQ(ierr); \
220         } \
221         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
222         b->singlemalloc = PETSC_TRUE; \
223  \
224         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
225         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
226         PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
227         b->maxnz += bs2*CHUNKSIZE; \
228         b->reallocs++; \
229         b->nz++; \
230       } \
231       N = nrow++ - 1;  \
232       /* shift up all the later entries in this row */ \
233       for (ii=N; ii>=_i; ii--) { \
234         rp[ii+1] = rp[ii]; \
235         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
236       } \
237       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr);}  \
238       rp[_i]                      = bcol;  \
239       ap[bs2*_i + bs*cidx + ridx] = value;  \
240       b_noinsert:; \
241     bilen[brow] = nrow; \
242 }
243 #endif
244 
245 #if defined(PETSC_USE_MAT_SINGLE)
246 #undef __FUNCT__
247 #define __FUNCT__ "MatSetValues_MPISBAIJ"
248 int MatSetValues_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
249 {
250   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
251   int          ierr,i,N = m*n;
252   MatScalar    *vsingle;
253 
254   PetscFunctionBegin;
255   if (N > b->setvalueslen) {
256     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
257     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
258     b->setvalueslen  = N;
259   }
260   vsingle = b->setvaluescopy;
261 
262   for (i=0; i<N; i++) {
263     vsingle[i] = v[i];
264   }
265   ierr = MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
266   PetscFunctionReturn(0);
267 }
268 
269 #undef __FUNCT__
270 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ"
271 int MatSetValuesBlocked_MPISBAIJ(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
272 {
273   Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
274   int          ierr,i,N = m*n*b->bs2;
275   MatScalar    *vsingle;
276 
277   PetscFunctionBegin;
278   if (N > b->setvalueslen) {
279     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
280     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
281     b->setvalueslen  = N;
282   }
283   vsingle = b->setvaluescopy;
284   for (i=0; i<N; i++) {
285     vsingle[i] = v[i];
286   }
287   ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
288   PetscFunctionReturn(0);
289 }
290 
291 #undef __FUNCT__
292 #define __FUNCT__ "MatSetValues_MPISBAIJ_HT"
293 int MatSetValues_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
294 {
295   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
296   int         ierr,i,N = m*n;
297   MatScalar   *vsingle;
298 
299   PetscFunctionBegin;
300   SETERRQ(1,"Function not yet written for SBAIJ format");
301   /* PetscFunctionReturn(0); */
302 }
303 
304 #undef __FUNCT__
305 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_HT"
306 int MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,int m,int *im,int n,int *in,PetscScalar *v,InsertMode addv)
307 {
308   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
309   int         ierr,i,N = m*n*b->bs2;
310   MatScalar   *vsingle;
311 
312   PetscFunctionBegin;
313   SETERRQ(1,"Function not yet written for SBAIJ format");
314   /* PetscFunctionReturn(0); */
315 }
316 #endif
317 
318 /* Only add/insert a(i,j) with i<=j (blocks).
319    Any a(i,j) with i>j input by the user is ignored.
320 */
321 #undef __FUNCT__
322 #define __FUNCT__ "MatSetValues_MPISBAIJ"
323 int MatSetValues_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
324 {
325   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
326   MatScalar    value;
327   PetscTruth   roworiented = baij->roworiented;
328   int          ierr,i,j,row,col;
329   int          rstart_orig=baij->rstart_bs;
330   int          rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
331   int          cend_orig=baij->cend_bs,bs=baij->bs;
332 
333   /* Some Variables required in the macro */
334   Mat          A = baij->A;
335   Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
336   int          *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
337   MatScalar    *aa=a->a;
338 
339   Mat          B = baij->B;
340   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(B)->data;
341   int          *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
342   MatScalar    *ba=b->a;
343 
344   int          *rp,ii,nrow,_i,rmax,N,brow,bcol;
345   int          low,high,t,ridx,cidx,bs2=a->bs2;
346   MatScalar    *ap,*bap;
347 
348   /* for stash */
349   int          n_loc, *in_loc=0;
350   MatScalar    *v_loc=0;
351 
352   PetscFunctionBegin;
353 
354   if(!baij->donotstash){
355     ierr = PetscMalloc(n*sizeof(int),&in_loc);CHKERRQ(ierr);
356     ierr = PetscMalloc(n*sizeof(MatScalar),&v_loc);CHKERRQ(ierr);
357   }
358 
359   for (i=0; i<m; i++) {
360     if (im[i] < 0) continue;
361 #if defined(PETSC_USE_BOPT_g)
362     if (im[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
363 #endif
364     if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
365       row = im[i] - rstart_orig;              /* local row index */
366       for (j=0; j<n; j++) {
367         if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
368         if (in[j] >= cstart_orig && in[j] < cend_orig){  /* diag entry (A) */
369           col = in[j] - cstart_orig;          /* local col index */
370           brow = row/bs; bcol = col/bs;
371           if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
372           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
373           MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
374           /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
375         } else if (in[j] < 0) continue;
376 #if defined(PETSC_USE_BOPT_g)
377         else if (in[j] >= mat->N) {SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Col too large");}
378 #endif
379         else {  /* off-diag entry (B) */
380           if (mat->was_assembled) {
381             if (!baij->colmap) {
382               ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
383             }
384 #if defined (PETSC_USE_CTABLE)
385             ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
386             col  = col - 1;
387 #else
388             col = baij->colmap[in[j]/bs] - 1;
389 #endif
390             if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
391               ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
392               col =  in[j];
393               /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
394               B = baij->B;
395               b = (Mat_SeqBAIJ*)(B)->data;
396               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
397               ba=b->a;
398             } else col += in[j]%bs;
399           } else col = in[j];
400           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
401           MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
402           /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
403         }
404       }
405     } else {  /* off processor entry */
406       if (!baij->donotstash) {
407         n_loc = 0;
408         for (j=0; j<n; j++){
409           if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
410           in_loc[n_loc] = in[j];
411           if (roworiented) {
412             v_loc[n_loc] = v[i*n+j];
413           } else {
414             v_loc[n_loc] = v[j*m+i];
415           }
416           n_loc++;
417         }
418         ierr = MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);CHKERRQ(ierr);
419       }
420     }
421   }
422 
423   if(!baij->donotstash){
424     ierr = PetscFree(in_loc);CHKERRQ(ierr);
425     ierr = PetscFree(v_loc);CHKERRQ(ierr);
426   }
427   PetscFunctionReturn(0);
428 }
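/*
   Caller-side sketch (hypothetical example, not part of this file): since only the upper
   triangular block part is stored, assembly code typically generates only entries with
   row <= col; anything falling below the block diagonal is silently ignored by the
   routine above.  Assuming rstart/rend come from MatGetOwnershipRange() and N from
   MatGetSize():

       int         i,j,ierr;
       PetscScalar v;
       for (i=rstart; i<rend; i++) {
         for (j=i; j<N; j++) {
           v    = some_value(i,j);
           ierr = MatSetValues(mat,1,&i,1,&j,&v,INSERT_VALUES);CHKERRQ(ierr);
         }
       }
       ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   some_value() is a placeholder for the application's entry generator.
*/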
429 
430 #undef __FUNCT__
431 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ"
432 int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
433 {
434   PetscFunctionBegin;
435   SETERRQ(1,"Function not yet written for SBAIJ format");
436   /* PetscFunctionReturn(0); */
437 }
438 
439 #define HASH_KEY 0.6180339887
440 #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
441 /* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
442 /* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
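/*
   HASH() above is multiplicative hashing with the golden ratio constant: the fractional
   part of key*0.6180339887 is scaled by the table size.  A small worked example
   (values rounded): size = 100, key = 37 gives tmp = 22.867..., fractional part 0.867...,
   so HASH(100,37,tmp) = (int)(100*0.867...) = 86.
*/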
443 #undef __FUNCT__
444 #define __FUNCT__ "MatSetValues_MPISBAIJ_HT_MatScalar"
445 int MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
446 {
447   PetscFunctionBegin;
448   SETERRQ(1,"Function not yet written for SBAIJ format");
449   /* PetscFunctionReturn(0); */
450 }
451 
452 #undef __FUNCT__
453 #define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_HT_MatScalar"
454 int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,int m,int *im,int n,int *in,MatScalar *v,InsertMode addv)
455 {
456   PetscFunctionBegin;
457   SETERRQ(1,"Function not yet written for SBAIJ format");
458   /* PetscFunctionReturn(0); */
459 }
460 
461 #undef __FUNCT__
462 #define __FUNCT__ "MatGetValues_MPISBAIJ"
463 int MatGetValues_MPISBAIJ(Mat mat,int m,int *idxm,int n,int *idxn,PetscScalar *v)
464 {
465   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
466   int          bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
467   int          bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
468 
469   PetscFunctionBegin;
470   for (i=0; i<m; i++) {
471     if (idxm[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
472     if (idxm[i] >= mat->M) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Row too large");
473     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
474       row = idxm[i] - bsrstart;
475       for (j=0; j<n; j++) {
476         if (idxn[j] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative column");
477         if (idxn[j] >= mat->N) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Column too large");
478         if (idxn[j] >= bscstart && idxn[j] < bscend){
479           col = idxn[j] - bscstart;
480           ierr = MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
481         } else {
482           if (!baij->colmap) {
483             ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
484           }
485 #if defined (PETSC_USE_CTABLE)
486           ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
487           data --;
488 #else
489           data = baij->colmap[idxn[j]/bs]-1;
490 #endif
491           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
492           else {
493             col  = data + idxn[j]%bs;
494             ierr = MatGetValues_SeqSBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
495           }
496         }
497       }
498     } else {
499       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
500     }
501   }
502  PetscFunctionReturn(0);
503 }
504 
505 #undef __FUNCT__
506 #define __FUNCT__ "MatNorm_MPISBAIJ"
507 int MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
508 {
509   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
510   /* Mat_SeqSBAIJ *amat = (Mat_SeqSBAIJ*)baij->A->data; */
511   /* Mat_SeqBAIJ  *bmat = (Mat_SeqBAIJ*)baij->B->data; */
512   int        ierr;
513   PetscReal  sum[2],*lnorm2;
514 
515   PetscFunctionBegin;
516   if (baij->size == 1) {
517     ierr =  MatNorm(baij->A,type,norm);CHKERRQ(ierr);
518   } else {
519     if (type == NORM_FROBENIUS) {
520       ierr = PetscMalloc(2*sizeof(PetscReal),&lnorm2);CHKERRQ(ierr);
521       ierr =  MatNorm(baij->A,type,lnorm2);CHKERRQ(ierr);
522       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
523       ierr =  MatNorm(baij->B,type,lnorm2);CHKERRQ(ierr);
524       *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;             /* square of norm(B) */
525       /*
526       ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
527       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], lnorm2=%g, %g\n",rank,lnorm2[0],lnorm2[1]);
528       */
529       ierr = MPI_Allreduce(lnorm2,&sum,2,MPIU_REAL,MPI_SUM,mat->comm);CHKERRQ(ierr);
530       /*
531       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d], sum=%g, %g\n",rank,sum[0],sum[1]);
532       PetscSynchronizedFlush(PETSC_COMM_WORLD); */
533 
534       *norm = sqrt(sum[0] + 2*sum[1]);
535       ierr = PetscFree(lnorm2);CHKERRQ(ierr);
536     } else {
537       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
538     }
539   }
540   PetscFunctionReturn(0);
541 }
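/*
   Note on the Frobenius case above: each process contributes the squared norm of its
   diagonal block A once and of its off-diagonal block B twice, because every stored
   entry of B also appears (transposed) in the lower triangle of the global symmetric
   matrix.  After the MPI_Allreduce this is exactly

       ||mat||_F = sqrt( sum[0] + 2*sum[1] )
*/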
542 
543 /*
544   Creates the hash table, and sets its entries.
545   This table is created only once.
546   If new entries need to be added to the matrix
547   then the hash table has to be destroyed and
548   recreated.
549 */
550 #undef __FUNCT__
551 #define __FUNCT__ "MatCreateHashTable_MPISBAIJ_Private"
552 int MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
553 {
554   PetscFunctionBegin;
555   SETERRQ(1,"Function not yet written for SBAIJ format");
556   /* PetscFunctionReturn(0); */
557 }
558 
559 #undef __FUNCT__
560 #define __FUNCT__ "MatAssemblyBegin_MPISBAIJ"
561 int MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
562 {
563   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
564   int         ierr,nstash,reallocs;
565   InsertMode  addv;
566 
567   PetscFunctionBegin;
568   if (baij->donotstash) {
569     PetscFunctionReturn(0);
570   }
571 
572   /* make sure all processors are either in INSERTMODE or ADDMODE */
573   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);CHKERRQ(ierr);
574   if (addv == (ADD_VALUES|INSERT_VALUES)) {
575     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
576   }
577   mat->insertmode = addv; /* in case this processor had no cache */
578 
579   ierr = MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);CHKERRQ(ierr);
580   ierr = MatStashScatterBegin_Private(&mat->bstash,baij->rowners);CHKERRQ(ierr);
581   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
582   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %d entries,uses %d mallocs.\n",nstash,reallocs);
583   ierr = MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);CHKERRQ(ierr);
584   PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
585   PetscFunctionReturn(0);
586 }
587 
588 #undef __FUNCT__
589 #define __FUNCT__ "MatAssemblyEnd_MPISBAIJ"
590 int MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
591 {
592   Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
593   Mat_SeqSBAIJ  *a=(Mat_SeqSBAIJ*)baij->A->data;
594   Mat_SeqBAIJ  *b=(Mat_SeqBAIJ*)baij->B->data;
595   int         i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
596   int         *row,*col,other_disassembled;
597   PetscTruth  r1,r2,r3;
598   MatScalar   *val;
599   InsertMode  addv = mat->insertmode;
600   /* int         rank;*/
601 
602   PetscFunctionBegin;
603   /* remove 2 lines below later */
604   /*ierr = MPI_Comm_rank(PETSC_COMM_WORLD, &rank);CHKERRQ(ierr); */
605 
606   if (!baij->donotstash) {
607     while (1) {
608       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
609       /*
610       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d]: in AssemblyEnd, stash, flg=%d\n",rank,flg);
611       PetscSynchronizedFlush(PETSC_COMM_WORLD);
612       */
613       if (!flg) break;
614 
615       for (i=0; i<n;) {
616         /* Now identify the consecutive vals belonging to the same row */
617         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
618         if (j < n) ncols = j-i;
619         else       ncols = n-i;
620         /* Now assemble all these values with a single function call */
621         ierr = MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
622         i = j;
623       }
624     }
625     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
626     /* Now process the block-stash. Since the values are stashed column-oriented,
627        set the roworiented flag to column oriented, and after MatSetValues()
628        restore the original flags */
629     r1 = baij->roworiented;
630     r2 = a->roworiented;
631     r3 = b->roworiented;
632     baij->roworiented = PETSC_FALSE;
633     a->roworiented    = PETSC_FALSE;
634     b->roworiented    = PETSC_FALSE;
635     while (1) {
636       ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
637       if (!flg) break;
638 
639       for (i=0; i<n;) {
640         /* Now identify the consecutive vals belonging to the same row */
641         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
642         if (j < n) ncols = j-i;
643         else       ncols = n-i;
644         ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
645         i = j;
646       }
647     }
648     ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
649     baij->roworiented = r1;
650     a->roworiented    = r2;
651     b->roworiented    = r3;
652   }
653 
654   ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
655   ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);
656 
657   /* determine if any processor has disassembled, if so we must
658      also disassemble ourselves, in order that we may reassemble. */
659   /*
660      if nonzero structure of submatrix B cannot change then we know that
661      no processor disassembled thus we can skip this stuff
662   */
663   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
664     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);CHKERRQ(ierr);
665     if (mat->was_assembled && !other_disassembled) {
666       ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
667     }
668   }
669 
670   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
671     ierr = MatSetUpMultiply_MPISBAIJ(mat);CHKERRQ(ierr);
672   }
673   ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
674   ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);
675 
676 #if defined(PETSC_USE_BOPT_g)
677   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
678     PetscLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);
679     baij->ht_total_ct  = 0;
680     baij->ht_insert_ct = 0;
681   }
682 #endif
683   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
684     ierr = MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
685     mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
686     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
687   }
688 
689   if (baij->rowvalues) {
690     ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
691     baij->rowvalues = 0;
692   }
693   PetscFunctionReturn(0);
694 }
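/*
   Assembly sketch: values destined for rows owned by another process are not inserted
   directly; MatSetValues_MPISBAIJ() places them in mat->stash (point entries) or
   mat->bstash (blocked entries).  MatAssemblyBegin() starts the exchange of those
   stashes and MatAssemblyEnd(), above, drains the incoming messages and re-inserts the
   received values locally.  The blocked stash is stored column oriented, which is why
   the roworiented flags are flipped around the MatSetValuesBlocked call and restored
   afterwards.
*/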
695 
696 #undef __FUNCT__
697 #define __FUNCT__ "MatView_MPISBAIJ_ASCIIorDraworSocket"
698 static int MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
699 {
700   Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
701   int               ierr,bs = baij->bs,size = baij->size,rank = baij->rank;
702   PetscTruth        isascii,isdraw;
703   PetscViewer       sviewer;
704   PetscViewerFormat format;
705 
706   PetscFunctionBegin;
707   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
708   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
709   if (isascii) {
710     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
711     if (format == PETSC_VIEWER_ASCII_INFO_LONG) {
712       MatInfo info;
713       ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
714       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
715       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
716               rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
717               baij->bs,(int)info.memory);CHKERRQ(ierr);
718       ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
719       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
720       ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
721       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
722       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
723       ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
724       PetscFunctionReturn(0);
725     } else if (format == PETSC_VIEWER_ASCII_INFO) {
726       ierr = PetscViewerASCIIPrintf(viewer,"  block size is %d\n",bs);CHKERRQ(ierr);
727       PetscFunctionReturn(0);
728     }
729   }
730 
731   if (isdraw) {
732     PetscDraw       draw;
733     PetscTruth isnull;
734     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
735     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
736   }
737 
738   if (size == 1) {
739     ierr = PetscObjectSetName((PetscObject)baij->A,mat->name);CHKERRQ(ierr);
740     ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
741   } else {
742     /* assemble the entire matrix onto first processor. */
743     Mat         A;
744     Mat_SeqSBAIJ *Aloc;
745     Mat_SeqBAIJ *Bloc;
746     int         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
747     MatScalar   *a;
748 
749     if (!rank) {
750       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
751     } else {
752       ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
753     }
754     PetscLogObjectParent(mat,A);
755 
756     /* copy over the A part */
757     Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
758     ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
759     ierr  = PetscMalloc(bs*sizeof(int),&rvals);CHKERRQ(ierr);
760 
761     for (i=0; i<mbs; i++) {
762       rvals[0] = bs*(baij->rstart + i);
763       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
764       for (j=ai[i]; j<ai[i+1]; j++) {
765         col = (baij->cstart+aj[j])*bs;
766         for (k=0; k<bs; k++) {
767           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
768           col++; a += bs;
769         }
770       }
771     }
772     /* copy over the B part */
773     Bloc = (Mat_SeqBAIJ*)baij->B->data;
774     ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
775     for (i=0; i<mbs; i++) {
776       rvals[0] = bs*(baij->rstart + i);
777       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
778       for (j=ai[i]; j<ai[i+1]; j++) {
779         col = baij->garray[aj[j]]*bs;
780         for (k=0; k<bs; k++) {
781           ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
782           col++; a += bs;
783         }
784       }
785     }
786     ierr = PetscFree(rvals);CHKERRQ(ierr);
787     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
788     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
789     /*
790        Everyone has to call to draw the matrix since the graphics waits are
791        synchronized across all processors that share the PetscDraw object
792     */
793     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
794     if (!rank) {
795       ierr = PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,mat->name);CHKERRQ(ierr);
796       ierr = MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
797     }
798     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
799     ierr = MatDestroy(A);CHKERRQ(ierr);
800   }
801   PetscFunctionReturn(0);
802 }
803 
804 #undef __FUNCT__
805 #define __FUNCT__ "MatView_MPISBAIJ"
806 int MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
807 {
808   int        ierr;
809   PetscTruth isascii,isdraw,issocket,isbinary;
810 
811   PetscFunctionBegin;
812   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
813   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
814   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
815   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
816   if (isascii || isdraw || issocket || isbinary) {
817     ierr = MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
818   } else {
819     SETERRQ1(1,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
820   }
821   PetscFunctionReturn(0);
822 }
823 
824 #undef __FUNCT__
825 #define __FUNCT__ "MatDestroy_MPISBAIJ"
826 int MatDestroy_MPISBAIJ(Mat mat)
827 {
828   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
829   int         ierr;
830 
831   PetscFunctionBegin;
832 #if defined(PETSC_USE_LOG)
833   PetscLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
834 #endif
835   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
836   ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
837   ierr = PetscFree(baij->rowners);CHKERRQ(ierr);
838   ierr = MatDestroy(baij->A);CHKERRQ(ierr);
839   ierr = MatDestroy(baij->B);CHKERRQ(ierr);
840 #if defined (PETSC_USE_CTABLE)
841   if (baij->colmap) {ierr = PetscTableDelete(baij->colmap);CHKERRQ(ierr);}
842 #else
843   if (baij->colmap) {ierr = PetscFree(baij->colmap);CHKERRQ(ierr);}
844 #endif
845   if (baij->garray) {ierr = PetscFree(baij->garray);CHKERRQ(ierr);}
846   if (baij->lvec)   {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
847   if (baij->Mvctx)  {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
848   if (baij->slvec0) {ierr = VecDestroy(baij->slvec0);CHKERRQ(ierr);}
849   if (baij->slvec1) {ierr = VecDestroy(baij->slvec1);CHKERRQ(ierr);}
850   if (baij->sMvctx)  {ierr = VecScatterDestroy(baij->sMvctx);CHKERRQ(ierr);}
851   if (baij->rowvalues) {ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);}
852   if (baij->barray) {ierr = PetscFree(baij->barray);CHKERRQ(ierr);}
853   if (baij->hd) {ierr = PetscFree(baij->hd);CHKERRQ(ierr);}
854 #if defined(PETSC_USE_MAT_SINGLE)
855   if (baij->setvaluescopy) {ierr = PetscFree(baij->setvaluescopy);CHKERRQ(ierr);}
856 #endif
857   ierr = PetscFree(baij);CHKERRQ(ierr);
858   PetscFunctionReturn(0);
859 }
860 
861 #undef __FUNCT__
862 #define __FUNCT__ "MatMult_MPISBAIJ"
863 int MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
864 {
865   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
866   int         ierr,nt;
867 
868   PetscFunctionBegin;
869   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
870   if (nt != A->n) {
871     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
872   }
873   ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
874   if (nt != A->m) {
875     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible parition of A and yy");
876   }
877 
878   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
879   /* do diagonal part */
880   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
881   /* do superdiagonal part */
882   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
883   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
884   /* do subdiagonal part */
885   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
886   ierr = VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
887   ierr = VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
888 
889   PetscFunctionReturn(0);
890 }
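/*
   Sketch of the multiply above: only the upper triangular part is stored, split into the
   local diagonal block A and the strictly superdiagonal block B, so y = mat*x is
   accumulated in three pieces:

       y  = A*x                      (local symmetric diagonal block)
       y += B*lvec                   (superdiagonal part, lvec = scattered ghost values of x)
       y += scatter-add of B^T*x     (subdiagonal part, sent back to the owning processes)

   The forward scatter of x into a->lvec is overlapped with the local A*x product.
*/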
891 
892 #undef __FUNCT__
893 #define __FUNCT__ "MatMultAdd_MPISBAIJ"
894 int MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
895 {
896   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
897   int        ierr;
898 
899   PetscFunctionBegin;
900   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
901   /* do diagonal part */
902   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
903   /* do superdiagonal part */
904   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
905   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
906 
907   /* do subdiagonal part */
908   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
909   ierr = VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
910   ierr = VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
911 
912   PetscFunctionReturn(0);
913 }
914 
915 #undef __FUNCT__
916 #define __FUNCT__ "MatMultTranspose_MPISBAIJ"
917 int MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
918 {
919   PetscFunctionBegin;
920   SETERRQ(1,"Matrix is symmetric. Call MatMult().");
921   /* PetscFunctionReturn(0); */
922 }
923 
924 #undef __FUNCT__
925 #define __FUNCT__ "MatMultTransposeAdd_MPISBAIJ"
926 int MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
927 {
928   PetscFunctionBegin;
929   SETERRQ(1,"Matrix is symmetric. Call MatMultAdd().");
930   /* PetscFunctionReturn(0); */
931 }
932 
933 /*
934   This only works correctly for square matrices where the subblock A->A is the
935    diagonal block
936 */
937 #undef __FUNCT__
938 #define __FUNCT__ "MatGetDiagonal_MPISBAIJ"
939 int MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
940 {
941   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
942   int         ierr;
943 
944   PetscFunctionBegin;
945   /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
946   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
947   PetscFunctionReturn(0);
948 }
949 
950 #undef __FUNCT__
951 #define __FUNCT__ "MatScale_MPISBAIJ"
952 int MatScale_MPISBAIJ(PetscScalar *aa,Mat A)
953 {
954   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
955   int         ierr;
956 
957   PetscFunctionBegin;
958   ierr = MatScale(aa,a->A);CHKERRQ(ierr);
959   ierr = MatScale(aa,a->B);CHKERRQ(ierr);
960   PetscFunctionReturn(0);
961 }
962 
963 #undef __FUNCT__
964 #define __FUNCT__ "MatGetRow_MPISBAIJ"
965 int MatGetRow_MPISBAIJ(Mat matin,int row,int *nz,int **idx,PetscScalar **v)
966 {
967   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
968   PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
969   int            bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
970   int            nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
971   int            *cmap,*idx_p,cstart = mat->cstart;
972 
973   PetscFunctionBegin;
974   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
975   mat->getrowactive = PETSC_TRUE;
976 
977   if (!mat->rowvalues && (idx || v)) {
978     /*
979         allocate enough space to hold information from the longest row.
980     */
981     Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
982     Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
983     int     max = 1,mbs = mat->mbs,tmp;
984     for (i=0; i<mbs; i++) {
985       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
986       if (max < tmp) { max = tmp; }
987     }
988     ierr = PetscMalloc(max*bs2*(sizeof(int)+sizeof(PetscScalar)),&mat->rowvalues);CHKERRQ(ierr);
989     mat->rowindices = (int*)(mat->rowvalues + max*bs2);
990   }
991 
992   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
993   lrow = row - brstart;  /* local row index */
994 
995   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
996   if (!v)   {pvA = 0; pvB = 0;}
997   if (!idx) {pcA = 0; if (!v) pcB = 0;}
998   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
999   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1000   nztot = nzA + nzB;
1001 
1002   cmap  = mat->garray;
1003   if (v  || idx) {
1004     if (nztot) {
1005       /* Sort by increasing column numbers, assuming A and B already sorted */
1006       int imark = -1;
1007       if (v) {
1008         *v = v_p = mat->rowvalues;
1009         for (i=0; i<nzB; i++) {
1010           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1011           else break;
1012         }
1013         imark = i;
1014         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1015         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1016       }
1017       if (idx) {
1018         *idx = idx_p = mat->rowindices;
1019         if (imark > -1) {
1020           for (i=0; i<imark; i++) {
1021             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1022           }
1023         } else {
1024           for (i=0; i<nzB; i++) {
1025             if (cmap[cworkB[i]/bs] < cstart)
1026               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1027             else break;
1028           }
1029           imark = i;
1030         }
1031         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1032         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs ;
1033       }
1034     } else {
1035       if (idx) *idx = 0;
1036       if (v)   *v   = 0;
1037     }
1038   }
1039   *nz = nztot;
1040   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1041   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1042   PetscFunctionReturn(0);
1043 }
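/*
   Note on the routine above: the requested local row is fetched separately from the
   diagonal block A and the off-diagonal block B and merged into mat->rowvalues /
   mat->rowindices in increasing global column order; garray maps the compressed block
   columns of B back to global block columns.  Only locally owned rows may be requested,
   and MatRestoreRow() must be called before the next MatGetRow().
*/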
1044 
1045 #undef __FUNCT__
1046 #define __FUNCT__ "MatRestoreRow_MPISBAIJ"
1047 int MatRestoreRow_MPISBAIJ(Mat mat,int row,int *nz,int **idx,PetscScalar **v)
1048 {
1049   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1050 
1051   PetscFunctionBegin;
1052   if (baij->getrowactive == PETSC_FALSE) {
1053     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1054   }
1055   baij->getrowactive = PETSC_FALSE;
1056   PetscFunctionReturn(0);
1057 }
1058 
1059 #undef __FUNCT__
1060 #define __FUNCT__ "MatGetBlockSize_MPISBAIJ"
1061 int MatGetBlockSize_MPISBAIJ(Mat mat,int *bs)
1062 {
1063   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1064 
1065   PetscFunctionBegin;
1066   *bs = baij->bs;
1067   PetscFunctionReturn(0);
1068 }
1069 
1070 #undef __FUNCT__
1071 #define __FUNCT__ "MatZeroEntries_MPISBAIJ"
1072 int MatZeroEntries_MPISBAIJ(Mat A)
1073 {
1074   Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
1075   int         ierr;
1076 
1077   PetscFunctionBegin;
1078   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
1079   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
1080   PetscFunctionReturn(0);
1081 }
1082 
1083 #undef __FUNCT__
1084 #define __FUNCT__ "MatGetInfo_MPISBAIJ"
1085 int MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1086 {
1087   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
1088   Mat         A = a->A,B = a->B;
1089   int         ierr;
1090   PetscReal   isend[5],irecv[5];
1091 
1092   PetscFunctionBegin;
1093   info->block_size     = (PetscReal)a->bs;
1094   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1095   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1096   isend[3] = info->memory;  isend[4] = info->mallocs;
1097   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1098   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1099   isend[3] += info->memory;  isend[4] += info->mallocs;
1100   if (flag == MAT_LOCAL) {
1101     info->nz_used      = isend[0];
1102     info->nz_allocated = isend[1];
1103     info->nz_unneeded  = isend[2];
1104     info->memory       = isend[3];
1105     info->mallocs      = isend[4];
1106   } else if (flag == MAT_GLOBAL_MAX) {
1107     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);CHKERRQ(ierr);
1108     info->nz_used      = irecv[0];
1109     info->nz_allocated = irecv[1];
1110     info->nz_unneeded  = irecv[2];
1111     info->memory       = irecv[3];
1112     info->mallocs      = irecv[4];
1113   } else if (flag == MAT_GLOBAL_SUM) {
1114     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);CHKERRQ(ierr);
1115     info->nz_used      = irecv[0];
1116     info->nz_allocated = irecv[1];
1117     info->nz_unneeded  = irecv[2];
1118     info->memory       = irecv[3];
1119     info->mallocs      = irecv[4];
1120   } else {
1121     SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
1122   }
1123   info->rows_global       = (PetscReal)A->M;
1124   info->columns_global    = (PetscReal)A->N;
1125   info->rows_local        = (PetscReal)A->m;
1126   info->columns_local     = (PetscReal)A->N;
1127   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1128   info->fill_ratio_needed = 0;
1129   info->factor_mallocs    = 0;
1130   PetscFunctionReturn(0);
1131 }
1132 
1133 #undef __FUNCT__
1134 #define __FUNCT__ "MatSetOption_MPISBAIJ"
1135 int MatSetOption_MPISBAIJ(Mat A,MatOption op)
1136 {
1137   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
1138   int         ierr;
1139 
1140   PetscFunctionBegin;
1141   switch (op) {
1142   case MAT_NO_NEW_NONZERO_LOCATIONS:
1143   case MAT_YES_NEW_NONZERO_LOCATIONS:
1144   case MAT_COLUMNS_UNSORTED:
1145   case MAT_COLUMNS_SORTED:
1146   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1147   case MAT_KEEP_ZEROED_ROWS:
1148   case MAT_NEW_NONZERO_LOCATION_ERR:
1149     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1150     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1151     break;
1152   case MAT_ROW_ORIENTED:
1153     a->roworiented = PETSC_TRUE;
1154     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1155     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1156     break;
1157   case MAT_ROWS_SORTED:
1158   case MAT_ROWS_UNSORTED:
1159   case MAT_YES_NEW_DIAGONALS:
1160   case MAT_USE_SINGLE_PRECISION_SOLVES:
1161     PetscLogInfo(A,"Info:MatSetOption_MPISBAIJ:Option ignored\n");
1162     break;
1163   case MAT_COLUMN_ORIENTED:
1164     a->roworiented = PETSC_FALSE;
1165     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1166     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1167     break;
1168   case MAT_IGNORE_OFF_PROC_ENTRIES:
1169     a->donotstash = PETSC_TRUE;
1170     break;
1171   case MAT_NO_NEW_DIAGONALS:
1172     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1173     break;
1174   case MAT_USE_HASH_TABLE:
1175     a->ht_flag = PETSC_TRUE;
1176     break;
1177   default:
1178     SETERRQ(PETSC_ERR_SUP,"unknown option");
1179     break;
1180   }
1181   PetscFunctionReturn(0);
1182 }
1183 
1184 #undef __FUNCT__
1185 #define __FUNCT__ "MatTranspose_MPISBAIJ"
1186 int MatTranspose_MPISBAIJ(Mat A,Mat *matout)
1187 {
1188   PetscFunctionBegin;
1189   SETERRQ(1,"Matrix is symmetric. MatTranspose() should not be called");
1190   /* PetscFunctionReturn(0); */
1191 }
1192 
1193 #undef __FUNCT__
1194 #define __FUNCT__ "MatDiagonalScale_MPISBAIJ"
1195 int MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1196 {
1197   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1198   Mat         a = baij->A,b = baij->B;
1199   int         ierr,s1,s2,s3;
1200 
1201   PetscFunctionBegin;
1202   if (ll != rr) {
1203     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be same\n");
1204   }
1205   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1206   if (rr) {
1207     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1208     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1209     /* Overlap communication with computation. */
1210     ierr = VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1211     /*} if (ll) { */
1212     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1213     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1214     ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
1215     /* } */
1216   /* scale  the diagonal block */
1217   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1218 
1219   /* if (rr) { */
1220     /* Do a scatter end and then right scale the off-diagonal block */
1221     ierr = VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1222     ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
1223   }
1224 
1225   PetscFunctionReturn(0);
1226 }
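/*
   Usage note (sketch, not compiled here): for the symmetric format only a symmetric
   scaling makes sense, so the same vector must be passed on both sides,

       ierr = MatDiagonalScale(mat,d,d);CHKERRQ(ierr);     producing  diag(d)*mat*diag(d)

   The routine above applies (d,d) to the diagonal block, scales the off-diagonal block B
   by d on the left, and by the scattered ghost values of d on the right.
*/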
1227 
1228 #undef __FUNCT__
1229 #define __FUNCT__ "MatZeroRows_MPISBAIJ"
1230 int MatZeroRows_MPISBAIJ(Mat A,IS is,PetscScalar *diag)
1231 {
1232   Mat_MPISBAIJ   *l = (Mat_MPISBAIJ*)A->data;
1233   int            i,ierr,N,*rows,*owners = l->rowners,size = l->size;
1234   int            *procs,*nprocs,j,idx,nsends,*work,row;
1235   int            nmax,*svalues,*starts,*owner,nrecvs,rank = l->rank;
1236   int            *rvalues,tag = A->tag,count,base,slen,n,*source;
1237   int            *lens,imdex,*lrows,*values,bs=l->bs,rstart_bs=l->rstart_bs;
1238   MPI_Comm       comm = A->comm;
1239   MPI_Request    *send_waits,*recv_waits;
1240   MPI_Status     recv_status,*send_status;
1241   IS             istmp;
1242   PetscTruth     found;
1243 
1244   PetscFunctionBegin;
1245   ierr = ISGetSize(is,&N);CHKERRQ(ierr);
1246   ierr = ISGetIndices(is,&rows);CHKERRQ(ierr);
1247 
1248   /*  first count number of contributors to each processor */
1249   ierr  = PetscMalloc(2*size*sizeof(int),&nprocs);CHKERRQ(ierr);
1250   ierr  = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);
1251   procs = nprocs + size;
1252   ierr  = PetscMalloc((N+1)*sizeof(int),&owner);CHKERRQ(ierr); /* see note*/
1253   for (i=0; i<N; i++) {
1254     idx   = rows[i];
1255     found = PETSC_FALSE;
1256     for (j=0; j<size; j++) {
1257       if (idx >= owners[j]*bs && idx < owners[j+1]*bs) {
1258         nprocs[j]++; procs[j] = 1; owner[i] = j; found = PETSC_TRUE; break;
1259       }
1260     }
1261     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1262   }
1263   nsends = 0;  for (i=0; i<size; i++) { nsends += procs[i];}
1264 
1265   /* inform other processors of number of messages and max length*/
1266   ierr   = PetscMalloc(2*size*sizeof(int),&work);CHKERRQ(ierr);
1267   ierr   = MPI_Allreduce(nprocs,work,2*size,MPI_INT,PetscMaxSum_Op,comm);CHKERRQ(ierr);
1268   nmax   = work[rank];
1269   nrecvs = work[size+rank];
1270   ierr   = PetscFree(work);CHKERRQ(ierr);
1271 
1272   /* post receives:   */
1273   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(int),&rvalues);CHKERRQ(ierr);
1274   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
1275   for (i=0; i<nrecvs; i++) {
1276     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
1277   }
1278 
1279   /* do sends:
1280      1) starts[i] gives the starting index in svalues for stuff going to
1281      the ith processor
1282   */
1283   ierr = PetscMalloc((N+1)*sizeof(int),&svalues);CHKERRQ(ierr);
1284   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
1285   ierr = PetscMalloc((size+1)*sizeof(int),&starts);CHKERRQ(ierr);
1286   starts[0]  = 0;
1287   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1288   for (i=0; i<N; i++) {
1289     svalues[starts[owner[i]]++] = rows[i];
1290   }
1291   ierr = ISRestoreIndices(is,&rows);CHKERRQ(ierr);
1292 
1293   starts[0] = 0;
1294   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[i-1];}
1295   count = 0;
1296   for (i=0; i<size; i++) {
1297     if (procs[i]) {
1298       ierr = MPI_Isend(svalues+starts[i],nprocs[i],MPI_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
1299     }
1300   }
1301   ierr = PetscFree(starts);CHKERRQ(ierr);
1302 
1303   base = owners[rank]*bs;
1304 
1305   /*  wait on receives */
1306   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(int),&lens);CHKERRQ(ierr);
1307   source = lens + nrecvs;
1308   count  = nrecvs; slen = 0;
1309   while (count) {
1310     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
1311     /* unpack receives into our local space */
1312     ierr = MPI_Get_count(&recv_status,MPI_INT,&n);CHKERRQ(ierr);
1313     source[imdex]  = recv_status.MPI_SOURCE;
1314     lens[imdex]    = n;
1315     slen          += n;
1316     count--;
1317   }
1318   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
1319 
1320   /* move the data into the send scatter */
1321   ierr = PetscMalloc((slen+1)*sizeof(int),&lrows);CHKERRQ(ierr);
1322   count = 0;
1323   for (i=0; i<nrecvs; i++) {
1324     values = rvalues + i*nmax;
1325     for (j=0; j<lens[i]; j++) {
1326       lrows[count++] = values[j] - base;
1327     }
1328   }
1329   ierr = PetscFree(rvalues);CHKERRQ(ierr);
1330   ierr = PetscFree(lens);CHKERRQ(ierr);
1331   ierr = PetscFree(owner);CHKERRQ(ierr);
1332   ierr = PetscFree(nprocs);CHKERRQ(ierr);
1333 
1334   /* actually zap the local rows */
1335   ierr = ISCreateGeneral(PETSC_COMM_SELF,slen,lrows,&istmp);CHKERRQ(ierr);
1336   PetscLogObjectParent(A,istmp);
1337 
1338   /*
1339         Zero the required rows. If the "diagonal block" of the matrix
1340      is square and the user wishes to set the diagonal we use separate
1341      code so that MatSetValues() is not called for each diagonal allocating
1342      new memory, thus calling lots of mallocs and slowing things down.
1343 
1344        Contributed by: Mathew Knepley
1345   */
1346   /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1347   ierr = MatZeroRows_SeqBAIJ(l->B,istmp,0);CHKERRQ(ierr);
1348   if (diag && (l->A->M == l->A->N)) {
1349     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,diag);CHKERRQ(ierr);
1350   } else if (diag) {
1351     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1352     if (((Mat_SeqSBAIJ*)l->A->data)->nonew) {
1353       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1354 MAT_NO_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1355     }
1356     for (i=0; i<slen; i++) {
1357       row  = lrows[i] + rstart_bs;
1358       ierr = MatSetValues(A,1,&row,1,&row,diag,INSERT_VALUES);CHKERRQ(ierr);
1359     }
1360     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1361     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1362   } else {
1363     ierr = MatZeroRows_SeqSBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1364   }
1365 
1366   ierr = ISDestroy(istmp);CHKERRQ(ierr);
1367   ierr = PetscFree(lrows);CHKERRQ(ierr);
1368 
1369   /* wait on sends */
1370   if (nsends) {
1371     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
1372     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
1373     ierr = PetscFree(send_status);CHKERRQ(ierr);
1374   }
1375   ierr = PetscFree(send_waits);CHKERRQ(ierr);
1376   ierr = PetscFree(svalues);CHKERRQ(ierr);
1377 
1378   PetscFunctionReturn(0);
1379 }
1380 
1381 #undef __FUNCT__
1382 #define __FUNCT__ "MatPrintHelp_MPISBAIJ"
1383 int MatPrintHelp_MPISBAIJ(Mat A)
1384 {
1385   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1386   MPI_Comm    comm = A->comm;
1387   static int  called = 0;
1388   int         ierr;
1389 
1390   PetscFunctionBegin;
1391   if (!a->rank) {
1392     ierr = MatPrintHelp_SeqSBAIJ(a->A);CHKERRQ(ierr);
1393   }
1394   if (called) {PetscFunctionReturn(0);} else called = 1;
1395   ierr = (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");CHKERRQ(ierr);
1396   ierr = (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");CHKERRQ(ierr);
1397   PetscFunctionReturn(0);
1398 }
1399 
1400 #undef __FUNCT__
1401 #define __FUNCT__ "MatSetUnfactored_MPISBAIJ"
1402 int MatSetUnfactored_MPISBAIJ(Mat A)
1403 {
1404   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1405   int         ierr;
1406 
1407   PetscFunctionBegin;
1408   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1409   PetscFunctionReturn(0);
1410 }
1411 
1412 static int MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1413 
1414 #undef __FUNCT__
1415 #define __FUNCT__ "MatEqual_MPISBAIJ"
1416 int MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1417 {
1418   Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1419   Mat         a,b,c,d;
1420   PetscTruth  flg;
1421   int         ierr;
1422 
1423   PetscFunctionBegin;
1424   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg);CHKERRQ(ierr);
1425   if (!flg) SETERRQ(PETSC_ERR_ARG_INCOMP,"Matrices must be same type");
1426   a = matA->A; b = matA->B;
1427   c = matB->A; d = matB->B;
1428 
1429   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1430   if (flg == PETSC_TRUE) {
1431     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1432   }
1433   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);CHKERRQ(ierr);
1434   PetscFunctionReturn(0);
1435 }
1436 
1437 #undef __FUNCT__
1438 #define __FUNCT__ "MatSetUpPreallocation_MPISBAIJ"
1439 int MatSetUpPreallocation_MPISBAIJ(Mat A)
1440 {
1441   int        ierr;
1442 
1443   PetscFunctionBegin;
1444   ierr = MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1445   PetscFunctionReturn(0);
1446 }
1447 /* -------------------------------------------------------------------*/
1448 static struct _MatOps MatOps_Values = {
1449   MatSetValues_MPISBAIJ,
1450   MatGetRow_MPISBAIJ,
1451   MatRestoreRow_MPISBAIJ,
1452   MatMult_MPISBAIJ,
1453   MatMultAdd_MPISBAIJ,
1454   MatMultTranspose_MPISBAIJ,
1455   MatMultTransposeAdd_MPISBAIJ,
1456   0,
1457   0,
1458   0,
1459   0,
1460   0,
1461   0,
1462   MatRelax_MPISBAIJ,
1463   MatTranspose_MPISBAIJ,
1464   MatGetInfo_MPISBAIJ,
1465   MatEqual_MPISBAIJ,
1466   MatGetDiagonal_MPISBAIJ,
1467   MatDiagonalScale_MPISBAIJ,
1468   MatNorm_MPISBAIJ,
1469   MatAssemblyBegin_MPISBAIJ,
1470   MatAssemblyEnd_MPISBAIJ,
1471   0,
1472   MatSetOption_MPISBAIJ,
1473   MatZeroEntries_MPISBAIJ,
1474   MatZeroRows_MPISBAIJ,
1475   0,
1476   0,
1477   0,
1478   0,
1479   MatSetUpPreallocation_MPISBAIJ,
1480   0,
1481   0,
1482   0,
1483   0,
1484   MatDuplicate_MPISBAIJ,
1485   0,
1486   0,
1487   0,
1488   0,
1489   0,
1490   MatGetSubMatrices_MPISBAIJ,
1491   MatIncreaseOverlap_MPISBAIJ,
1492   MatGetValues_MPISBAIJ,
1493   0,
1494   MatPrintHelp_MPISBAIJ,
1495   MatScale_MPISBAIJ,
1496   0,
1497   0,
1498   0,
1499   MatGetBlockSize_MPISBAIJ,
1500   0,
1501   0,
1502   0,
1503   0,
1504   0,
1505   0,
1506   MatSetUnfactored_MPISBAIJ,
1507   0,
1508   MatSetValuesBlocked_MPISBAIJ,
1509   0,
1510   0,
1511   0,
1512   MatGetPetscMaps_Petsc,
1513   0,
1514   0,
1515   0,
1516   0,
1517   0,
1518   0,
1519   MatGetRowMax_MPISBAIJ};
1520 
1521 
1522 EXTERN_C_BEGIN
1523 #undef __FUNCT__
1524 #define __FUNCT__ "MatGetDiagonalBlock_MPISBAIJ"
1525 int MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1526 {
1527   PetscFunctionBegin;
1528   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1529   *iscopy = PETSC_FALSE;
1530   PetscFunctionReturn(0);
1531 }
1532 EXTERN_C_END
1533 
1534 EXTERN_C_BEGIN
1535 #undef __FUNCT__
1536 #define __FUNCT__ "MatCreate_MPISBAIJ"
1537 int MatCreate_MPISBAIJ(Mat B)
1538 {
1539   Mat_MPISBAIJ *b;
1540   int          ierr;
1541   PetscTruth   flg;
1542 
1543   PetscFunctionBegin;
1544 
1545   ierr    = PetscNew(Mat_MPISBAIJ,&b);CHKERRQ(ierr);
1546   B->data = (void*)b;
1547   ierr    = PetscMemzero(b,sizeof(Mat_MPISBAIJ));CHKERRQ(ierr);
1548   ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1549 
1550   B->ops->destroy    = MatDestroy_MPISBAIJ;
1551   B->ops->view       = MatView_MPISBAIJ;
1552   B->mapping    = 0;
1553   B->factor     = 0;
1554   B->assembled  = PETSC_FALSE;
1555 
1556   B->insertmode = NOT_SET_VALUES;
1557   ierr = MPI_Comm_rank(B->comm,&b->rank);CHKERRQ(ierr);
1558   ierr = MPI_Comm_size(B->comm,&b->size);CHKERRQ(ierr);
1559 
1560   /* build local table of row and column ownerships */
1561   ierr          = PetscMalloc(3*(b->size+2)*sizeof(int),&b->rowners);CHKERRQ(ierr);
1562   b->cowners    = b->rowners + b->size + 2;
1563   b->rowners_bs = b->cowners + b->size + 2;
1564   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1565 
1566   /* build cache for off array entries formed */
1567   ierr = MatStashCreate_Private(B->comm,1,&B->stash);CHKERRQ(ierr);
1568   b->donotstash  = PETSC_FALSE;
1569   b->colmap      = PETSC_NULL;
1570   b->garray      = PETSC_NULL;
1571   b->roworiented = PETSC_TRUE;
1572 
1573 #if defined(PETSC_USE_MAT_SINGLE)
1574   /* stuff for MatSetValues_XXX in single precision */
1575   b->setvalueslen     = 0;
1576   b->setvaluescopy    = PETSC_NULL;
1577 #endif
1578 
1579   /* stuff used in block assembly */
1580   b->barray       = 0;
1581 
1582   /* stuff used for matrix vector multiply */
1583   b->lvec         = 0;
1584   b->Mvctx        = 0;
1585 
1586   /* stuff for MatGetRow() */
1587   b->rowindices   = 0;
1588   b->rowvalues    = 0;
1589   b->getrowactive = PETSC_FALSE;
1590 
1591   /* hash table stuff */
1592   b->ht           = 0;
1593   b->hd           = 0;
1594   b->ht_size      = 0;
1595   b->ht_flag      = PETSC_FALSE;
1596   b->ht_fact      = 0;
1597   b->ht_total_ct  = 0;
1598   b->ht_insert_ct = 0;
1599 
1600   ierr = PetscOptionsHasName(PETSC_NULL,"-mat_use_hash_table",&flg);CHKERRQ(ierr);
1601   if (flg) {
1602     PetscReal fact = 1.39;
1603     ierr = MatSetOption(B,MAT_USE_HASH_TABLE);CHKERRQ(ierr);
1604     ierr = PetscOptionsGetReal(PETSC_NULL,"-mat_use_hash_table",&fact,PETSC_NULL);CHKERRQ(ierr);
1605     if (fact <= 1.0) fact = 1.39;
1606     ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
1607     PetscLogInfo(0,"MatCreateMPISBAIJ:Hash table Factor used %5.2f\n",fact);
1608   }
1609   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1610                                      "MatStoreValues_MPISBAIJ",
1611                                      MatStoreValues_MPISBAIJ);CHKERRQ(ierr);
1612   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1613                                      "MatRetrieveValues_MPISBAIJ",
1614                                      MatRetrieveValues_MPISBAIJ);CHKERRQ(ierr);
1615   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1616                                      "MatGetDiagonalBlock_MPISBAIJ",
1617                                      MatGetDiagonalBlock_MPISBAIJ);CHKERRQ(ierr);
1618   PetscFunctionReturn(0);
1619 }
1620 EXTERN_C_END
1621 
1622 #undef __FUNCT__
1623 #define __FUNCT__ "MatMPISBAIJSetPreallocation"
1624 /*@C
1625    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1626    the user should preallocate the matrix storage by setting the parameters
1627    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1628    performance can be increased by more than a factor of 50.
1629 
1630    Collective on Mat
1631 
1632    Input Parameters:
1633 +  A - the matrix
1634 .  bs   - size of the blocks
1635 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1636            submatrix  (same for all local rows)
1637 .  d_nnz - array containing the number of block nonzeros in the various block rows
1638            in the diagonal portion of the local submatrix (possibly different for each block
1639            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1640 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1641            submatrix (same for all local rows).
1642 -  o_nnz - array containing the number of nonzeros in the various block rows of the
1643            off-diagonal portion of the local submatrix (possibly different for
1644            each block row) or PETSC_NULL.
1645 
1646 
1647    Options Database Keys:
1648 .   -mat_no_unroll - uses code that does not unroll the loops in the
1649                      block calculations (much slower)
1650 .   -mat_block_size - size of the blocks to use
1651 
1652    Notes:
1653 
1654    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1655    then it must be used on all processors that share the object for that argument.
1656 
1657    Storage Information:
1658    For a square global matrix we define each processor's diagonal portion
1659    to be its local rows and the corresponding columns (a square submatrix);
1660    each processor's off-diagonal portion encompasses the remainder of the
1661    local matrix (a rectangular submatrix).
1662 
1663    The user can specify preallocated storage for the diagonal part of
1664    the local submatrix with either d_nz or d_nnz (not both).  Set
1665    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1666    memory allocation.  Likewise, specify preallocated storage for the
1667    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1668 
1669    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1670    the figure below we depict these three local rows and all columns (0-11).
1671 
1672 .vb
1673            0 1 2 3 4 5 6 7 8 9 10 11
1674           -------------------
1675    row 3  |  o o o d d d o o o o o o
1676    row 4  |  o o o d d d o o o o o o
1677    row 5  |  o o o d d d o o o o o o
1678           -------------------
1679 .ve
1680 
1681    Thus, any entries in the d locations are stored in the d (diagonal)
1682    submatrix, and any entries in the o locations are stored in the
1683    o (off-diagonal) submatrix.  Note that the d and the o submatrices are
1684    stored simply in the MATSEQBAIJ format for compressed row storage.
1685 
1686    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1687    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1688    In general, for PDE problems in which most nonzeros are near the diagonal,
1689    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1690    or you will get TERRIBLE performance; see the users' manual chapter on
1691    matrices.
1692 
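   Example (an illustrative sketch only; the communicator, sizes, and preallocation
   counts below are assumed for demonstration and are not taken from this file):

.vb
     Mat A;
     int ierr;
     /* 8x8 global matrix, block size 2, 4 local rows on each of 2 processes */
     ierr = MatCreate(comm,4,4,8,8,&A);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
     ierr = MatMPISBAIJSetPreallocation(A,2,2,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
.ve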
1693    Level: intermediate
1694 
1695 .keywords: matrix, block, aij, compressed row, sparse, parallel
1696 
1697 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1698 @*/
1699 
1700 int MatMPISBAIJSetPreallocation(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1701 {
1702   Mat_MPISBAIJ *b;
1703   int          ierr,i,mbs,Mbs;
1704   PetscTruth   flg2;
1705 
1706   PetscFunctionBegin;
1707   ierr = PetscTypeCompare((PetscObject)B,MATMPISBAIJ,&flg2);CHKERRQ(ierr);
1708   if (!flg2) PetscFunctionReturn(0);
1709 
1710   ierr = PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1711 
1712   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1713   if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1714   if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1715   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %d",d_nz);
1716   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %d",o_nz);
1717   if (d_nnz) {
1718     for (i=0; i<B->m/bs; i++) {
1719       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %d value %d",i,d_nnz[i]);
1720     }
1721   }
1722   if (o_nnz) {
1723     for (i=0; i<B->m/bs; i++) {
1724       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %d value %d",i,o_nnz[i]);
1725     }
1726   }
1727   B->preallocated = PETSC_TRUE;
1728   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);CHKERRQ(ierr);
1729   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);CHKERRQ(ierr);
1730   ierr = PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);CHKERRQ(ierr);
1731   ierr = PetscMapCreateMPI(B->comm,B->m,B->M,&B->cmap);CHKERRQ(ierr);
1732 
1733   b   = (Mat_MPISBAIJ*)B->data;
1734   mbs = B->m/bs;
1735   Mbs = B->M/bs;
1736   if (mbs*bs != B->m) {
1737     SETERRQ2(PETSC_ERR_ARG_SIZ,"Number of local rows %d must be divisible by blocksize %d",B->m,bs);
1738   }
1739 
1740   b->bs  = bs;
1741   b->bs2 = bs*bs;
1742   b->mbs = mbs;
1743   b->nbs = mbs;
1744   b->Mbs = Mbs;
1745   b->Nbs = Mbs;
1746 
1747   ierr = MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
1748   b->rowners[0]    = 0;
1749   for (i=2; i<=b->size; i++) {
1750     b->rowners[i] += b->rowners[i-1];
1751   }
1752   b->rstart    = b->rowners[b->rank];
1753   b->rend      = b->rowners[b->rank+1];
1754   b->cstart    = b->rstart;
1755   b->cend      = b->rend;
1756   for (i=0; i<=b->size; i++) {
1757     b->rowners_bs[i] = b->rowners[i]*bs;
1758   }
1759   b->rstart_bs = b->rstart*bs;
1760   b->rend_bs   = b->rend*bs;
1761 
1762   b->cstart_bs = b->cstart*bs;
1763   b->cend_bs   = b->cend*bs;
1764 
1765 
1766   ierr = MatCreateSeqSBAIJ(PETSC_COMM_SELF,bs,B->m,B->m,d_nz,d_nnz,&b->A);CHKERRQ(ierr);
1767   PetscLogObjectParent(B,b->A);
1768   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->M,o_nz,o_nnz,&b->B);CHKERRQ(ierr);
1769   PetscLogObjectParent(B,b->B);
1770 
1771   /* build cache for off array entries formed */
1772   ierr = MatStashCreate_Private(B->comm,bs,&B->bstash);CHKERRQ(ierr);
1773 
1774   PetscFunctionReturn(0);
1775 }
1776 
1777 #undef __FUNCT__
1778 #define __FUNCT__ "MatCreateMPISBAIJ"
1779 /*@C
1780    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1781    (block compressed row).  For good matrix assembly performance
1782    the user should preallocate the matrix storage by setting the parameters
1783    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1784    performance can be increased by more than a factor of 50.
1785 
1786    Collective on MPI_Comm
1787 
1788    Input Parameters:
1789 +  comm - MPI communicator
1790 .  bs   - size of the blocks
1791 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1792            This value should be the same as the local size used in creating the
1793            y vector for the matrix-vector product y = Ax.
1794 .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1795            This value should be the same as the local size used in creating the
1796            x vector for the matrix-vector product y = Ax.
1797 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1798 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1799 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1800            submatrix  (same for all local rows)
1801 .  d_nnz - array containing the number of block nonzeros in the various block rows
1802            in the diagonal portion of the local submatrix (possibly different for each block
1803            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
1804 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1805            submatrix (same for all local rows).
1806 -  o_nnz - array containing the number of nonzeros in the various block rows of the
1807            off-diagonal portion of the local submatrix (possibly different for
1808            each block row) or PETSC_NULL.
1809 
1810    Output Parameter:
1811 .  A - the matrix
1812 
1813    Options Database Keys:
1814 .   -mat_no_unroll - uses code that does not unroll the loops in the
1815                      block calculations (much slower)
1816 .   -mat_block_size - size of the blocks to use
1817 .   -mat_mpi - use the parallel matrix data structures even on one processor
1818                (defaults to using SeqBAIJ format on one processor)
1819 
1820    Notes:
1821    The user MUST specify either the local or global matrix dimensions
1822    (possibly both).
1823 
1824    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1825    then it must be used on all processors that share the object for that argument.
1826 
1827    Storage Information:
1828    For a square global matrix we define each processor's diagonal portion
1829    to be its local rows and the corresponding columns (a square submatrix);
1830    each processor's off-diagonal portion encompasses the remainder of the
1831    local matrix (a rectangular submatrix).
1832 
1833    The user can specify preallocated storage for the diagonal part of
1834    the local submatrix with either d_nz or d_nnz (not both).  Set
1835    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1836    memory allocation.  Likewise, specify preallocated storage for the
1837    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1838 
1839    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1840    the figure below we depict these three local rows and all columns (0-11).
1841 
1842 .vb
1843            0 1 2 3 4 5 6 7 8 9 10 11
1844           -------------------
1845    row 3  |  o o o d d d o o o o o o
1846    row 4  |  o o o d d d o o o o o o
1847    row 5  |  o o o d d d o o o o o o
1848           -------------------
1849 .ve
1850 
1851    Thus, any entries in the d locations are stored in the d (diagonal)
1852    submatrix, and any entries in the o locations are stored in the
1853    o (off-diagonal) submatrix.  Note that the d and the o submatrices are
1854    stored simply in the MATSEQBAIJ format for compressed row storage.
1855 
1856    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
1857    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1858    In general, for PDE problems in which most nonzeros are near the diagonal,
1859    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1860    or you will get TERRIBLE performance; see the users' manual chapter on
1861    matrices.
1862 
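   Example (an illustrative sketch only; the communicator, sizes, and preallocation
   counts below are assumed for demonstration):

.vb
     Mat A;
     int ierr;
     /* 2 processes, block size 2, each process owns 4 of the 8 global rows */
     ierr = MatCreateMPISBAIJ(comm,2,4,4,8,8,2,PETSC_NULL,1,PETSC_NULL,&A);CHKERRQ(ierr);
.ve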
1863    Level: intermediate
1864 
1865 .keywords: matrix, block, aij, compressed row, sparse, parallel
1866 
1867 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1868 @*/
1869 
1870 int MatCreateMPISBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,int *d_nnz,int o_nz,int *o_nnz,Mat *A)
1871 {
1872   int ierr,size;
1873 
1874   PetscFunctionBegin;
1875   ierr = MatCreate(comm,m,n,M,N,A);CHKERRQ(ierr);
1876   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1877   if (size > 1) {
1878     ierr = MatSetType(*A,MATMPISBAIJ);CHKERRQ(ierr);
1879     ierr = MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
1880   } else {
1881     ierr = MatSetType(*A,MATSEQSBAIJ);CHKERRQ(ierr);
1882     ierr = MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
1883   }
1884   PetscFunctionReturn(0);
1885 }
1886 
1887 
1888 #undef __FUNCT__
1889 #define __FUNCT__ "MatDuplicate_MPISBAIJ"
1890 static int MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1891 {
1892   Mat          mat;
1893   Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1894   int          ierr,len=0;
1895 
1896   PetscFunctionBegin;
1897   *newmat       = 0;
1898   ierr = MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);CHKERRQ(ierr);
1899   ierr = MatSetType(mat,MATMPISBAIJ);CHKERRQ(ierr);
1900   mat->preallocated = PETSC_TRUE;
1901   a = (Mat_MPISBAIJ*)mat->data;
1902   a->bs  = oldmat->bs;
1903   a->bs2 = oldmat->bs2;
1904   a->mbs = oldmat->mbs;
1905   a->nbs = oldmat->nbs;
1906   a->Mbs = oldmat->Mbs;
1907   a->Nbs = oldmat->Nbs;
1908 
1909   a->rstart       = oldmat->rstart;
1910   a->rend         = oldmat->rend;
1911   a->cstart       = oldmat->cstart;
1912   a->cend         = oldmat->cend;
1913   a->size         = oldmat->size;
1914   a->rank         = oldmat->rank;
1915   a->donotstash   = oldmat->donotstash;
1916   a->roworiented  = oldmat->roworiented;
1917   a->rowindices   = 0;
1918   a->rowvalues    = 0;
1919   a->getrowactive = PETSC_FALSE;
1920   a->barray       = 0;
1921   a->rstart_bs    = oldmat->rstart_bs;
1922   a->rend_bs      = oldmat->rend_bs;
1923   a->cstart_bs    = oldmat->cstart_bs;
1924   a->cend_bs      = oldmat->cend_bs;
1925 
1926   /* hash table stuff */
1927   a->ht           = 0;
1928   a->hd           = 0;
1929   a->ht_size      = 0;
1930   a->ht_flag      = oldmat->ht_flag;
1931   a->ht_fact      = oldmat->ht_fact;
1932   a->ht_total_ct  = 0;
1933   a->ht_insert_ct = 0;
1934 
1935   ierr = PetscMalloc(3*(a->size+2)*sizeof(int),&a->rowners);CHKERRQ(ierr);
1936   PetscLogObjectMemory(mat,3*(a->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1937   a->cowners    = a->rowners + a->size + 2;
1938   a->rowners_bs = a->cowners + a->size + 2;
1939   ierr = PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
1940   ierr = MatStashCreate_Private(matin->comm,1,&mat->stash);CHKERRQ(ierr);
1941   ierr = MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);CHKERRQ(ierr);
1942   if (oldmat->colmap) {
1943 #if defined (PETSC_USE_CTABLE)
1944     ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
1945 #else
1946     ierr = PetscMalloc((a->Nbs)*sizeof(int),&a->colmap);CHKERRQ(ierr);
1947     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(int));
1948     ierr = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));CHKERRQ(ierr);
1949 #endif
1950   } else a->colmap = 0;
1951   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
1952     ierr = PetscMalloc(len*sizeof(int),&a->garray);CHKERRQ(ierr);
1953     PetscLogObjectMemory(mat,len*sizeof(int));
1954     ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));CHKERRQ(ierr);
1955   } else a->garray = 0;
1956 
1957   ierr =  VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
1958   PetscLogObjectParent(mat,a->lvec);
1959   ierr =  VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
1960 
1961   PetscLogObjectParent(mat,a->Mvctx);
1962   ierr =  MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
1963   PetscLogObjectParent(mat,a->A);
1964   ierr =  MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
1965   PetscLogObjectParent(mat,a->B);
1966   ierr = PetscFListDuplicate(matin->qlist,&mat->qlist);CHKERRQ(ierr);
1967   *newmat = mat;
1968   PetscFunctionReturn(0);
1969 }
1970 
1971 #include "petscsys.h"
1972 
1973 EXTERN_C_BEGIN
1974 #undef __FUNCT__
1975 #define __FUNCT__ "MatLoad_MPISBAIJ"
1976 int MatLoad_MPISBAIJ(PetscViewer viewer,MatType type,Mat *newmat)
1977 {
1978   Mat          A;
1979   int          i,nz,ierr,j,rstart,rend,fd;
1980   PetscScalar  *vals,*buf;
1981   MPI_Comm     comm = ((PetscObject)viewer)->comm;
1982   MPI_Status   status;
1983   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
1984   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
1985   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
1986   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
1987   int          dcount,kmax,k,nzcount,tmp;
1988 
1989   PetscFunctionBegin;
1990   ierr = PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1991 
1992   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1993   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1994   if (!rank) {
1995     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
1996     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
1997     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
1998     if (header[3] < 0) {
1999       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2000     }
2001   }
2002 
2003   ierr = MPI_Bcast(header+1,3,MPI_INT,0,comm);CHKERRQ(ierr);
2004   M = header[1]; N = header[2];
2005 
2006   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2007 
2008   /*
2009      This code adds extra rows to make sure the number of rows is
2010      divisible by the blocksize
2011   */
2012   Mbs        = M/bs;
2013   extra_rows = bs - M + bs*(Mbs);
2014   if (extra_rows == bs) extra_rows = 0;
2015   else                  Mbs++;
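  /* for example (illustrative only): M=10 and bs=4 give Mbs=2 and extra_rows=2, so Mbs is
     bumped to 3; each padded row later receives a single unit entry on the diagonal */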
2016   if (extra_rows && !rank) {
2017     PetscLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2018   }
2019 
2020   /* determine ownership of all rows */
2021   mbs        = Mbs/size + ((Mbs % size) > rank);
2022   m          = mbs*bs;
2023   ierr       = PetscMalloc(2*(size+2)*sizeof(int),&rowners);CHKERRQ(ierr);
2024   browners   = rowners + size + 1;
2025   ierr       = MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
2026   rowners[0] = 0;
2027   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2028   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2029   rstart = rowners[rank];
2030   rend   = rowners[rank+1];
2031 
2032   /* distribute row lengths to all processors */
2033   ierr = PetscMalloc((rend-rstart)*bs*sizeof(int),&locrowlens);CHKERRQ(ierr);
2034   if (!rank) {
2035     ierr = PetscMalloc((M+extra_rows)*sizeof(int),&rowlengths);CHKERRQ(ierr);
2036     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
2037     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2038     ierr = PetscMalloc(size*sizeof(int),&sndcounts);CHKERRQ(ierr);
2039     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2040     ierr = MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2041     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
2042   } else {
2043     ierr = MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2044   }
2045 
2046   if (!rank) {   /* procs[0] */
2047     /* calculate the number of nonzeros on each processor */
2048     ierr = PetscMalloc(size*sizeof(int),&procsnz);CHKERRQ(ierr);
2049     ierr = PetscMemzero(procsnz,size*sizeof(int));CHKERRQ(ierr);
2050     for (i=0; i<size; i++) {
2051       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2052         procsnz[i] += rowlengths[j];
2053       }
2054     }
2055     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2056 
2057     /* determine max buffer needed and allocate it */
2058     maxnz = 0;
2059     for (i=0; i<size; i++) {
2060       maxnz = PetscMax(maxnz,procsnz[i]);
2061     }
2062     ierr = PetscMalloc(maxnz*sizeof(int),&cols);CHKERRQ(ierr);
2063 
2064     /* read in my part of the matrix column indices  */
2065     nz     = procsnz[0];
2066     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2067     mycols = ibuf;
2068     if (size == 1)  nz -= extra_rows;
2069     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2070     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2071 
2072     /* read in the column indices for all other processors (except the last) and ship them off */
2073     for (i=1; i<size-1; i++) {
2074       nz   = procsnz[i];
2075       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2076       ierr = MPI_Send(cols,nz,MPI_INT,i,tag,comm);CHKERRQ(ierr);
2077     }
2078     /* read in the stuff for the last proc */
2079     if (size != 1) {
2080       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2081       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2082       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2083       ierr = MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);CHKERRQ(ierr);
2084     }
2085     ierr = PetscFree(cols);CHKERRQ(ierr);
2086   } else {  /* procs[i], i>0 */
2087     /* determine buffer space needed for message */
2088     nz = 0;
2089     for (i=0; i<m; i++) {
2090       nz += locrowlens[i];
2091     }
2092     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2093     mycols = ibuf;
2094     /* receive message of column indices*/
2095     ierr = MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
2096     ierr = MPI_Get_count(&status,MPI_INT,&maxnz);CHKERRQ(ierr);
2097     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2098   }
2099 
2100   /* loop over local rows, determining number of off diagonal entries */
2101   ierr     = PetscMalloc(2*(rend-rstart+1)*sizeof(int),&dlens);CHKERRQ(ierr);
2102   odlens   = dlens + (rend-rstart);
2103   ierr     = PetscMalloc(3*Mbs*sizeof(int),&mask);CHKERRQ(ierr);
2104   ierr     = PetscMemzero(mask,3*Mbs*sizeof(int));CHKERRQ(ierr);
2105   masked1  = mask    + Mbs;
2106   masked2  = masked1 + Mbs;
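  /* mask[] flags the block columns already seen in the current block row; masked1[]/masked2[]
     record which flags were set (diagonal vs off-diagonal block) so that only those entries
     need to be reset at the end of each block row */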
2107   rowcount = 0; nzcount = 0;
2108   for (i=0; i<mbs; i++) {
2109     dcount  = 0;
2110     odcount = 0;
2111     for (j=0; j<bs; j++) {
2112       kmax = locrowlens[rowcount];
2113       for (k=0; k<kmax; k++) {
2114         tmp = mycols[nzcount++]/bs; /* block col. index */
2115         if (!mask[tmp]) {
2116           mask[tmp] = 1;
2117           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2118           else masked1[dcount++] = tmp; /* entry in diag portion */
2119         }
2120       }
2121       rowcount++;
2122     }
2123 
2124     dlens[i]  = dcount;  /* d_nzz[i] */
2125     odlens[i] = odcount; /* o_nzz[i] */
2126 
2127     /* zero out the mask elements we set */
2128     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2129     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2130   }
2131 
2132   /* create our matrix */
2133   ierr = MatCreateMPISBAIJ(comm,bs,m,m,PETSC_DETERMINE,PETSC_DETERMINE,0,dlens,0,odlens,newmat);
2134   CHKERRQ(ierr);
2135   A = *newmat;
2136   ierr = MatSetOption(A,MAT_COLUMNS_SORTED);CHKERRQ(ierr);
2137 
2138   if (!rank) {
2139     ierr = PetscMalloc(maxnz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2140     /* read in my part of the matrix numerical values  */
2141     nz = procsnz[0];
2142     vals = buf;
2143     mycols = ibuf;
2144     if (size == 1)  nz -= extra_rows;
2145     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2146     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2147 
2148     /* insert into matrix */
2149     jj      = rstart*bs;
2150     for (i=0; i<m; i++) {
2151       ierr = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2152       mycols += locrowlens[i];
2153       vals   += locrowlens[i];
2154       jj++;
2155     }
2156 
2157     /* read in other processors (except the last one) and ship out */
2158     for (i=1; i<size-1; i++) {
2159       nz   = procsnz[i];
2160       vals = buf;
2161       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2162       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);CHKERRQ(ierr);
2163     }
2164     /* the last proc */
2165     if (size != 1){
2166       nz   = procsnz[size-1] - extra_rows;
2167       vals = buf;
2168       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2169       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2170       ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);CHKERRQ(ierr);
2171     }
2172     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2173 
2174   } else {
2175     /* receive numeric values */
2176     ierr = PetscMalloc(nz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2177 
2178     /* receive message of values*/
2179     vals   = buf;
2180     mycols = ibuf;
2181     ierr   = MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);CHKERRQ(ierr);
2182     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2183     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2184 
2185     /* insert into matrix */
2186     jj      = rstart*bs;
2187     for (i=0; i<m; i++) {
2188       ierr    = MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2189       mycols += locrowlens[i];
2190       vals   += locrowlens[i];
2191       jj++;
2192     }
2193   }
2194 
2195   ierr = PetscFree(locrowlens);CHKERRQ(ierr);
2196   ierr = PetscFree(buf);CHKERRQ(ierr);
2197   ierr = PetscFree(ibuf);CHKERRQ(ierr);
2198   ierr = PetscFree(rowners);CHKERRQ(ierr);
2199   ierr = PetscFree(dlens);CHKERRQ(ierr);
2200   ierr = PetscFree(mask);CHKERRQ(ierr);
2201   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2202   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2203   PetscFunctionReturn(0);
2204 }
2205 EXTERN_C_END
2206 
2207 #undef __FUNCT__
2208 #define __FUNCT__ "MatMPISBAIJSetHashTableFactor"
2209 /*@
2210    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2211 
2212    Input Parameters:
2213 .  mat  - the matrix
2214 .  fact - factor
2215 
2216    Collective on Mat
2217 
2218    Level: advanced
2219 
2220   Notes:
2221    This can also be set by the command line option: -mat_use_hash_table fact
2222 
2223 .keywords: matrix, hashtable, factor, HT
2224 
2225 .seealso: MatSetOption()
2226 @*/
2227 int MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2228 {
2229   PetscFunctionBegin;
2230   SETERRQ(1,"Function not yet written for SBAIJ format");
2231   /* PetscFunctionReturn(0); */
2232 }
2233 
2234 #undef __FUNCT__
2235 #define __FUNCT__ "MatGetRowMax_MPISBAIJ"
2236 int MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2237 {
2238   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2239   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(a->B)->data;
2240   PetscReal    atmp;
2241   PetscReal    *work,*svalues,*rvalues;
2242   int          ierr,i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2243   int          rank,size,*rowners_bs,dest,count,source;
2244   PetscScalar  *va;
2245   MatScalar    *ba;
2246   MPI_Status   stat;
2247 
2248   PetscFunctionBegin;
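  /* Since only the upper triangular part is stored, the off-diagonal block B contributes both
     to the maxima of the local rows (accumulated directly in va) and to rows owned by
     higher-ranked processes (accumulated in work[] and shipped to their owners below). */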
2249   ierr = MatGetRowMax(a->A,v);CHKERRQ(ierr);
2250   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2251 
2252   ierr = MPI_Comm_size(PETSC_COMM_WORLD,&size);CHKERRQ(ierr);
2253   ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);
2254 
2255   bs   = a->bs;
2256   mbs  = a->mbs;
2257   Mbs  = a->Mbs;
2258   ba   = b->a;
2259   bi   = b->i;
2260   bj   = b->j;
2261   /*
2262   PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] M: %d, bs: %d, mbs: %d \n",rank,bs*Mbs,bs,mbs);
2263   PetscSynchronizedFlush(PETSC_COMM_WORLD);
2264   */
2265 
2266   /* find ownerships */
2267   rowners_bs = a->rowners_bs;
2268   /*
2269   if (!rank){
2270     for (i=0; i<size+1; i++) PetscPrintf(PETSC_COMM_SELF," rowners_bs[%d]: %d\n",i,rowners_bs[i]);
2271   }
2272   */
2273 
2274   /* each proc creates an array to be distributed */
2275   ierr = PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);CHKERRQ(ierr);
2276   ierr = PetscMemzero(work,bs*Mbs*sizeof(PetscReal));CHKERRQ(ierr);
2277 
2278   /* row_max for B */
2279   if (rank != size-1){
2280     for (i=0; i<mbs; i++) {
2281       ncols = bi[1] - bi[0]; bi++;
2282       brow  = bs*i;
2283       for (j=0; j<ncols; j++){
2284         bcol = bs*(*bj);
2285         for (kcol=0; kcol<bs; kcol++){
2286           col = bcol + kcol;                 /* local col index */
2287           col += rowners_bs[rank+1];      /* global col index */
2288           /* PetscPrintf(PETSC_COMM_SELF,"[%d], col: %d\n",rank,col); */
2289           for (krow=0; krow<bs; krow++){
2290             atmp = PetscAbsScalar(*ba); ba++;
2291             row = brow + krow;    /* local row index */
2292             /* printf("val[%d,%d]: %g\n",row,col,atmp); */
2293             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2294             if (work[col] < atmp) work[col] = atmp;
2295           }
2296         }
2297         bj++;
2298       }
2299     }
2300     /*
2301       PetscPrintf(PETSC_COMM_SELF,"[%d], work: ",rank);
2302       for (i=0; i<bs*Mbs; i++) PetscPrintf(PETSC_COMM_SELF,"%g ",work[i]);
2303       PetscPrintf(PETSC_COMM_SELF,"[%d]: \n");
2304       */
2305 
2306     /* send values to its owners */
2307     for (dest=rank+1; dest<size; dest++){
2308       svalues = work + rowners_bs[dest];
2309       count   = rowners_bs[dest+1]-rowners_bs[dest];
2310       ierr    = MPI_Send(svalues,count,MPIU_REAL,dest,rank,PETSC_COMM_WORLD);CHKERRQ(ierr);
2311       /*
2312       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] sends %d values to [%d]: %g, %g, %g, %g\n",rank,count,dest,svalues[0],svalues[1],svalues[2],svalues[3]);
2313       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2314       */
2315     }
2316   }
2317 
2318   /* receive values */
2319   if (rank){
2320     rvalues = work;
2321     count   = rowners_bs[rank+1]-rowners_bs[rank];
2322     for (source=0; source<rank; source++){
2323       ierr = MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,PETSC_COMM_WORLD,&stat);CHKERRQ(ierr);
2324       /* process values */
2325       for (i=0; i<count; i++){
2326         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2327       }
2328       /*
2329       PetscSynchronizedPrintf(PETSC_COMM_WORLD,"[%d] received %d values from [%d]: %g, %g, %g, %g \n",rank,count,stat.MPI_SOURCE,rvalues[0],rvalues[1],rvalues[2],rvalues[3]);
2330       PetscSynchronizedFlush(PETSC_COMM_WORLD);
2331       */
2332     }
2333   }
2334 
2335   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2336   ierr = PetscFree(work);CHKERRQ(ierr);
2337   PetscFunctionReturn(0);
2338 }
2339 
2340 #undef __FUNCT__
2341 #define __FUNCT__ "MatRelax_MPISBAIJ"
2342 int MatRelax_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,int its,int lits,Vec xx)
2343 {
2344   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2345   int            ierr;
2346   PetscScalar    mone=-1.0;
2347   Vec            lvec1,bb1;
2348 
2349   PetscFunctionBegin;
2350   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %d and local its %d both positive",its,lits);
2351   if (mat->bs > 1)
2352     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2353 
2354   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2355     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2356       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
2357       its--;
2358     }
2359 
2360     ierr = VecDuplicate(mat->lvec,&lvec1);CHKERRQ(ierr);
2361     ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
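    /* Each iteration builds bb1 = bb - (off-diagonal contributions, applied as both B and B^T
       because only the upper triangle is stored) and then runs a local symmetric sweep on the
       diagonal block with bb1 as the right-hand side. */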
2362     while (its--){
2363       ierr = VecScatterBegin(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);CHKERRQ(ierr);
2364 
2365       /* lower diagonal part: bb1 = bb - B^T*xx */
2366       ierr = (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);CHKERRQ(ierr);
2367       ierr = VecScale(&mone,lvec1);CHKERRQ(ierr);
2368 
2369       ierr = VecScatterEnd(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);CHKERRQ(ierr);
2370       ierr = VecCopy(bb,bb1);CHKERRQ(ierr);
2371       ierr = VecScatterBegin(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);CHKERRQ(ierr);
2372 
2373       /* upper diagonal part: bb1 = bb1 - B*x */
2374       ierr = VecScale(&mone,mat->lvec);CHKERRQ(ierr);
2375       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);CHKERRQ(ierr);
2376 
2377       ierr = VecScatterEnd(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);CHKERRQ(ierr);
2378 
2379       /* diagonal sweep */
2380       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,PETSC_NULL,xx);CHKERRQ(ierr);
2381     }
2382     ierr = VecDestroy(lvec1);CHKERRQ(ierr);
2383     ierr = VecDestroy(bb1);CHKERRQ(ierr);
2384   } else {
2385     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2386   }
2387   PetscFunctionReturn(0);
2388 }
2389 
2390