/*$Id: mpisbaij.c,v 1.61 2001/08/10 03:31:37 bsmith Exp $*/

#include "src/mat/impls/baij/mpi/mpibaij.h"    /*I "petscmat.h" I*/
#include "src/vec/vecimpl.h"
#include "mpisbaij.h"
#include "src/mat/impls/sbaij/seq/sbaij.h"

extern int MatSetUpMultiply_MPISBAIJ(Mat);
extern int MatSetUpMultiply_MPISBAIJ_2comm(Mat);
extern int DisAssemble_MPISBAIJ(Mat);
extern int MatGetValues_SeqSBAIJ(Mat,int,const int[],int,const int[],PetscScalar []);
extern int MatSetValues_SeqSBAIJ(Mat,int,const int [],int,const int [],const PetscScalar [],InsertMode);
extern int MatSetValuesBlocked_SeqSBAIJ(Mat,int,const int[],int,const int[],const PetscScalar[],InsertMode);
extern int MatGetRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
extern int MatRestoreRow_SeqSBAIJ(Mat,int,int*,int**,PetscScalar**);
extern int MatPrintHelp_SeqSBAIJ(Mat);
extern int MatZeroRows_SeqSBAIJ(Mat,IS,PetscScalar*);
extern int MatZeroRows_SeqBAIJ(Mat,IS,PetscScalar *);
extern int MatGetRowMax_MPISBAIJ(Mat,Vec);
extern int MatRelax_MPISBAIJ(Mat,Vec,PetscReal,MatSORType,PetscReal,int,int,Vec);

/*  UGLY, ugly, ugly
   When MatScalar == PetscScalar the function MatSetValuesBlocked_MPISBAIJ_MatScalar() does
   not exist. Otherwise ..._MatScalar() takes matrix elements in single precision and
   inserts them directly into the single precision data structure. The function
   MatSetValuesBlocked_MPISBAIJ() converts the entries into single precision and then
   calls ..._MatScalar() to put them into the single precision data structures.
*/
#if defined(PETSC_USE_MAT_SINGLE)
extern int MatSetValuesBlocked_SeqSBAIJ_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
extern int MatSetValues_MPISBAIJ_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
extern int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
extern int MatSetValues_MPISBAIJ_HT_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
extern int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat,int,const int[],int,const int[],const MatScalar[],InsertMode);
#else
#define MatSetValuesBlocked_SeqSBAIJ_MatScalar      MatSetValuesBlocked_SeqSBAIJ
#define MatSetValues_MPISBAIJ_MatScalar             MatSetValues_MPISBAIJ
#define MatSetValuesBlocked_MPISBAIJ_MatScalar      MatSetValuesBlocked_MPISBAIJ
#define MatSetValues_MPISBAIJ_HT_MatScalar          MatSetValues_MPISBAIJ_HT
#define MatSetValuesBlocked_MPISBAIJ_HT_MatScalar   MatSetValuesBlocked_MPISBAIJ_HT
#endif

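/*
   MatStoreValues()/MatRetrieveValues() save and restore the numerical values of
   the matrix while keeping its nonzero structure; here they simply forward to
   the two sequential blocks A (diagonal) and B (off-diagonal). An illustrative
   usage sketch, assuming the nonzero pattern is reused across repeated
   assemblies (MatStoreValues() requires that no new nonzero locations be added):

     ierr = MatSetOption(mat,MAT_NO_NEW_NONZERO_LOCATIONS);CHKERRQ(ierr);
     ierr = MatStoreValues(mat);CHKERRQ(ierr);      ...saves the current values
     ... change values with MatSetValues(); MatAssemblyBegin()/End() ...
     ierr = MatRetrieveValues(mat);CHKERRQ(ierr);   ...restores the saved values
*/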
EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatStoreValues_MPISBAIJ"
int MatStoreValues_MPISBAIJ(Mat mat)
{
  Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
  int          ierr;

  PetscFunctionBegin;
  ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
  ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END

EXTERN_C_BEGIN
#undef __FUNCT__
#define __FUNCT__ "MatRetrieveValues_MPISBAIJ"
int MatRetrieveValues_MPISBAIJ(Mat mat)
{
  Mat_MPISBAIJ *aij = (Mat_MPISBAIJ *)mat->data;
  int          ierr;

  PetscFunctionBegin;
  ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
  ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END

/*
     Local utility routine that creates a mapping from the global column
   number to the local number in the off-diagonal part of the local
   storage of the matrix.  This is done in a non-scalable way since the
   length of colmap equals the global matrix length.
*/
#undef __FUNCT__
#define __FUNCT__ "CreateColmap_MPISBAIJ_Private"
static int CreateColmap_MPISBAIJ_Private(Mat mat)
{
  PetscFunctionBegin;
  SETERRQ(1,"Function not yet written for SBAIJ format");
  /* PetscFunctionReturn(0); */
}
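
/*
   Sketch of what this routine is meant to build, modeled on the MPIBAIJ
   version (an assumption; this SBAIJ variant is not yet written). colmap maps
   a global block column to 1 + the first local column of that block in B,
   with 0 meaning "block not present", matching how colmap is consumed below in
   MatSetValues_MPISBAIJ_MatScalar(). Here nbs denotes the (hypothetical local
   variable for the) number of block columns of B:

     ierr = PetscMalloc(baij->Nbs*sizeof(int),&baij->colmap);CHKERRQ(ierr);
     ierr = PetscMemzero(baij->colmap,baij->Nbs*sizeof(int));CHKERRQ(ierr);
     for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*baij->bs + 1;
*/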

#define CHUNKSIZE  10
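
/*
   The two macros below insert one scalar into the local A (diagonal) or B
   (off-diagonal) block: a binary search narrows the block row to a window of
   at most three entries, a short linear scan finds or places the block column,
   and if the row is full the arrays are reallocated with room for CHUNKSIZE
   extra blocks and the tail entries are shifted up.
*/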

#define  MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv) \
{ \
 \
    brow = row/bs;  \
    rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
    rmax = aimax[brow]; nrow = ailen[brow]; \
      bcol = col/bs; \
      ridx = row % bs; cidx = col % bs; \
      low = 0; high = nrow; \
      while (high-low > 3) { \
        t = (low+high)/2; \
        if (rp[t] > bcol) high = t; \
        else              low  = t; \
      } \
      for (_i=low; _i<high; _i++) { \
        if (rp[_i] > bcol) break; \
        if (rp[_i] == bcol) { \
          bap  = ap +  bs2*_i + bs*cidx + ridx; \
          if (addv == ADD_VALUES) *bap += value;  \
          else                    *bap  = value;  \
          goto a_noinsert; \
        } \
      } \
      if (a->nonew == 1) goto a_noinsert; \
      else if (a->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
      if (nrow >= rmax) { \
        /* there is no extra room in row, therefore enlarge */ \
        int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
        MatScalar *new_a; \
 \
        if (a->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
 \
        /* malloc new storage space */ \
        len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
        ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
        new_j = (int*)(new_a + bs2*new_nz); \
        new_i = new_j + new_nz; \
 \
        /* copy over old data into new slots */ \
        for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
        for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
        ierr = PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
        len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
        ierr = PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
        ierr = PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
        ierr = PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
        ierr = PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
                    aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
        /* free up old matrix storage */ \
        ierr = PetscFree(a->a);CHKERRQ(ierr);  \
        if (!a->singlemalloc) { \
          ierr = PetscFree(a->i);CHKERRQ(ierr); \
          ierr = PetscFree(a->j);CHKERRQ(ierr); \
        } \
        aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
        a->singlemalloc = PETSC_TRUE; \
 \
        rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
        rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
        PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
        a->s_maxnz += bs2*CHUNKSIZE; \
        a->reallocs++; \
        a->s_nz++; \
      } \
      N = nrow++ - 1;  \
      /* shift up all the later entries in this row */ \
      for (ii=N; ii>=_i; ii--) { \
        rp[ii+1] = rp[ii]; \
        ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
      } \
      if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); }  \
      rp[_i]                      = bcol;  \
      ap[bs2*_i + bs*cidx + ridx] = value;  \
      a_noinsert:; \
    ailen[brow] = nrow; \
}
#ifndef MatSetValues_SeqSBAIJ_B_Private
#define  MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv) \
{ \
    brow = row/bs;  \
    rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
    rmax = bimax[brow]; nrow = bilen[brow]; \
      bcol = col/bs; \
      ridx = row % bs; cidx = col % bs; \
      low = 0; high = nrow; \
      while (high-low > 3) { \
        t = (low+high)/2; \
        if (rp[t] > bcol) high = t; \
        else              low  = t; \
      } \
      for (_i=low; _i<high; _i++) { \
        if (rp[_i] > bcol) break; \
        if (rp[_i] == bcol) { \
          bap  = ap +  bs2*_i + bs*cidx + ridx; \
          if (addv == ADD_VALUES) *bap += value;  \
          else                    *bap  = value;  \
          goto b_noinsert; \
        } \
      } \
      if (b->nonew == 1) goto b_noinsert; \
      else if (b->nonew == -1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero into matrix"); \
      if (nrow >= rmax) { \
        /* there is no extra room in row, therefore enlarge */ \
        int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
        MatScalar *new_a; \
 \
        if (b->nonew == -2) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero in the matrix"); \
 \
        /* malloc new storage space */ \
        len   = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
        ierr  = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
        new_j = (int*)(new_a + bs2*new_nz); \
        new_i = new_j + new_nz; \
 \
        /* copy over old data into new slots */ \
        for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
        for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
        ierr = PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
        len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
        ierr = PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
        ierr = PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
        ierr = PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
        ierr = PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
                    ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
        /* free up old matrix storage */ \
        ierr = PetscFree(b->a);CHKERRQ(ierr);  \
        if (!b->singlemalloc) { \
          ierr = PetscFree(b->i);CHKERRQ(ierr); \
          ierr = PetscFree(b->j);CHKERRQ(ierr); \
        } \
        ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
        b->singlemalloc = PETSC_TRUE; \
 \
        rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
        rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
        PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
        b->maxnz += bs2*CHUNKSIZE; \
        b->reallocs++; \
        b->nz++; \
      } \
      N = nrow++ - 1;  \
      /* shift up all the later entries in this row */ \
      for (ii=N; ii>=_i; ii--) { \
        rp[ii+1] = rp[ii]; \
        ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
      } \
      if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr);}  \
      rp[_i]                      = bcol;  \
      ap[bs2*_i + bs*cidx + ridx] = value;  \
      b_noinsert:; \
    bilen[brow] = nrow; \
}
#endif

#if defined(PETSC_USE_MAT_SINGLE)
#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPISBAIJ"
int MatSetValues_MPISBAIJ(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
  int          ierr,i,N = m*n;
  MatScalar    *vsingle;

  PetscFunctionBegin;
  if (N > b->setvalueslen) {
    if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
    ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
    b->setvalueslen  = N;
  }
  vsingle = b->setvaluescopy;

  for (i=0; i<N; i++) {
    vsingle[i] = v[i];
  }
  ierr = MatSetValues_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ"
int MatSetValuesBlocked_MPISBAIJ(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
{
  Mat_MPISBAIJ *b = (Mat_MPISBAIJ*)mat->data;
  int          ierr,i,N = m*n*b->bs2;
  MatScalar    *vsingle;

  PetscFunctionBegin;
  if (N > b->setvalueslen) {
    if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
    ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
    b->setvalueslen  = N;
  }
  vsingle = b->setvaluescopy;
  for (i=0; i<N; i++) {
    vsingle[i] = v[i];
  }
  ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPISBAIJ_HT"
int MatSetValues_MPISBAIJ_HT(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
{
  PetscFunctionBegin;
  SETERRQ(1,"Function not yet written for SBAIJ format");
  /* PetscFunctionReturn(0); */
}

#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_HT"
int MatSetValuesBlocked_MPISBAIJ_HT(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
{
  PetscFunctionBegin;
  SETERRQ(1,"Function not yet written for SBAIJ format");
  /* PetscFunctionReturn(0); */
}
#endif

/* Only add/insert a(i,j) with i<=j (blocks).
   Any a(i,j) with i>j input by user is ignored.
*/
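
/*
   Illustrative sketch (not part of the original source) of the consequence for
   a caller: only the upper triangle needs to be set. Inserting the 2x2
   symmetric matrix [2 1; 1 2] with bs=1, the (1,0) entry may be passed but is
   silently dropped:

     int         rows[2] = {0,1},cols[2] = {0,1};
     PetscScalar vals[4] = {2.0,1.0,1.0,2.0};
     ierr = MatSetValues(mat,2,rows,2,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
     ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
     ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
*/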
#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPISBAIJ_MatScalar"
int MatSetValues_MPISBAIJ_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
{
  Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
  MatScalar    value;
  PetscTruth   roworiented = baij->roworiented;
  int          ierr,i,j,row,col;
  int          rstart_orig=baij->rstart_bs;
  int          rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
  int          cend_orig=baij->cend_bs,bs=baij->bs;

  /* Some Variables required in the macro */
  Mat          A = baij->A;
  Mat_SeqSBAIJ *a = (Mat_SeqSBAIJ*)(A)->data;
  int          *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
  MatScalar    *aa=a->a;

  Mat          B = baij->B;
  Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(B)->data;
  int          *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
  MatScalar    *ba=b->a;

  int          *rp,ii,nrow,_i,rmax,N,brow,bcol;
  int          low,high,t,ridx,cidx,bs2=a->bs2;
  MatScalar    *ap,*bap;

  /* for stash */
  int          n_loc, *in_loc=0;
  MatScalar    *v_loc=0;

  PetscFunctionBegin;

  if (!baij->donotstash) {
    ierr = PetscMalloc(n*sizeof(int),&in_loc);CHKERRQ(ierr);
    ierr = PetscMalloc(n*sizeof(MatScalar),&v_loc);CHKERRQ(ierr);
  }

  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_BOPT_g)
    if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",im[i],mat->M-1);
#endif
    if (im[i] >= rstart_orig && im[i] < rend_orig) { /* this processor entry */
      row = im[i] - rstart_orig;              /* local row index */
      for (j=0; j<n; j++) {
        if (im[i]/bs > in[j]/bs) continue;    /* ignore lower triangular blocks */
        if (in[j] >= cstart_orig && in[j] < cend_orig) {  /* diag entry (A) */
          col = in[j] - cstart_orig;          /* local col index */
          brow = row/bs; bcol = col/bs;
          if (brow > bcol) continue;  /* ignore lower triangular blocks of A */
          if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
          MatSetValues_SeqSBAIJ_A_Private(row,col,value,addv);
          /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
        } else if (in[j] < 0) continue;
#if defined(PETSC_USE_BOPT_g)
        else if (in[j] >= mat->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",in[j],mat->N-1);}
#endif
        else {  /* off-diag entry (B) */
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
            }
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
            col  = col - 1;
#else
            col = baij->colmap[in[j]/bs] - 1;
#endif
            if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
              ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
              col =  in[j];
              /* Reinitialize the variables required by MatSetValues_SeqSBAIJ_B_Private() */
              B = baij->B;
              b = (Mat_SeqBAIJ*)(B)->data;
              bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
              ba=b->a;
            } else col += in[j]%bs;
          } else col = in[j];
          if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
          MatSetValues_SeqSBAIJ_B_Private(row,col,value,addv);
          /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
        }
      }
    } else {  /* off processor entry */
      if (!baij->donotstash) {
        n_loc = 0;
        for (j=0; j<n; j++) {
          if (im[i]/bs > in[j]/bs) continue; /* ignore lower triangular blocks */
          in_loc[n_loc] = in[j];
          if (roworiented) {
            v_loc[n_loc] = v[i*n+j];
          } else {
            v_loc[n_loc] = v[j*m+i];
          }
          n_loc++;
        }
        ierr = MatStashValuesRow_Private(&mat->stash,im[i],n_loc,in_loc,v_loc);CHKERRQ(ierr);
      }
    }
  }

  if (!baij->donotstash) {
    ierr = PetscFree(in_loc);CHKERRQ(ierr);
    ierr = PetscFree(v_loc);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_MatScalar"
int MatSetValuesBlocked_MPISBAIJ_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
{
  Mat_MPISBAIJ    *baij = (Mat_MPISBAIJ*)mat->data;
  const MatScalar *value;
  MatScalar       *barray=baij->barray;
  PetscTruth      roworiented = baij->roworiented;
  int             ierr,i,j,ii,jj,row,col,rstart=baij->rstart;
  int             rend=baij->rend,cstart=baij->cstart,stepval;
  int             cend=baij->cend,bs=baij->bs,bs2=baij->bs2;

  PetscFunctionBegin;
  if (!barray) {
    ierr         = PetscMalloc(bs2*sizeof(MatScalar),&barray);CHKERRQ(ierr);
    baij->barray = barray;
  }

  if (roworiented) {
    stepval = (n-1)*bs;
  } else {
    stepval = (m-1)*bs;
  }
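
  /*
     For roworiented input, v is an (m*bs) x (n*bs) row-major array, so block
     (i,j) starts at v + i*(stepval+bs)*bs + j*bs with stepval+bs == n*bs, and
     consecutive rows within a block are stepval entries apart after each copy.
     E.g. with bs=2, n=2, roworiented: stepval=2 and block (0,1) is the 2x2
     corner starting at v+2.
  */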
  for (i=0; i<m; i++) {
    if (im[i] < 0) continue;
#if defined(PETSC_USE_BOPT_g)
    if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %d max %d",im[i],baij->Mbs-1);
#endif
    if (im[i] >= rstart && im[i] < rend) {
      row = im[i] - rstart;
      for (j=0; j<n; j++) {
        /* If NumCol = 1 then a copy is not required */
        if ((roworiented) && (n == 1)) {
          barray = (MatScalar*) v + i*bs2;
        } else if ((!roworiented) && (m == 1)) {
          barray = (MatScalar*) v + j*bs2;
        } else { /* Here a copy is required */
          if (roworiented) {
            value = v + i*(stepval+bs)*bs + j*bs;
          } else {
            value = v + j*(stepval+bs)*bs + i*bs;
          }
          for (ii=0; ii<bs; ii++,value+=stepval) {
            for (jj=0; jj<bs; jj++) {
              *barray++  = *value++;
            }
          }
          barray -= bs2;
        }

        if (in[j] >= cstart && in[j] < cend) {
          col  = in[j] - cstart;
          ierr = MatSetValuesBlocked_SeqSBAIJ(baij->A,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        }
        else if (in[j] < 0) continue;
#if defined(PETSC_USE_BOPT_g)
        else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %d max %d",in[j],baij->Nbs-1);}
#endif
        else {
          if (mat->was_assembled) {
            if (!baij->colmap) {
              ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
            }

#if defined(PETSC_USE_BOPT_g)
#if defined (PETSC_USE_CTABLE)
            { int data;
              ierr = PetscTableFind(baij->colmap,in[j]+1,&data);CHKERRQ(ierr);
              if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
            }
#else
            if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
#endif
#endif
#if defined (PETSC_USE_CTABLE)
            ierr = PetscTableFind(baij->colmap,in[j]+1,&col);CHKERRQ(ierr);
            col  = (col - 1)/bs;
#else
            col = (baij->colmap[in[j]] - 1)/bs;
#endif
            if (col < 0 && !((Mat_SeqSBAIJ*)(baij->A->data))->nonew) {
              ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
              col =  in[j];
            }
          }
          else col = in[j];
          ierr = MatSetValuesBlocked_SeqSBAIJ(baij->B,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
        }
      }
    } else {
      if (!baij->donotstash) {
        if (roworiented) {
          ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        } else {
          ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
        }
      }
    }
  }
  PetscFunctionReturn(0);
}

#define HASH_KEY 0.6180339887
#define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
/* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
/* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
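
/*
   HASH() is multiplicative hashing with the golden-ratio constant (HASH_KEY is
   approximately 1/phi): it keeps the fractional part of key*HASH_KEY and scales
   it into [0,size). Worked example: HASH(11,7,tmp) computes 7*0.6180339887 =
   4.3262..., keeps the fraction 0.3262..., and returns (int)(11*0.3262...) = 3.
*/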
#undef __FUNCT__
#define __FUNCT__ "MatSetValues_MPISBAIJ_HT_MatScalar"
int MatSetValues_MPISBAIJ_HT_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
{
  PetscFunctionBegin;
  SETERRQ(1,"Function not yet written for SBAIJ format");
  /* PetscFunctionReturn(0); */
}

#undef __FUNCT__
#define __FUNCT__ "MatSetValuesBlocked_MPISBAIJ_HT_MatScalar"
int MatSetValuesBlocked_MPISBAIJ_HT_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
{
  PetscFunctionBegin;
  SETERRQ(1,"Function not yet written for SBAIJ format");
  /* PetscFunctionReturn(0); */
}

#undef __FUNCT__
#define __FUNCT__ "MatGetValues_MPISBAIJ"
int MatGetValues_MPISBAIJ(Mat mat,int m,const int idxm[],int n,const int idxn[],PetscScalar v[])
{
  Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
  int          bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
  int          bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;

  PetscFunctionBegin;
  for (i=0; i<m; i++) {
    if (idxm[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %d",idxm[i]);
    if (idxm[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",idxm[i],mat->M-1);
    if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
      row = idxm[i] - bsrstart;
      for (j=0; j<n; j++) {
        if (idxn[j] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column %d",idxn[j]);
        if (idxn[j] >= mat->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",idxn[j],mat->N-1);
        if (idxn[j] >= bscstart && idxn[j] < bscend) {
          col = idxn[j] - bscstart;
          ierr = MatGetValues_SeqSBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
        } else {
          if (!baij->colmap) {
            ierr = CreateColmap_MPISBAIJ_Private(mat);CHKERRQ(ierr);
          }
#if defined (PETSC_USE_CTABLE)
          ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
          data--;
#else
          data = baij->colmap[idxn[j]/bs]-1;
#endif
          if ((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
          else {
            col  = data + idxn[j]%bs;
            ierr = MatGetValues_SeqSBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
          }
        }
      }
    } else {
      SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
    }
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatNorm_MPISBAIJ"
int MatNorm_MPISBAIJ(Mat mat,NormType type,PetscReal *norm)
{
  Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
  /* Mat_SeqSBAIJ *amat = (Mat_SeqSBAIJ*)baij->A->data; */
  /* Mat_SeqBAIJ  *bmat = (Mat_SeqBAIJ*)baij->B->data; */
  int        ierr;
  PetscReal  sum[2],*lnorm2;

  PetscFunctionBegin;
  if (baij->size == 1) {
    ierr = MatNorm(baij->A,type,norm);CHKERRQ(ierr);
  } else {
    if (type == NORM_FROBENIUS) {
      ierr = PetscMalloc(2*sizeof(PetscReal),&lnorm2);CHKERRQ(ierr);
      ierr = MatNorm(baij->A,type,lnorm2);CHKERRQ(ierr);
      *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2++;            /* square of norm(A) */
      ierr = MatNorm(baij->B,type,lnorm2);CHKERRQ(ierr);
      *lnorm2 = (*lnorm2)*(*lnorm2); lnorm2--;            /* square of norm(B) */
      /*
      ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
      PetscSynchronizedPrintf(mat->comm,"[%d], lnorm2=%g, %g\n",rank,lnorm2[0],lnorm2[1]);
      */
      ierr = MPI_Allreduce(lnorm2,sum,2,MPIU_REAL,MPI_SUM,mat->comm);CHKERRQ(ierr);
      /*
      PetscSynchronizedPrintf(mat->comm,"[%d], sum=%g, %g\n",rank,sum[0],sum[1]);
      PetscSynchronizedFlush(mat->comm); */

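      /* Each entry of the off-diagonal block B appears twice in the full
         symmetric matrix (as b_ij and b_ji), hence the factor of 2 below:
         norm^2 = sum over processes of ( ||A_p||_F^2 + 2*||B_p||_F^2 ) */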
      *norm = sqrt(sum[0] + 2*sum[1]);
      ierr = PetscFree(lnorm2);CHKERRQ(ierr);
    } else {
      SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
    }
  }
  PetscFunctionReturn(0);
}

/*
  Creates and fills the hash table.
  This table is created only once.
  If new entries need to be added to the matrix
  then the hash table has to be destroyed and
  recreated.
*/
#undef __FUNCT__
#define __FUNCT__ "MatCreateHashTable_MPISBAIJ_Private"
int MatCreateHashTable_MPISBAIJ_Private(Mat mat,PetscReal factor)
{
  PetscFunctionBegin;
  SETERRQ(1,"Function not yet written for SBAIJ format");
  /* PetscFunctionReturn(0); */
}

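/*
   Assembly uses two stashes: mat->stash collects scalar entries destined for
   other processes (from MatSetValues()) and mat->bstash collects whole bs x bs
   blocks (from MatSetValuesBlocked()). MatAssemblyBegin() starts the scatters;
   MatAssemblyEnd() drains the received messages back through the local
   MatSetValues kernels.
*/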
#undef __FUNCT__
#define __FUNCT__ "MatAssemblyBegin_MPISBAIJ"
int MatAssemblyBegin_MPISBAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
  int          ierr,nstash,reallocs;
  InsertMode   addv;

  PetscFunctionBegin;
  if (baij->donotstash) {
    PetscFunctionReturn(0);
  }

  /* make sure all processors are either in INSERTMODE or ADDMODE */
  ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);CHKERRQ(ierr);
  if (addv == (ADD_VALUES|INSERT_VALUES)) {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted others added");
  }
  mat->insertmode = addv; /* in case this processor had no cache */

  ierr = MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);CHKERRQ(ierr);
  ierr = MatStashScatterBegin_Private(&mat->bstash,baij->rowners);CHKERRQ(ierr);
  ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
  PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
  ierr = MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);CHKERRQ(ierr);
  PetscLogInfo(0,"MatAssemblyBegin_MPISBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatAssemblyEnd_MPISBAIJ"
int MatAssemblyEnd_MPISBAIJ(Mat mat,MatAssemblyType mode)
{
  Mat_MPISBAIJ *baij=(Mat_MPISBAIJ*)mat->data;
  Mat_SeqSBAIJ *a=(Mat_SeqSBAIJ*)baij->A->data;
  Mat_SeqBAIJ  *b=(Mat_SeqBAIJ*)baij->B->data;
  int          i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
  int          *row,*col,other_disassembled;
  PetscTruth   r1,r2,r3;
  MatScalar    *val;
  InsertMode   addv = mat->insertmode;

  PetscFunctionBegin;

  if (!baij->donotstash) {
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      /*
      PetscSynchronizedPrintf(mat->comm,"[%d]: in AssemblyEnd, stash, flg=%d\n",rank,flg);
      PetscSynchronizedFlush(mat->comm);
      */
      if (!flg) break;

      for (i=0; i<n;) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        /* Now assemble all these values with a single function call */
        ierr = MatSetValues_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
        i = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
    /* Now process the block-stash. Since the values are stashed column-oriented,
       set the roworiented flags to PETSC_FALSE, and restore the original flags
       after MatSetValues() */
    r1 = baij->roworiented;
    r2 = a->roworiented;
    r3 = b->roworiented;
    baij->roworiented = PETSC_FALSE;
    a->roworiented    = PETSC_FALSE;
    b->roworiented    = PETSC_FALSE;
    while (1) {
      ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
      if (!flg) break;

      for (i=0; i<n;) {
        /* Now identify the consecutive vals belonging to the same row */
        for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
        if (j < n) ncols = j-i;
        else       ncols = n-i;
        ierr = MatSetValuesBlocked_MPISBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
        i = j;
      }
    }
    ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
    baij->roworiented = r1;
    a->roworiented    = r2;
    b->roworiented    = r3;
  }

  ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);

  /* determine if any processor has disassembled, if so we must
     also disassemble ourselves, in order that we may reassemble. */
  /*
     if nonzero structure of submatrix B cannot change then we know that
     no processor disassembled thus we can skip this stuff
  */
  if (!((Mat_SeqBAIJ*)baij->B->data)->nonew) {
    ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);CHKERRQ(ierr);
    if (mat->was_assembled && !other_disassembled) {
      ierr = DisAssemble_MPISBAIJ(mat);CHKERRQ(ierr);
    }
  }

  if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatSetUpMultiply_MPISBAIJ(mat);CHKERRQ(ierr); /* setup Mvctx and sMvctx */
  }
  ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);

#if defined(PETSC_USE_BOPT_g)
  if (baij->ht && mode == MAT_FINAL_ASSEMBLY) {
    PetscLogInfo(0,"MatAssemblyEnd_MPISBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);
    baij->ht_total_ct  = 0;
    baij->ht_insert_ct = 0;
  }
#endif
  if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
    ierr = MatCreateHashTable_MPISBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
    mat->ops->setvalues        = MatSetValues_MPISBAIJ_HT;
    mat->ops->setvaluesblocked = MatSetValuesBlocked_MPISBAIJ_HT;
  }

  if (baij->rowvalues) {
    ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
    baij->rowvalues = 0;
  }

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatView_MPISBAIJ_ASCIIorDraworSocket"
static int MatView_MPISBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
{
  Mat_MPISBAIJ      *baij = (Mat_MPISBAIJ*)mat->data;
  int               ierr,bs = baij->bs,size = baij->size,rank = baij->rank;
  PetscTruth        isascii,isdraw;
  PetscViewer       sviewer;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
  if (isascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      MatInfo info;
      ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
      ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
              rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
              baij->bs,(int)info.memory);CHKERRQ(ierr);
      ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
      ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
      ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
      ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
      ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else if (format == PETSC_VIEWER_ASCII_INFO) {
      ierr = PetscViewerASCIIPrintf(viewer,"  block size is %d\n",bs);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    }
  }

  if (isdraw) {
    PetscDraw  draw;
    PetscTruth isnull;
    ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
    ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
  }

  if (size == 1) {
    ierr = PetscObjectSetName((PetscObject)baij->A,mat->name);CHKERRQ(ierr);
    ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
  } else {
    /* assemble the entire matrix onto first processor. */
    Mat          A;
    Mat_SeqSBAIJ *Aloc;
    Mat_SeqBAIJ  *Bloc;
    int          M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
    MatScalar    *a;

    if (!rank) {
      ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
    } else {
      ierr = MatCreateMPISBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
    }
    PetscLogObjectParent(mat,A);

    /* copy over the A part */
    Aloc  = (Mat_SeqSBAIJ*)baij->A->data;
    ai    = Aloc->i; aj = Aloc->j; a = Aloc->a;
    ierr  = PetscMalloc(bs*sizeof(int),&rvals);CHKERRQ(ierr);

    for (i=0; i<mbs; i++) {
      rvals[0] = bs*(baij->rstart + i);
      for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
      for (j=ai[i]; j<ai[i+1]; j++) {
        col = (baij->cstart+aj[j])*bs;
        for (k=0; k<bs; k++) {
          ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
          col++; a += bs;
        }
      }
    }
    /* copy over the B part */
    Bloc = (Mat_SeqBAIJ*)baij->B->data;
    ai = Bloc->i; aj = Bloc->j; a = Bloc->a;
    for (i=0; i<mbs; i++) {
      rvals[0] = bs*(baij->rstart + i);
      for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
      for (j=ai[i]; j<ai[i+1]; j++) {
        col = baij->garray[aj[j]]*bs;
        for (k=0; k<bs; k++) {
          ierr = MatSetValues_MPISBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
          col++; a += bs;
        }
      }
    }
    ierr = PetscFree(rvals);CHKERRQ(ierr);
    ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
    /*
       Everyone has to call to draw the matrix since the graphics waits are
       synchronized across all processors that share the PetscDraw object
    */
    ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
    if (!rank) {
      ierr = PetscObjectSetName((PetscObject)((Mat_MPISBAIJ*)(A->data))->A,mat->name);CHKERRQ(ierr);
      ierr = MatView(((Mat_MPISBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
    }
    ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
    ierr = MatDestroy(A);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatView_MPISBAIJ"
int MatView_MPISBAIJ(Mat mat,PetscViewer viewer)
{
  int        ierr;
  PetscTruth isascii,isdraw,issocket,isbinary;

  PetscFunctionBegin;
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
  ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
  if (isascii || isdraw || issocket || isbinary) {
    ierr = MatView_MPISBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
  } else {
    SETERRQ1(1,"Viewer type %s not supported by MPISBAIJ matrices",((PetscObject)viewer)->type_name);
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPISBAIJ"
int MatDestroy_MPISBAIJ(Mat mat)
{
  Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
  int          ierr;

  PetscFunctionBegin;
#if defined(PETSC_USE_LOG)
  PetscLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
#endif
  ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
  ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
  ierr = PetscFree(baij->rowners);CHKERRQ(ierr);
  ierr = MatDestroy(baij->A);CHKERRQ(ierr);
  ierr = MatDestroy(baij->B);CHKERRQ(ierr);
#if defined (PETSC_USE_CTABLE)
  if (baij->colmap) {ierr = PetscTableDelete(baij->colmap);CHKERRQ(ierr);}
#else
  if (baij->colmap) {ierr = PetscFree(baij->colmap);CHKERRQ(ierr);}
#endif
  if (baij->garray) {ierr = PetscFree(baij->garray);CHKERRQ(ierr);}
  if (baij->lvec)   {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
  if (baij->Mvctx)  {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
  if (baij->slvec0) {
    ierr = VecDestroy(baij->slvec0);CHKERRQ(ierr);
    ierr = VecDestroy(baij->slvec0b);CHKERRQ(ierr);
  }
  if (baij->slvec1) {
    ierr = VecDestroy(baij->slvec1);CHKERRQ(ierr);
    ierr = VecDestroy(baij->slvec1a);CHKERRQ(ierr);
    ierr = VecDestroy(baij->slvec1b);CHKERRQ(ierr);
  }
  if (baij->sMvctx)  {ierr = VecScatterDestroy(baij->sMvctx);CHKERRQ(ierr);}
  if (baij->rowvalues) {ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);}
  if (baij->barray) {ierr = PetscFree(baij->barray);CHKERRQ(ierr);}
  if (baij->hd) {ierr = PetscFree(baij->hd);CHKERRQ(ierr);}
#if defined(PETSC_USE_MAT_SINGLE)
  if (baij->setvaluescopy) {ierr = PetscFree(baij->setvaluescopy);CHKERRQ(ierr);}
#endif
  ierr = PetscFree(baij);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

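/*
   The symmetric multiply y = A*x is assembled from three pieces using a single
   scatter: the local diagonal block contributes A_diag*x into slvec1a; the
   transpose of the off-diagonal block contributes B^T*x_loc into slvec0b (the
   "subdiagonal" part owned by other processes); one forward ADD_VALUES scatter
   of slvec0 = [x, B^T*x_loc] into slvec1 = [A_diag*x, 0] both sums those remote
   contributions and gathers the ghost values of x needed for the final
   B*slvec1b ("superdiagonal") term.
*/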
#undef __FUNCT__
#define __FUNCT__ "MatMult_MPISBAIJ"
int MatMult_MPISBAIJ(Mat A,Vec xx,Vec yy)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
  int          ierr,nt,mbs=a->mbs,bs=a->bs;
  PetscScalar  *x,*from,zero=0.0;

  PetscFunctionBegin;
  /*
  PetscSynchronizedPrintf(A->comm," _1comm is called ...\n");
  PetscSynchronizedFlush(A->comm);
  */
  ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
  if (nt != A->n) {
    SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
  }
  ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
  if (nt != A->m) {
    SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
  }

  /* diagonal part */
  ierr = (*a->A->ops->mult)(a->A,xx,a->slvec1a);CHKERRQ(ierr);
  ierr = VecSet(&zero,a->slvec1b);CHKERRQ(ierr);

  /* subdiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);CHKERRQ(ierr);

  /* copy x into the vec slvec0 */
  ierr = VecGetArray(a->slvec0,&from);CHKERRQ(ierr);
  ierr = VecGetArray(xx,&x);CHKERRQ(ierr);
  ierr = PetscMemcpy(from,x,bs*mbs*sizeof(PetscScalar));CHKERRQ(ierr);
  ierr = VecRestoreArray(a->slvec0,&from);CHKERRQ(ierr);

  ierr = VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);CHKERRQ(ierr);
  ierr = VecRestoreArray(xx,&x);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);CHKERRQ(ierr);

  /* superdiagonal part */
  ierr = (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,yy);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMult_MPISBAIJ_2comm"
int MatMult_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
  int          ierr,nt;

  PetscFunctionBegin;
  ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
  if (nt != A->n) {
    SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
  }
  ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
  if (nt != A->m) {
    SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
  }

  ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  /* do diagonal part */
  ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
  /* do superdiagonal part */
  ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
  /* do subdiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_MPISBAIJ"
int MatMultAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
  int          ierr,mbs=a->mbs,bs=a->bs;
  PetscScalar  *x,*from,zero=0.0;

  PetscFunctionBegin;
  /*
  PetscSynchronizedPrintf(A->comm," MatMultAdd is called ...\n");
  PetscSynchronizedFlush(A->comm);
  */
  /* diagonal part */
  ierr = (*a->A->ops->multadd)(a->A,xx,yy,a->slvec1a);CHKERRQ(ierr);
  ierr = VecSet(&zero,a->slvec1b);CHKERRQ(ierr);

  /* subdiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->slvec0b);CHKERRQ(ierr);

  /* copy x into the vec slvec0 */
  ierr = VecGetArray(a->slvec0,&from);CHKERRQ(ierr);
  ierr = VecGetArray(xx,&x);CHKERRQ(ierr);
  ierr = PetscMemcpy(from,x,bs*mbs*sizeof(PetscScalar));CHKERRQ(ierr);
  ierr = VecRestoreArray(a->slvec0,&from);CHKERRQ(ierr);

  ierr = VecScatterBegin(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);CHKERRQ(ierr);
  ierr = VecRestoreArray(xx,&x);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->slvec0,a->slvec1,ADD_VALUES,SCATTER_FORWARD,a->sMvctx);CHKERRQ(ierr);

  /* superdiagonal part */
  ierr = (*a->B->ops->multadd)(a->B,a->slvec1b,a->slvec1a,zz);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultAdd_MPISBAIJ_2comm"
int MatMultAdd_MPISBAIJ_2comm(Mat A,Vec xx,Vec yy,Vec zz)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
  int          ierr;

  PetscFunctionBegin;
  ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  /* do diagonal part */
  ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
  /* do superdiagonal part */
  ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
  ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);

  /* do subdiagonal part */
  ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
  ierr = VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
  ierr = VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatMultTranspose_MPISBAIJ"
int MatMultTranspose_MPISBAIJ(Mat A,Vec xx,Vec yy)
{
  PetscFunctionBegin;
  SETERRQ(1,"Matrix is symmetric. Call MatMult().");
  /* PetscFunctionReturn(0); */
}

#undef __FUNCT__
#define __FUNCT__ "MatMultTransposeAdd_MPISBAIJ"
int MatMultTransposeAdd_MPISBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
{
  PetscFunctionBegin;
  SETERRQ(1,"Matrix is symmetric. Call MatMultAdd().");
  /* PetscFunctionReturn(0); */
}

/*
  This only works correctly for square matrices where the subblock A->A is the
  diagonal block
*/
#undef __FUNCT__
#define __FUNCT__ "MatGetDiagonal_MPISBAIJ"
int MatGetDiagonal_MPISBAIJ(Mat A,Vec v)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
  int          ierr;

  PetscFunctionBegin;
  /* if (a->M != a->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block"); */
  ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatScale_MPISBAIJ"
int MatScale_MPISBAIJ(const PetscScalar *aa,Mat A)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
  int          ierr;

  PetscFunctionBegin;
  ierr = MatScale(aa,a->A);CHKERRQ(ierr);
  ierr = MatScale(aa,a->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetRow_MPISBAIJ"
int MatGetRow_MPISBAIJ(Mat matin,int row,int *nz,int **idx,PetscScalar **v)
{
  Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
  PetscScalar    *vworkA,*vworkB,**pvA,**pvB,*v_p;
  int            bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
  int            nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
  int            *cmap,*idx_p,cstart = mat->cstart;

  PetscFunctionBegin;
  if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
  mat->getrowactive = PETSC_TRUE;

  if (!mat->rowvalues && (idx || v)) {
    /*
        allocate enough space to hold information from the longest row.
    */
    Mat_SeqSBAIJ *Aa = (Mat_SeqSBAIJ*)mat->A->data;
    Mat_SeqBAIJ  *Ba = (Mat_SeqBAIJ*)mat->B->data;
    int          max = 1,mbs = mat->mbs,tmp;
    for (i=0; i<mbs; i++) {
      tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i]; /* row length */
      if (max < tmp) { max = tmp; }
    }
    ierr = PetscMalloc(max*bs2*(sizeof(int)+sizeof(PetscScalar)),&mat->rowvalues);CHKERRQ(ierr);
    mat->rowindices = (int*)(mat->rowvalues + max*bs2);
  }

  if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
  lrow = row - brstart;  /* local row index */

  pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
  if (!v)   {pvA = 0; pvB = 0;}
  if (!idx) {pcA = 0; if (!v) pcB = 0;}
  ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  nztot = nzA + nzB;

  cmap = mat->garray;
  if (v || idx) {
    if (nztot) {
      /* Sort by increasing column numbers, assuming A and B already sorted */
      int imark = -1;
      if (v) {
        *v = v_p = mat->rowvalues;
        for (i=0; i<nzB; i++) {
          if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
          else break;
        }
        imark = i;
        for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
        for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
      }
      if (idx) {
        *idx = idx_p = mat->rowindices;
        if (imark > -1) {
          for (i=0; i<imark; i++) {
            idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
          }
        } else {
          for (i=0; i<nzB; i++) {
            if (cmap[cworkB[i]/bs] < cstart)
              idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
            else break;
          }
          imark = i;
        }
        for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
        for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
      }
    } else {
      if (idx) *idx = 0;
      if (v)   *v   = 0;
    }
  }
  *nz = nztot;
  ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
  ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatRestoreRow_MPISBAIJ"
int MatRestoreRow_MPISBAIJ(Mat mat,int row,int *nz,int **idx,PetscScalar **v)
{
  Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;

  PetscFunctionBegin;
  if (baij->getrowactive == PETSC_FALSE) {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
  }
  baij->getrowactive = PETSC_FALSE;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetBlockSize_MPISBAIJ"
int MatGetBlockSize_MPISBAIJ(Mat mat,int *bs)
{
  Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;

  PetscFunctionBegin;
  *bs = baij->bs;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatZeroEntries_MPISBAIJ"
int MatZeroEntries_MPISBAIJ(Mat A)
{
  Mat_MPISBAIJ *l = (Mat_MPISBAIJ*)A->data;
  int          ierr;

  PetscFunctionBegin;
  ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
  ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatGetInfo_MPISBAIJ"
int MatGetInfo_MPISBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)matin->data;
  Mat          A = a->A,B = a->B;
  int          ierr;
  PetscReal    isend[5],irecv[5];

  PetscFunctionBegin;
  info->block_size     = (PetscReal)a->bs;
  ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
  isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
  isend[3] = info->memory;  isend[4] = info->mallocs;
  ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
  isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
  isend[3] += info->memory;  isend[4] += info->mallocs;
  if (flag == MAT_LOCAL) {
    info->nz_used      = isend[0];
    info->nz_allocated = isend[1];
    info->nz_unneeded  = isend[2];
    info->memory       = isend[3];
    info->mallocs      = isend[4];
  } else if (flag == MAT_GLOBAL_MAX) {
    ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);CHKERRQ(ierr);
    info->nz_used      = irecv[0];
    info->nz_allocated = irecv[1];
    info->nz_unneeded  = irecv[2];
    info->memory       = irecv[3];
    info->mallocs      = irecv[4];
  } else if (flag == MAT_GLOBAL_SUM) {
    ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);CHKERRQ(ierr);
    info->nz_used      = irecv[0];
    info->nz_allocated = irecv[1];
    info->nz_unneeded  = irecv[2];
    info->memory       = irecv[3];
    info->mallocs      = irecv[4];
  } else {
    SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
  }
  info->rows_global       = (PetscReal)matin->M;
  info->columns_global    = (PetscReal)matin->N;
  info->rows_local        = (PetscReal)matin->m;
  info->columns_local     = (PetscReal)matin->N;
  info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
  info->fill_ratio_needed = 0;
  info->factor_mallocs    = 0;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatSetOption_MPISBAIJ"
int MatSetOption_MPISBAIJ(Mat A,MatOption op)
{
  Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
  int          ierr;

  PetscFunctionBegin;
  switch (op) {
  case MAT_NO_NEW_NONZERO_LOCATIONS:
  case MAT_YES_NEW_NONZERO_LOCATIONS:
  case MAT_COLUMNS_UNSORTED:
  case MAT_COLUMNS_SORTED:
  case MAT_NEW_NONZERO_ALLOCATION_ERR:
  case MAT_KEEP_ZEROED_ROWS:
  case MAT_NEW_NONZERO_LOCATION_ERR:
    ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
    ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
    break;
  case MAT_ROW_ORIENTED:
    a->roworiented = PETSC_TRUE;
    ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
    ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
    break;
  case MAT_ROWS_SORTED:
  case MAT_ROWS_UNSORTED:
  case MAT_YES_NEW_DIAGONALS:
    PetscLogInfo(A,"Info:MatSetOption_MPISBAIJ:Option ignored\n");
    break;
  case MAT_COLUMN_ORIENTED:
    a->roworiented = PETSC_FALSE;
    ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
    ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
    break;
  case MAT_IGNORE_OFF_PROC_ENTRIES:
    a->donotstash = PETSC_TRUE;
    break;
  case MAT_NO_NEW_DIAGONALS:
    SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
  case MAT_USE_HASH_TABLE:
    a->ht_flag = PETSC_TRUE;
    break;
  default:
    SETERRQ(PETSC_ERR_SUP,"unknown option");
  }
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatTranspose_MPISBAIJ"
int MatTranspose_MPISBAIJ(Mat A,Mat *B)
{
  int ierr;
  PetscFunctionBegin;
  ierr = MatDuplicate(A,MAT_COPY_VALUES,B);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

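/*
   For the symmetric format the scaling must preserve symmetry, so ll and rr
   are required to be the same vector: the result is diag(ll) * A * diag(ll).
   The local diagonal block is scaled on both sides; the off-diagonal block B
   is scaled by the local part of ll on the left and by the scattered (ghosted)
   part of ll on the right.
*/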
1369 #undef __FUNCT__
1370 #define __FUNCT__ "MatDiagonalScale_MPISBAIJ"
1371 int MatDiagonalScale_MPISBAIJ(Mat mat,Vec ll,Vec rr)
1372 {
1373   Mat_MPISBAIJ *baij = (Mat_MPISBAIJ*)mat->data;
1374   Mat         a = baij->A,b = baij->B;
1375   int         ierr,s1,s2,s3;
1376 
1377   PetscFunctionBegin;
1378   if (ll != rr) {
1379     SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"For symmetric format, left and right scaling vectors must be the same");
1380   }
1381   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1382   if (rr) {
1383     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1384     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1385     /* Overlap communication with computation. */
1386     ierr = VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1387 
1388     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1389     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1390     /* left scale the off-diagonal block while the scatter proceeds */
1391     ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
1392 
1393     /* scale the diagonal block */
1394     ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1395 
1396     /* finish the scatter, then right scale the off-diagonal block */
1397     ierr = VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1398     ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
1399   }
1400 
1401   PetscFunctionReturn(0);
1402 }
1403 
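/*
   Usage sketch for the symmetric scaling above (illustrative): since only the
   upper triangle is stored, the left and right scaling vectors must be the
   same object, giving A <- diag(d) A diag(d):

      ierr = MatDiagonalScale(A,d,d);CHKERRQ(ierr);

   Passing distinct vectors raises the PETSC_ERR_ARG_OUTOFRANGE error above.
*/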
1404 #undef __FUNCT__
1405 #define __FUNCT__ "MatZeroRows_MPISBAIJ"
1406 int MatZeroRows_MPISBAIJ(Mat A,IS is,const PetscScalar *diag)
1407 {
1408   PetscFunctionBegin;
1409   SETERRQ(PETSC_ERR_SUP,"No support for this function yet");
1410 }
1411 
1412 #undef __FUNCT__
1413 #define __FUNCT__ "MatPrintHelp_MPISBAIJ"
1414 int MatPrintHelp_MPISBAIJ(Mat A)
1415 {
1416   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1417   MPI_Comm    comm = A->comm;
1418   static int  called = 0;
1419   int         ierr;
1420 
1421   PetscFunctionBegin;
1422   if (!a->rank) {
1423     ierr = MatPrintHelp_SeqSBAIJ(a->A);CHKERRQ(ierr);
1424   }
1425   if (called) {PetscFunctionReturn(0);} else called = 1;
1426   ierr = (*PetscHelpPrintf)(comm," Options for MATMPISBAIJ matrix format (the defaults):\n");CHKERRQ(ierr);
1427   ierr = (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");CHKERRQ(ierr);
1428   PetscFunctionReturn(0);
1429 }
1430 
1431 #undef __FUNCT__
1432 #define __FUNCT__ "MatSetUnfactored_MPISBAIJ"
1433 int MatSetUnfactored_MPISBAIJ(Mat A)
1434 {
1435   Mat_MPISBAIJ *a   = (Mat_MPISBAIJ*)A->data;
1436   int         ierr;
1437 
1438   PetscFunctionBegin;
1439   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1440   PetscFunctionReturn(0);
1441 }
1442 
1443 static int MatDuplicate_MPISBAIJ(Mat,MatDuplicateOption,Mat *);
1444 
1445 #undef __FUNCT__
1446 #define __FUNCT__ "MatEqual_MPISBAIJ"
1447 int MatEqual_MPISBAIJ(Mat A,Mat B,PetscTruth *flag)
1448 {
1449   Mat_MPISBAIJ *matB = (Mat_MPISBAIJ*)B->data,*matA = (Mat_MPISBAIJ*)A->data;
1450   Mat         a,b,c,d;
1451   PetscTruth  flg;
1452   int         ierr;
1453 
1454   PetscFunctionBegin;
1455   a = matA->A; b = matA->B;
1456   c = matB->A; d = matB->B;
1457 
1458   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1459   if (flg == PETSC_TRUE) {
1460     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1461   }
1462   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);CHKERRQ(ierr);
1463   PetscFunctionReturn(0);
1464 }
1465 
1466 #undef __FUNCT__
1467 #define __FUNCT__ "MatSetUpPreallocation_MPISBAIJ"
1468 int MatSetUpPreallocation_MPISBAIJ(Mat A)
1469 {
1470   int        ierr;
1471 
1472   PetscFunctionBegin;
1473   ierr = MatMPISBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1474   PetscFunctionReturn(0);
1475 }
1476 /* -------------------------------------------------------------------*/
1477 static struct _MatOps MatOps_Values = {
1478   MatSetValues_MPISBAIJ,
1479   MatGetRow_MPISBAIJ,
1480   MatRestoreRow_MPISBAIJ,
1481   MatMult_MPISBAIJ,
1482   MatMultAdd_MPISBAIJ,
1483   MatMultTranspose_MPISBAIJ,
1484   MatMultTransposeAdd_MPISBAIJ,
1485   0,
1486   0,
1487   0,
1488   0,
1489   0,
1490   0,
1491   MatRelax_MPISBAIJ,
1492   MatTranspose_MPISBAIJ,
1493   MatGetInfo_MPISBAIJ,
1494   MatEqual_MPISBAIJ,
1495   MatGetDiagonal_MPISBAIJ,
1496   MatDiagonalScale_MPISBAIJ,
1497   MatNorm_MPISBAIJ,
1498   MatAssemblyBegin_MPISBAIJ,
1499   MatAssemblyEnd_MPISBAIJ,
1500   0,
1501   MatSetOption_MPISBAIJ,
1502   MatZeroEntries_MPISBAIJ,
1503   MatZeroRows_MPISBAIJ,
1504   0,
1505   0,
1506   0,
1507   0,
1508   MatSetUpPreallocation_MPISBAIJ,
1509   0,
1510   0,
1511   0,
1512   0,
1513   MatDuplicate_MPISBAIJ,
1514   0,
1515   0,
1516   0,
1517   0,
1518   0,
1519   0,
1520   0,
1521   MatGetValues_MPISBAIJ,
1522   0,
1523   MatPrintHelp_MPISBAIJ,
1524   MatScale_MPISBAIJ,
1525   0,
1526   0,
1527   0,
1528   MatGetBlockSize_MPISBAIJ,
1529   0,
1530   0,
1531   0,
1532   0,
1533   0,
1534   0,
1535   MatSetUnfactored_MPISBAIJ,
1536   0,
1537   MatSetValuesBlocked_MPISBAIJ,
1538   0,
1539   0,
1540   0,
1541   MatGetPetscMaps_Petsc,
1542   0,
1543   0,
1544   0,
1545   0,
1546   0,
1547   0,
1548   MatGetRowMax_MPISBAIJ};
1549 
1550 
1551 EXTERN_C_BEGIN
1552 #undef __FUNCT__
1553 #define __FUNCT__ "MatGetDiagonalBlock_MPISBAIJ"
1554 int MatGetDiagonalBlock_MPISBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1555 {
1556   PetscFunctionBegin;
1557   *a      = ((Mat_MPISBAIJ *)A->data)->A;
1558   *iscopy = PETSC_FALSE;
1559   PetscFunctionReturn(0);
1560 }
1561 EXTERN_C_END
1562 
1563 EXTERN_C_BEGIN
1564 #undef __FUNCT__
1565 #define __FUNCT__ "MatMPISBAIJSetPreallocation_MPISBAIJ"
1566 int MatMPISBAIJSetPreallocation_MPISBAIJ(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1567 {
1568   Mat_MPISBAIJ *b;
1569   int          ierr,i,mbs,Mbs;
1570 
1571   PetscFunctionBegin;
1572   ierr = PetscOptionsGetInt(B->prefix,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1573 
1574   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1575   if (d_nz == PETSC_DECIDE || d_nz == PETSC_DEFAULT) d_nz = 3;
1576   if (o_nz == PETSC_DECIDE || o_nz == PETSC_DEFAULT) o_nz = 1;
1577   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %d",d_nz);
1578   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %d",o_nz);
1579   if (d_nnz) {
1580     for (i=0; i<B->m/bs; i++) {
1581       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %d value %d",i,d_nnz[i]);
1582     }
1583   }
1584   if (o_nnz) {
1585     for (i=0; i<B->m/bs; i++) {
1586       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %d value %d",i,o_nnz[i]);
1587     }
1588   }
1589   B->preallocated = PETSC_TRUE;
1590   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);CHKERRQ(ierr);
1591   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);CHKERRQ(ierr);
1592   ierr = PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);CHKERRQ(ierr);
1593   ierr = PetscMapCreateMPI(B->comm,B->m,B->M,&B->cmap);CHKERRQ(ierr);
1594 
1595   b   = (Mat_MPISBAIJ*)B->data;
1596   mbs = B->m/bs;
1597   Mbs = B->M/bs;
1598   if (mbs*bs != B->m) {
1599     SETERRQ2(PETSC_ERR_ARG_SIZ,"Number of local rows %d must be divisible by blocksize %d",B->m,bs);
1600   }
1601 
1602   b->bs  = bs;
1603   b->bs2 = bs*bs;
1604   b->mbs = mbs;
1605   b->nbs = mbs;
1606   b->Mbs = Mbs;
1607   b->Nbs = Mbs;
1608 
1609   ierr = MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
1610   b->rowners[0]    = 0;
1611   for (i=2; i<=b->size; i++) {
1612     b->rowners[i] += b->rowners[i-1];
1613   }
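  /* e.g. with 3 processes owning 2, 3 and 2 block rows: rowners = {0,2,5,7} */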
1614   b->rstart    = b->rowners[b->rank];
1615   b->rend      = b->rowners[b->rank+1];
1616   b->cstart    = b->rstart;
1617   b->cend      = b->rend;
1618   for (i=0; i<=b->size; i++) {
1619     b->rowners_bs[i] = b->rowners[i]*bs;
1620   }
1621   b->rstart_bs = b->rstart*bs;
1622   b->rend_bs   = b->rend*bs;
1623 
1624   b->cstart_bs = b->cstart*bs;
1625   b->cend_bs   = b->cend*bs;
1626 
1627 
1628   ierr = MatCreateSeqSBAIJ(PETSC_COMM_SELF,bs,B->m,B->m,d_nz,d_nnz,&b->A);CHKERRQ(ierr);
1629   PetscLogObjectParent(B,b->A);
1630   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->M,o_nz,o_nnz,&b->B);CHKERRQ(ierr);
1631   PetscLogObjectParent(B,b->B);
1632 
1633   /* build cache for off array entries formed */
1634   ierr = MatStashCreate_Private(B->comm,bs,&B->bstash);CHKERRQ(ierr);
1635 
1636   PetscFunctionReturn(0);
1637 }
1638 EXTERN_C_END
1639 
1640 EXTERN_C_BEGIN
1641 #undef __FUNCT__
1642 #define __FUNCT__ "MatCreate_MPISBAIJ"
1643 int MatCreate_MPISBAIJ(Mat B)
1644 {
1645   Mat_MPISBAIJ *b;
1646   int          ierr;
1647   PetscTruth   flg;
1648 
1649   PetscFunctionBegin;
1650 
1651   ierr    = PetscNew(Mat_MPISBAIJ,&b);CHKERRQ(ierr);
1652   B->data = (void*)b;
1653   ierr    = PetscMemzero(b,sizeof(Mat_MPISBAIJ));CHKERRQ(ierr);
1654   ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
1655 
1656   B->ops->destroy    = MatDestroy_MPISBAIJ;
1657   B->ops->view       = MatView_MPISBAIJ;
1658   B->mapping    = 0;
1659   B->factor     = 0;
1660   B->assembled  = PETSC_FALSE;
1661 
1662   B->insertmode = NOT_SET_VALUES;
1663   ierr = MPI_Comm_rank(B->comm,&b->rank);CHKERRQ(ierr);
1664   ierr = MPI_Comm_size(B->comm,&b->size);CHKERRQ(ierr);
1665 
1666   /* build local table of row and column ownerships */
1667   ierr          = PetscMalloc(3*(b->size+2)*sizeof(int),&b->rowners);CHKERRQ(ierr);
1668   b->cowners    = b->rowners + b->size + 2;
1669   b->rowners_bs = b->cowners + b->size + 2;
1670   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1671 
1672   /* build cache for off array entries formed */
1673   ierr = MatStashCreate_Private(B->comm,1,&B->stash);CHKERRQ(ierr);
1674   b->donotstash  = PETSC_FALSE;
1675   b->colmap      = PETSC_NULL;
1676   b->garray      = PETSC_NULL;
1677   b->roworiented = PETSC_TRUE;
1678 
1679 #if defined(PETSC_USE_MAT_SINGLE)
1680   /* stuff for MatSetValues_XXX in single precision */
1681   b->setvalueslen     = 0;
1682   b->setvaluescopy    = PETSC_NULL;
1683 #endif
1684 
1685   /* stuff used in block assembly */
1686   b->barray       = 0;
1687 
1688   /* stuff used for matrix vector multiply */
1689   b->lvec         = 0;
1690   b->Mvctx        = 0;
1691   b->slvec0       = 0;
1692   b->slvec0b      = 0;
1693   b->slvec1       = 0;
1694   b->slvec1a      = 0;
1695   b->slvec1b      = 0;
1696   b->sMvctx       = 0;
1697 
1698   /* stuff for MatGetRow() */
1699   b->rowindices   = 0;
1700   b->rowvalues    = 0;
1701   b->getrowactive = PETSC_FALSE;
1702 
1703   /* hash table stuff */
1704   b->ht           = 0;
1705   b->hd           = 0;
1706   b->ht_size      = 0;
1707   b->ht_flag      = PETSC_FALSE;
1708   b->ht_fact      = 0;
1709   b->ht_total_ct  = 0;
1710   b->ht_insert_ct = 0;
1711 
1712   ierr = PetscOptionsHasName(B->prefix,"-mat_use_hash_table",&flg);CHKERRQ(ierr);
1713   if (flg) {
1714     PetscReal fact = 1.39;
1715     ierr = MatSetOption(B,MAT_USE_HASH_TABLE);CHKERRQ(ierr);
1716     ierr = PetscOptionsGetReal(B->prefix,"-mat_use_hash_table",&fact,PETSC_NULL);CHKERRQ(ierr);
1717     if (fact <= 1.0) fact = 1.39;
1718     ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
1719     PetscLogInfo(0,"MatCreateMPISBAIJ:Hash table Factor used %5.2f\n",fact);
1720   }
1721   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
1722                                      "MatStoreValues_MPISBAIJ",
1723                                      MatStoreValues_MPISBAIJ);CHKERRQ(ierr);
1724   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
1725                                      "MatRetrieveValues_MPISBAIJ",
1726                                      MatRetrieveValues_MPISBAIJ);CHKERRQ(ierr);
1727   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
1728                                      "MatGetDiagonalBlock_MPISBAIJ",
1729                                      MatGetDiagonalBlock_MPISBAIJ);CHKERRQ(ierr);
1730   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
1731                                      "MatMPISBAIJSetPreallocation_MPISBAIJ",
1732                                      MatMPISBAIJSetPreallocation_MPISBAIJ);CHKERRQ(ierr);
1733   PetscFunctionReturn(0);
1734 }
1735 EXTERN_C_END
1736 
1737 #undef __FUNCT__
1738 #define __FUNCT__ "MatMPISBAIJSetPreallocation"
1739 /*@C
1740    MatMPISBAIJSetPreallocation - For good matrix assembly performance
1741    the user should preallocate the matrix storage by setting the parameters
1742    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1743    performance can be increased by more than a factor of 50.
1744 
1745    Collective on Mat
1746 
1747    Input Parameters:
1748 +  A - the matrix
1749 .  bs   - size of blocks
1750 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1751            submatrix  (same for all local rows)
1752 .  d_nnz - array containing the number of block nonzeros in the various block rows
1753            in the upper triangular and diagonal part of the diagonal portion of the
1754            local submatrix (possibly different for each block row) or PETSC_NULL.
1755            You must leave room for the diagonal entry even if it is zero.
1756 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1757            submatrix (same for all local rows).
1758 -  o_nnz - array containing the number of nonzeros in the various block rows of the
1759            off-diagonal portion of the local submatrix (possibly different for
1760            each block row) or PETSC_NULL.
1761 
1762 
1763    Options Database Keys:
1764 .   -mat_no_unroll - uses code that does not unroll the loops in the
1765                      block calculations (much slower)
1766 .   -mat_block_size - size of the blocks to use
1767 
1768    Notes:
1769 
1770    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1771    then it must be used on all processors that share the object for that argument.
1772 
1773    Storage Information:
1774    For a square global matrix we define each processor's diagonal portion
1775    to be its local rows and the corresponding columns (a square submatrix);
1776    each processor's off-diagonal portion encompasses the remainder of the
1777    local matrix (a rectangular submatrix).
1778 
1779    The user can specify preallocated storage for the diagonal part of
1780    the local submatrix with either d_nz or d_nnz (not both).  Set
1781    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1782    memory allocation.  Likewise, specify preallocated storage for the
1783    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1784 
1785    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1786    the figure below we depict these three local rows and all columns (0-11).
1787 
1788 .vb
1789            0 1 2 3 4 5 6 7 8 9 10 11
1790           -------------------
1791    row 3  |  o o o d d d o o o o o o
1792    row 4  |  o o o d d d o o o o o o
1793    row 5  |  o o o d d d o o o o o o
1794           -------------------
1795 .ve
1796 
1797    Thus, any entries in the d locations are stored in the d (diagonal)
1798    submatrix, and any entries in the o locations are stored in the
1799    o (off-diagonal) submatrix.  Note that the d matrix is stored in
1800    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
1801 
1802    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1803    plus the diagonal part of the d matrix,
1804    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1805    In general, for PDE problems in which most nonzeros are near the diagonal,
1806    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1807    or you will get TERRIBLE performance; see the users' manual chapter on
1808    matrices.
1809 
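   Example:
   A minimal sketch (the stencil and the counts below are illustrative
   assumptions, not requirements): for a scalar 5-point stencil each block row
   of the d matrix holds the diagonal plus at most two upper-triangular
   neighbors, with at most two entries landing in the o matrix, so one might
   preallocate with

.vb
   ierr = MatCreate(comm,m,m,M,M,&A);CHKERRQ(ierr);
   ierr = MatSetType(A,MATMPISBAIJ);CHKERRQ(ierr);
   ierr = MatMPISBAIJSetPreallocation(A,1,3,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
.ve
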
1810    Level: intermediate
1811 
1812 .keywords: matrix, block, aij, compressed row, sparse, parallel
1813 
1814 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1815 @*/
1816 int MatMPISBAIJSetPreallocation(Mat B,int bs,int d_nz,const int d_nnz[],int o_nz,const int o_nnz[])
1817 {
1818   int ierr,(*f)(Mat,int,int,const int[],int,const int[]);
1819 
1820   PetscFunctionBegin;
1821   ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",(void (**)(void))&f);CHKERRQ(ierr);
1822   if (f) {
1823     ierr = (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
1824   }
1825   PetscFunctionReturn(0);
1826 }
1827 
1828 #undef __FUNCT__
1829 #define __FUNCT__ "MatCreateMPISBAIJ"
1830 /*@C
1831    MatCreateMPISBAIJ - Creates a sparse parallel matrix in symmetric block AIJ format
1832    (block compressed row).  For good matrix assembly performance
1833    the user should preallocate the matrix storage by setting the parameters
1834    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
1835    performance can be increased by more than a factor of 50.
1836 
1837    Collective on MPI_Comm
1838 
1839    Input Parameters:
1840 +  comm - MPI communicator
1841 .  bs   - size of blocks
1842 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
1843            This value should be the same as the local size used in creating the
1844            y vector for the matrix-vector product y = Ax.
1845 .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
1846            This value should be the same as the local size used in creating the
1847            x vector for the matrix-vector product y = Ax.
1848 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
1849 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
1850 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
1851            submatrix  (same for all local rows)
1852 .  d_nnz - array containing the number of block nonzeros in the various block rows
1853            in the upper triangular portion of the diagonal portion of the local
1854            submatrix (possibly different for each block row) or PETSC_NULL.
1855            You must leave room for the diagonal entry even if it is zero.
1856 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
1857            submatrix (same for all local rows).
1858 -  o_nnz - array containing the number of nonzeros in the various block rows of the
1859            off-diagonal portion of the local submatrix (possibly different for
1860            each block row) or PETSC_NULL.
1861 
1862    Output Parameter:
1863 .  A - the matrix
1864 
1865    Options Database Keys:
1866 .   -mat_no_unroll - uses code that does not unroll the loops in the
1867                      block calculations (much slower)
1868 .   -mat_block_size - size of the blocks to use
1869 .   -mat_mpi - use the parallel matrix data structures even on one processor
1870                (defaults to using SeqSBAIJ format on one processor)
1871 
1872    Notes:
1873    The user MUST specify either the local or global matrix dimensions
1874    (possibly both).
1875 
1876    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
1877    then it must be used on all processors that share the object for that argument.
1878 
1879    Storage Information:
1880    For a square global matrix we define each processor's diagonal portion
1881    to be its local rows and the corresponding columns (a square submatrix);
1882    each processor's off-diagonal portion encompasses the remainder of the
1883    local matrix (a rectangular submatrix).
1884 
1885    The user can specify preallocated storage for the diagonal part of
1886    the local submatrix with either d_nz or d_nnz (not both).  Set
1887    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
1888    memory allocation.  Likewise, specify preallocated storage for the
1889    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
1890 
1891    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
1892    the figure below we depict these three local rows and all columns (0-11).
1893 
1894 .vb
1895            0 1 2 3 4 5 6 7 8 9 10 11
1896           -------------------
1897    row 3  |  o o o d d d o o o o o o
1898    row 4  |  o o o d d d o o o o o o
1899    row 5  |  o o o d d d o o o o o o
1900           -------------------
1901 .ve
1902 
1903    Thus, any entries in the d locations are stored in the d (diagonal)
1904    submatrix, and any entries in the o locations are stored in the
1905    o (off-diagonal) submatrix.  Note that the d matrix is stored in
1906    MATSEQSBAIJ format and the o submatrix in MATSEQBAIJ format.
1907 
1908    Now d_nz should indicate the number of block nonzeros per row in the upper triangular
1909    plus the diagonal part of the d matrix,
1910    and o_nz should indicate the number of block nonzeros per row in the o matrix.
1911    In general, for PDE problems in which most nonzeros are near the diagonal,
1912    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
1913    or you will get TERRIBLE performance; see the users' manual chapter on
1914    matrices.
1915 
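   Example:
   A minimal sketch (the global size and nonzero estimates here are
   illustrative only):

.vb
   Mat A;
   ierr = MatCreateMPISBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,100,100,
                            3,PETSC_NULL,1,PETSC_NULL,&A);CHKERRQ(ierr);
.ve
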
1916    Level: intermediate
1917 
1918 .keywords: matrix, block, aij, compressed row, sparse, parallel
1919 
1920 .seealso: MatCreate(), MatCreateSeqSBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
1921 @*/
1923 int MatCreateMPISBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,const int d_nnz[],int o_nz,const int o_nnz[],Mat *A)
1924 {
1925   int ierr,size;
1926 
1927   PetscFunctionBegin;
1928   ierr = MatCreate(comm,m,n,M,N,A);CHKERRQ(ierr);
1929   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1930   if (size > 1) {
1931     ierr = MatSetType(*A,MATMPISBAIJ);CHKERRQ(ierr);
1932     ierr = MatMPISBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
1933   } else {
1934     ierr = MatSetType(*A,MATSEQSBAIJ);CHKERRQ(ierr);
1935     ierr = MatSeqSBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
1936   }
1937   PetscFunctionReturn(0);
1938 }
1939 
1940 
1941 #undef __FUNCT__
1942 #define __FUNCT__ "MatDuplicate_MPISBAIJ"
1943 static int MatDuplicate_MPISBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
1944 {
1945   Mat          mat;
1946   Mat_MPISBAIJ *a,*oldmat = (Mat_MPISBAIJ*)matin->data;
1947   int          ierr,len=0;
1948 
1949   PetscFunctionBegin;
1950   *newmat       = 0;
1951   ierr = MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);CHKERRQ(ierr);
1952   ierr = MatSetType(mat,MATMPISBAIJ);CHKERRQ(ierr);
1953   mat->preallocated = PETSC_TRUE;
1954   a = (Mat_MPISBAIJ*)mat->data;
1955   a->bs  = oldmat->bs;
1956   a->bs2 = oldmat->bs2;
1957   a->mbs = oldmat->mbs;
1958   a->nbs = oldmat->nbs;
1959   a->Mbs = oldmat->Mbs;
1960   a->Nbs = oldmat->Nbs;
1961 
1962   a->rstart       = oldmat->rstart;
1963   a->rend         = oldmat->rend;
1964   a->cstart       = oldmat->cstart;
1965   a->cend         = oldmat->cend;
1966   a->size         = oldmat->size;
1967   a->rank         = oldmat->rank;
1968   a->donotstash   = oldmat->donotstash;
1969   a->roworiented  = oldmat->roworiented;
1970   a->rowindices   = 0;
1971   a->rowvalues    = 0;
1972   a->getrowactive = PETSC_FALSE;
1973   a->barray       = 0;
1974   a->rstart_bs    = oldmat->rstart_bs;
1975   a->rend_bs      = oldmat->rend_bs;
1976   a->cstart_bs    = oldmat->cstart_bs;
1977   a->cend_bs      = oldmat->cend_bs;
1978 
1979   /* hash table stuff */
1980   a->ht           = 0;
1981   a->hd           = 0;
1982   a->ht_size      = 0;
1983   a->ht_flag      = oldmat->ht_flag;
1984   a->ht_fact      = oldmat->ht_fact;
1985   a->ht_total_ct  = 0;
1986   a->ht_insert_ct = 0;
1987 
1988   ierr = PetscMalloc(3*(a->size+2)*sizeof(int),&a->rowners);CHKERRQ(ierr);
1989   PetscLogObjectMemory(mat,3*(a->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPISBAIJ));
1990   a->cowners    = a->rowners + a->size + 2;
1991   a->rowners_bs = a->cowners + a->size + 2;
1992   ierr = PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
1993   ierr = MatStashCreate_Private(matin->comm,1,&mat->stash);CHKERRQ(ierr);
1994   ierr = MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);CHKERRQ(ierr);
1995   if (oldmat->colmap) {
1996 #if defined (PETSC_USE_CTABLE)
1997     ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
1998 #else
1999     ierr = PetscMalloc((a->Nbs)*sizeof(int),&a->colmap);CHKERRQ(ierr);
2000     PetscLogObjectMemory(mat,(a->Nbs)*sizeof(int));
2001     ierr = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));CHKERRQ(ierr);
2002 #endif
2003   } else a->colmap = 0;
2004   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2005     ierr = PetscMalloc(len*sizeof(int),&a->garray);CHKERRQ(ierr);
2006     PetscLogObjectMemory(mat,len*sizeof(int));
2007     ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));CHKERRQ(ierr);
2008   } else a->garray = 0;
2009 
2010   ierr =  VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
2011   PetscLogObjectParent(mat,a->lvec);
2012   ierr =  VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
2013 
2014   PetscLogObjectParent(mat,a->Mvctx);
2015   ierr =  MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
2016   PetscLogObjectParent(mat,a->A);
2017   ierr =  MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
2018   PetscLogObjectParent(mat,a->B);
2019   ierr = PetscFListDuplicate(matin->qlist,&mat->qlist);CHKERRQ(ierr);
2020   *newmat = mat;
2021   PetscFunctionReturn(0);
2022 }
2023 
2024 #include "petscsys.h"
2025 
2026 EXTERN_C_BEGIN
2027 #undef __FUNCT__
2028 #define __FUNCT__ "MatLoad_MPISBAIJ"
2029 int MatLoad_MPISBAIJ(PetscViewer viewer,MatType type,Mat *newmat)
2030 {
2031   Mat          A;
2032   int          i,nz,ierr,j,rstart,rend,fd;
2033   PetscScalar  *vals,*buf;
2034   MPI_Comm     comm = ((PetscObject)viewer)->comm;
2035   MPI_Status   status;
2036   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
2037   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
2038   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
2039   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2040   int          dcount,kmax,k,nzcount,tmp;
2041 
2042   PetscFunctionBegin;
2043   ierr = PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
2044 
2045   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2046   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2047   if (!rank) {
2048     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
2049     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
2050     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not matrix object");
2051     if (header[3] < 0) {
2052       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPISBAIJ");
2053     }
2054   }
2055 
2056   ierr = MPI_Bcast(header+1,3,MPI_INT,0,comm);CHKERRQ(ierr);
2057   M = header[1]; N = header[2];
2058 
2059   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2060 
2061   /*
2062      This code adds extra rows to make sure the number of rows is
2063      divisible by the blocksize
2064   */
2065   Mbs        = M/bs;
2066   extra_rows = bs - M + bs*(Mbs);
2067   if (extra_rows == bs) extra_rows = 0;
2068   else                  Mbs++;
2069   if (extra_rows && !rank) {
2070     PetscLogInfo(0,"MatLoad_MPISBAIJ:Padding loaded matrix to match blocksize\n");
2071   }
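  /* e.g. M=10, bs=4: Mbs=10/4=2, extra_rows=4-10+8=2, so Mbs becomes 3 and two
     padding rows (unit diagonal, filled in below) are appended */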
2072 
2073   /* determine ownership of all rows */
2074   mbs        = Mbs/size + ((Mbs % size) > rank);
2075   m          = mbs*bs;
2076   ierr       = PetscMalloc(2*(size+2)*sizeof(int),&rowners);CHKERRQ(ierr);
2077   browners   = rowners + size + 1;
2078   ierr       = MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
2079   rowners[0] = 0;
2080   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2081   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2082   rstart = rowners[rank];
2083   rend   = rowners[rank+1];
2084 
2085   /* distribute row lengths to all processors */
2086   ierr = PetscMalloc((rend-rstart)*bs*sizeof(int),&locrowlens);CHKERRQ(ierr);
2087   if (!rank) {
2088     ierr = PetscMalloc((M+extra_rows)*sizeof(int),&rowlengths);CHKERRQ(ierr);
2089     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
2090     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2091     ierr = PetscMalloc(size*sizeof(int),&sndcounts);CHKERRQ(ierr);
2092     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2093     ierr = MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2094     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
2095   } else {
2096     ierr = MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2097   }
2098 
2099   if (!rank) {   /* procs[0] */
2100     /* calculate the number of nonzeros on each processor */
2101     ierr = PetscMalloc(size*sizeof(int),&procsnz);CHKERRQ(ierr);
2102     ierr = PetscMemzero(procsnz,size*sizeof(int));CHKERRQ(ierr);
2103     for (i=0; i<size; i++) {
2104       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2105         procsnz[i] += rowlengths[j];
2106       }
2107     }
2108     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2109 
2110     /* determine max buffer needed and allocate it */
2111     maxnz = 0;
2112     for (i=0; i<size; i++) {
2113       maxnz = PetscMax(maxnz,procsnz[i]);
2114     }
2115     ierr = PetscMalloc(maxnz*sizeof(int),&cols);CHKERRQ(ierr);
2116 
2117     /* read in my part of the matrix column indices  */
2118     nz     = procsnz[0];
2119     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2120     mycols = ibuf;
2121     if (size == 1)  nz -= extra_rows;
2122     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2123     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2124 
2125     /* read in the parts for all other processors (except the last) and ship them off */
2126     for (i=1; i<size-1; i++) {
2127       nz   = procsnz[i];
2128       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2129       ierr = MPI_Send(cols,nz,MPI_INT,i,tag,comm);CHKERRQ(ierr);
2130     }
2131     /* read in the stuff for the last proc */
2132     if (size != 1) {
2133       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2134       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2135       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2136       ierr = MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);CHKERRQ(ierr);
2137     }
2138     ierr = PetscFree(cols);CHKERRQ(ierr);
2139   } else {  /* procs[i], i>0 */
2140     /* determine buffer space needed for message */
2141     nz = 0;
2142     for (i=0; i<m; i++) {
2143       nz += locrowlens[i];
2144     }
2145     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2146     mycols = ibuf;
2147     /* receive message of column indices */
2148     ierr = MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
2149     ierr = MPI_Get_count(&status,MPI_INT,&maxnz);CHKERRQ(ierr);
2150     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2151   }
2152 
2153   /* loop over local rows, determining number of off diagonal entries */
2154   ierr     = PetscMalloc(2*(rend-rstart+1)*sizeof(int),&dlens);CHKERRQ(ierr);
2155   odlens   = dlens + (rend-rstart);
2156   ierr     = PetscMalloc(3*Mbs*sizeof(int),&mask);CHKERRQ(ierr);
2157   ierr     = PetscMemzero(mask,3*Mbs*sizeof(int));CHKERRQ(ierr);
2158   masked1  = mask    + Mbs;
2159   masked2  = masked1 + Mbs;
2160   rowcount = 0; nzcount = 0;
2161   for (i=0; i<mbs; i++) {
2162     dcount  = 0;
2163     odcount = 0;
2164     for (j=0; j<bs; j++) {
2165       kmax = locrowlens[rowcount];
2166       for (k=0; k<kmax; k++) {
2167         tmp = mycols[nzcount++]/bs; /* block col. index */
2168         if (!mask[tmp]) {
2169           mask[tmp] = 1;
2170           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp; /* entry in off-diag portion */
2171           else masked1[dcount++] = tmp; /* entry in diag portion */
2172         }
2173       }
2174       rowcount++;
2175     }
2176 
2177     dlens[i]  = dcount;  /* d_nzz[i] */
2178     odlens[i] = odcount; /* o_nzz[i] */
2179 
2180     /* zero out the mask elements we set */
2181     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2182     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2183   }
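  /* e.g. a block row whose entries fall in block columns {rstart, rstart+1, c}
     with c outside [rstart,rend) yields dlens[i] = 2 and odlens[i] = 1 */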
2184 
2185   /* create our matrix */
2186   ierr = MatCreate(comm,m,m,PETSC_DETERMINE,PETSC_DETERMINE,&A);CHKERRQ(ierr);
2187   ierr = MatSetType(A,type);CHKERRQ(ierr);
2188   ierr = MatMPISBAIJSetPreallocation(A,bs,0,dlens,0,odlens);CHKERRQ(ierr);
2189   ierr = MatSetOption(A,MAT_COLUMNS_SORTED);CHKERRQ(ierr);
2190 
2191   if (!rank) {
2192     ierr = PetscMalloc(maxnz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2193     /* read in my part of the matrix numerical values  */
2194     nz = procsnz[0];
2195     vals = buf;
2196     mycols = ibuf;
2197     if (size == 1)  nz -= extra_rows;
2198     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2199     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2200 
2201     /* insert into matrix */
2202     jj      = rstart*bs;
2203     for (i=0; i<m; i++) {
2204       ierr = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2205       mycols += locrowlens[i];
2206       vals   += locrowlens[i];
2207       jj++;
2208     }
2209 
2210     /* read in other processors (except the last one) and ship out */
2211     for (i=1; i<size-1; i++) {
2212       nz   = procsnz[i];
2213       vals = buf;
2214       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2215       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);CHKERRQ(ierr);
2216     }
2217     /* the last proc */
2218     if (size != 1){
2219       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2220       vals = buf;
2221       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2222       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2223       ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);CHKERRQ(ierr);
2224     }
2225     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2226 
2227   } else {
2228     /* receive numeric values */
2229     ierr = PetscMalloc(nz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2230 
2231     /* receive message of values */
2232     vals   = buf;
2233     mycols = ibuf;
2234     ierr   = MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);CHKERRQ(ierr);
2235     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2236     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with file");
2237 
2238     /* insert into matrix */
2239     jj      = rstart*bs;
2240     for (i=0; i<m; i++) {
2241       ierr    = MatSetValues_MPISBAIJ(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2242       mycols += locrowlens[i];
2243       vals   += locrowlens[i];
2244       jj++;
2245     }
2246   }
2247 
2248   ierr = PetscFree(locrowlens);CHKERRQ(ierr);
2249   ierr = PetscFree(buf);CHKERRQ(ierr);
2250   ierr = PetscFree(ibuf);CHKERRQ(ierr);
2251   ierr = PetscFree(rowners);CHKERRQ(ierr);
2252   ierr = PetscFree(dlens);CHKERRQ(ierr);
2253   ierr = PetscFree(mask);CHKERRQ(ierr);
2254   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2255   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2256   *newmat = A;
2257   PetscFunctionReturn(0);
2258 }
2259 EXTERN_C_END
2260 
2261 #undef __FUNCT__
2262 #define __FUNCT__ "MatMPISBAIJSetHashTableFactor"
2263 /*@
2264    MatMPISBAIJSetHashTableFactor - Sets the factor required to compute the size of the HashTable.
2265 
2266    Input Parameters:
2267 .  mat  - the matrix
2268 .  fact - factor
2269 
2270    Collective on Mat
2271 
2272    Level: advanced
2273 
2274   Notes:
2275    This can also be set by the command line option: -mat_use_hash_table fact
2276 
2277 .keywords: matrix, hashtable, factor, HT
2278 
2279 .seealso: MatSetOption()
2280 @*/
2281 int MatMPISBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2282 {
2283   PetscFunctionBegin;
2284   SETERRQ(1,"Function not yet written for SBAIJ format");
2285   /* PetscFunctionReturn(0); */
2286 }
2287 
2288 #undef __FUNCT__
2289 #define __FUNCT__ "MatGetRowMax_MPISBAIJ"
2290 int MatGetRowMax_MPISBAIJ(Mat A,Vec v)
2291 {
2292   Mat_MPISBAIJ *a = (Mat_MPISBAIJ*)A->data;
2293   Mat_SeqBAIJ  *b = (Mat_SeqBAIJ*)(a->B)->data;
2294   PetscReal    atmp;
2295   PetscReal    *work,*svalues,*rvalues;
2296   int          ierr,i,bs,mbs,*bi,*bj,brow,j,ncols,krow,kcol,col,row,Mbs,bcol;
2297   int          rank,size,*rowners_bs,dest,count,source;
2298   PetscScalar  *va;
2299   MatScalar    *ba;
2300   MPI_Status   stat;
2301 
2302   PetscFunctionBegin;
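  /*
     Only the upper triangle is stored locally, so an off-diagonal block entry
     (i,j) held on this process also realizes a candidate maximum for row j,
     which is owned by a higher-rank process.  The work array below accumulates
     those column maxima; each process then ships its slice to the owners.
  */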
2303   ierr = MatGetRowMax(a->A,v);CHKERRQ(ierr);
2304   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
2305 
2306   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
2307   ierr = MPI_Comm_rank(A->comm,&rank);CHKERRQ(ierr);
2308 
2309   bs   = a->bs;
2310   mbs  = a->mbs;
2311   Mbs  = a->Mbs;
2312   ba   = b->a;
2313   bi   = b->i;
2314   bj   = b->j;
2319 
2320   /* find ownerships */
2321   rowners_bs = a->rowners_bs;
2327 
2328   /* each proc creates an array to be distributed */
2329   ierr = PetscMalloc(bs*Mbs*sizeof(PetscReal),&work);CHKERRQ(ierr);
2330   ierr = PetscMemzero(work,bs*Mbs*sizeof(PetscReal));CHKERRQ(ierr);
2331 
2332   /* row_max for B */
2333   if (rank != size-1){
2334     for (i=0; i<mbs; i++) {
2335       ncols = bi[1] - bi[0]; bi++;
2336       brow  = bs*i;
2337       for (j=0; j<ncols; j++){
2338         bcol = bs*(*bj);
2339         for (kcol=0; kcol<bs; kcol++){
2340           col = bcol + kcol;                 /* local col index */
2341           col += rowners_bs[rank+1];      /* global col index */
2343           for (krow=0; krow<bs; krow++){
2344             atmp = PetscAbsScalar(*ba); ba++;
2345             row = brow + krow;    /* local row index */
2347             if (PetscRealPart(va[row]) < atmp) va[row] = atmp;
2348             if (work[col] < atmp) work[col] = atmp;
2349           }
2350         }
2351         bj++;
2352       }
2353     }
2359 
2360     /* send values to its owners */
2361     for (dest=rank+1; dest<size; dest++){
2362       svalues = work + rowners_bs[dest];
2363       count   = rowners_bs[dest+1]-rowners_bs[dest];
2364       ierr    = MPI_Send(svalues,count,MPIU_REAL,dest,rank,A->comm);CHKERRQ(ierr);
2369     }
2370   }
2371 
2372   /* receive values */
2373   if (rank){
2374     rvalues = work;
2375     count   = rowners_bs[rank+1]-rowners_bs[rank];
2376     for (source=0; source<rank; source++){
2377       ierr = MPI_Recv(rvalues,count,MPIU_REAL,MPI_ANY_SOURCE,MPI_ANY_TAG,A->comm,&stat);CHKERRQ(ierr);
2378       /* process values */
2379       for (i=0; i<count; i++){
2380         if (PetscRealPart(va[i]) < rvalues[i]) va[i] = rvalues[i];
2381       }
2386     }
2387   }
2388 
2389   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
2390   ierr = PetscFree(work);CHKERRQ(ierr);
2391   PetscFunctionReturn(0);
2392 }
2393 
2394 #undef __FUNCT__
2395 #define __FUNCT__ "MatRelax_MPISBAIJ"
2396 int MatRelax_MPISBAIJ(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,int its,int lits,Vec xx)
2397 {
2398   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2399   int            ierr,mbs=mat->mbs,bs=mat->bs;
2400   PetscScalar    mone=-1.0,*x,*b,*ptr,zero=0.0;
2401   Vec            bb1;
2402 
2403   PetscFunctionBegin;
2404   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %d and local its %d both positive",its,lits);
2405   if (bs > 1)
2406     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2407 
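  /*
     In outline: locally A = D + B, with D the diagonal block (upper triangle
     stored) and B the off-diagonal block.  Each sweep below forms
        bb1 = bb - B*x_ghost - (B^T*x contributions from other processes)
     via a single forward scatter of slvec0 = [-x; -B^T x] added into
     slvec1 = [bb; 0], then runs the sequential symmetric sweep with D.
  */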
2408   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2409     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2410       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);CHKERRQ(ierr);
2411       its--;
2412     }
2413 
2414     ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
2415     while (its--){
2416 
2417       /* lower triangular part: slvec0b = B^T*xx (negated below together with slvec0a) */
2418       ierr = (*mat->B->ops->multtranspose)(mat->B,xx,mat->slvec0b);CHKERRQ(ierr);
2419 
2420       /* copy xx into slvec0a */
2421       ierr = VecGetArray(mat->slvec0,&ptr);CHKERRQ(ierr);
2422       ierr = VecGetArray(xx,&x);CHKERRQ(ierr);
2423       ierr = PetscMemcpy(ptr,x,bs*mbs*sizeof(PetscScalar));CHKERRQ(ierr);
2424       ierr = VecRestoreArray(mat->slvec0,&ptr);CHKERRQ(ierr);
2425 
2426       ierr = VecScale(&mone,mat->slvec0);CHKERRQ(ierr);
2427 
2428       /* copy bb into slvec1a */
2429       ierr = VecGetArray(mat->slvec1,&ptr);CHKERRQ(ierr);
2430       ierr = VecGetArray(bb,&b);CHKERRQ(ierr);
2431       ierr = PetscMemcpy(ptr,b,bs*mbs*sizeof(PetscScalar));CHKERRQ(ierr);
2432       ierr = VecRestoreArray(mat->slvec1,&ptr);CHKERRQ(ierr);
2433 
2434       /* set slvec1b = 0 */
2435       ierr = VecSet(&zero,mat->slvec1b);CHKERRQ(ierr);
2436 
2437       ierr = VecScatterBegin(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);CHKERRQ(ierr);
2438       ierr = VecRestoreArray(xx,&x);CHKERRQ(ierr);
2439       ierr = VecRestoreArray(bb,&b);CHKERRQ(ierr);
2440       ierr = VecScatterEnd(mat->slvec0,mat->slvec1,ADD_VALUES,SCATTER_FORWARD,mat->sMvctx);CHKERRQ(ierr);
2441 
2442       /* upper triangular part: bb1 = bb1 - B*x */
2443       ierr = (*mat->B->ops->multadd)(mat->B,mat->slvec1b,mat->slvec1a,bb1);CHKERRQ(ierr);
2444 
2445       /* local diagonal sweep */
2446       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);CHKERRQ(ierr);
2447     }
2448     ierr = VecDestroy(bb1);CHKERRQ(ierr);
2449   } else {
2450     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2451   }
2452   PetscFunctionReturn(0);
2453 }
2454 
2455 #undef __FUNCT__
2456 #define __FUNCT__ "MatRelax_MPISBAIJ_2comm"
2457 int MatRelax_MPISBAIJ_2comm(Mat matin,Vec bb,PetscReal omega,MatSORType flag,PetscReal fshift,int its,int lits,Vec xx)
2458 {
2459   Mat_MPISBAIJ   *mat = (Mat_MPISBAIJ*)matin->data;
2460   int            ierr;
2461   PetscScalar    mone=-1.0;
2462   Vec            lvec1,bb1;
2463 
2464   PetscFunctionBegin;
2465   if (its <= 0 || lits <= 0) SETERRQ2(PETSC_ERR_ARG_WRONG,"Relaxation requires global its %d and local its %d both positive",its,lits);
2466   if (mat->bs > 1)
2467     SETERRQ(PETSC_ERR_SUP,"SSOR for block size > 1 is not yet implemented");
2468 
2469   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP){
2470     if ( flag & SOR_ZERO_INITIAL_GUESS ) {
2471       ierr = (*mat->A->ops->relax)(mat->A,bb,omega,flag,fshift,lits,lits,xx);CHKERRQ(ierr);
2472       its--;
2473     }
2474 
2475     ierr = VecDuplicate(mat->lvec,&lvec1);CHKERRQ(ierr);
2476     ierr = VecDuplicate(bb,&bb1);CHKERRQ(ierr);
2477     while (its--){
2478       ierr = VecScatterBegin(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);CHKERRQ(ierr);
2479 
2480       /* lower diagonal part: bb1 = bb - B^T*xx */
2481       ierr = (*mat->B->ops->multtranspose)(mat->B,xx,lvec1);CHKERRQ(ierr);
2482       ierr = VecScale(&mone,lvec1);CHKERRQ(ierr);
2483 
2484       ierr = VecScatterEnd(xx,mat->lvec,INSERT_VALUES,SCATTER_FORWARD,mat->Mvctx);CHKERRQ(ierr);
2485       ierr = VecCopy(bb,bb1);CHKERRQ(ierr);
2486       ierr = VecScatterBegin(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);CHKERRQ(ierr);
2487 
2488       /* upper diagonal part: bb1 = bb1 - B*x */
2489       ierr = VecScale(&mone,mat->lvec);CHKERRQ(ierr);
2490       ierr = (*mat->B->ops->multadd)(mat->B,mat->lvec,bb1,bb1);CHKERRQ(ierr);
2491 
2492       ierr = VecScatterEnd(lvec1,bb1,ADD_VALUES,SCATTER_REVERSE,mat->Mvctx);CHKERRQ(ierr);
2493 
2494       /* diagonal sweep */
2495       ierr = (*mat->A->ops->relax)(mat->A,bb1,omega,SOR_SYMMETRIC_SWEEP,fshift,lits,lits,xx);CHKERRQ(ierr);
2496     }
2497     ierr = VecDestroy(lvec1);CHKERRQ(ierr);
2498     ierr = VecDestroy(bb1);CHKERRQ(ierr);
2499   } else {
2500     SETERRQ(PETSC_ERR_SUP,"MatSORType is not supported for SBAIJ matrix format");
2501   }
2502   PetscFunctionReturn(0);
2503 }
2504 
2505