1 /*$Id: mpibaij.c,v 1.234 2001/09/25 22:56:49 balay Exp $*/
2 
3 #include "src/mat/impls/baij/mpi/mpibaij.h"   /*I  "petscmat.h"  I*/
4 #include "src/vec/vecimpl.h"
5 
6 EXTERN int MatSetUpMultiply_MPIBAIJ(Mat);
7 EXTERN int DisAssemble_MPIBAIJ(Mat);
8 EXTERN int MatIncreaseOverlap_MPIBAIJ(Mat,int,IS[],int);
9 EXTERN int MatGetSubMatrices_MPIBAIJ(Mat,int,const IS[],const IS[],MatReuse,Mat *[]);
10 EXTERN int MatGetValues_SeqBAIJ(Mat,int,const int[],int,const int [],PetscScalar []);
11 EXTERN int MatSetValues_SeqBAIJ(Mat,int,const int[],int,const int [],const PetscScalar [],InsertMode);
12 EXTERN int MatSetValuesBlocked_SeqBAIJ(Mat,int,const int[],int,const int[],const PetscScalar[],InsertMode);
13 EXTERN int MatGetRow_SeqBAIJ(Mat,int,int*,int*[],PetscScalar*[]);
14 EXTERN int MatRestoreRow_SeqBAIJ(Mat,int,int*,int*[],PetscScalar*[]);
15 EXTERN int MatPrintHelp_SeqBAIJ(Mat);
16 EXTERN int MatZeroRows_SeqBAIJ(Mat,IS,const PetscScalar*);
17 
18 /*  UGLY, ugly, ugly
19    When MatScalar == PetscScalar there is no separate MatSetValuesBlocked_MPIBAIJ_MatScalar();
20    the macros below alias the ..._MatScalar() names to the regular functions. Otherwise the
21    ..._MatScalar() variants take matrix elements already in single precision and insert them
22    directly into the single precision data structures, while MatSetValuesBlocked_MPIBAIJ()
23    first converts the entries to single precision and then calls ..._MatScalar() to store them.
24 */
25 #if defined(PETSC_USE_MAT_SINGLE)
26 EXTERN int MatSetValuesBlocked_SeqBAIJ_MatScalar(Mat,int,const int*,int,const int*,const MatScalar*,InsertMode);
27 EXTERN int MatSetValues_MPIBAIJ_MatScalar(Mat,int,const int*,int,const int*,const MatScalar*,InsertMode);
28 EXTERN int MatSetValuesBlocked_MPIBAIJ_MatScalar(Mat,int,const int*,int,const int*,const MatScalar*,InsertMode);
29 EXTERN int MatSetValues_MPIBAIJ_HT_MatScalar(Mat,int,const int*,int,const int*,const MatScalar*,InsertMode);
30 EXTERN int MatSetValuesBlocked_MPIBAIJ_HT_MatScalar(Mat,int,const int*,int,const int*,const MatScalar*,InsertMode);
31 #else
32 #define MatSetValuesBlocked_SeqBAIJ_MatScalar      MatSetValuesBlocked_SeqBAIJ
33 #define MatSetValues_MPIBAIJ_MatScalar             MatSetValues_MPIBAIJ
34 #define MatSetValuesBlocked_MPIBAIJ_MatScalar      MatSetValuesBlocked_MPIBAIJ
35 #define MatSetValues_MPIBAIJ_HT_MatScalar          MatSetValues_MPIBAIJ_HT
36 #define MatSetValuesBlocked_MPIBAIJ_HT_MatScalar   MatSetValuesBlocked_MPIBAIJ_HT
37 #endif
38 
39 #undef __FUNCT__
40 #define __FUNCT__ "MatGetRowMax_MPIBAIJ"
41 int MatGetRowMax_MPIBAIJ(Mat A,Vec v)
42 {
43   Mat_MPIBAIJ  *a = (Mat_MPIBAIJ*)A->data;
44   int          ierr,i;
45   PetscScalar  *va,*vb;
46   Vec          vtmp;
47 
48   PetscFunctionBegin;
49 
50   ierr = MatGetRowMax(a->A,v);CHKERRQ(ierr);
51   ierr = VecGetArray(v,&va);CHKERRQ(ierr);
52 
53   ierr = VecCreateSeq(PETSC_COMM_SELF,A->m,&vtmp);CHKERRQ(ierr);
54   ierr = MatGetRowMax(a->B,vtmp);CHKERRQ(ierr);
55   ierr = VecGetArray(vtmp,&vb);CHKERRQ(ierr);
56 
57   for (i=0; i<A->m; i++){
58     if (PetscAbsScalar(va[i]) < PetscAbsScalar(vb[i])) va[i] = vb[i];
59   }
60 
61   ierr = VecRestoreArray(v,&va);CHKERRQ(ierr);
62   ierr = VecRestoreArray(vtmp,&vb);CHKERRQ(ierr);
63   ierr = VecDestroy(vtmp);CHKERRQ(ierr);
64 
65   PetscFunctionReturn(0);
66 }
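
/*
   Usage sketch (illustrative, not part of the original source): v must be
   a parallel vector whose local size matches the local row count of A,

       Vec v;
       ierr = VecCreateMPI(PETSC_COMM_WORLD,m_local,M_global,&v);CHKERRQ(ierr);
       ierr = MatGetRowMax(A,v);CHKERRQ(ierr);
       ierr = VecDestroy(v);CHKERRQ(ierr);

   where m_local/M_global (hypothetical names) are assumed to match the row
   layout of A; each local entry of v then holds the largest-magnitude
   entry of the corresponding row.
*/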
67 
68 EXTERN_C_BEGIN
69 #undef __FUNCT__
70 #define __FUNCT__ "MatStoreValues_MPIBAIJ"
71 int MatStoreValues_MPIBAIJ(Mat mat)
72 {
73   Mat_MPIBAIJ *aij = (Mat_MPIBAIJ *)mat->data;
74   int         ierr;
75 
76   PetscFunctionBegin;
77   ierr = MatStoreValues(aij->A);CHKERRQ(ierr);
78   ierr = MatStoreValues(aij->B);CHKERRQ(ierr);
79   PetscFunctionReturn(0);
80 }
81 EXTERN_C_END
82 
83 EXTERN_C_BEGIN
84 #undef __FUNCT__
85 #define __FUNCT__ "MatRetrieveValues_MPIBAIJ"
86 int MatRetrieveValues_MPIBAIJ(Mat mat)
87 {
88   Mat_MPIBAIJ *aij = (Mat_MPIBAIJ *)mat->data;
89   int         ierr;
90 
91   PetscFunctionBegin;
92   ierr = MatRetrieveValues(aij->A);CHKERRQ(ierr);
93   ierr = MatRetrieveValues(aij->B);CHKERRQ(ierr);
94   PetscFunctionReturn(0);
95 }
96 EXTERN_C_END
97 
98 /*
99      Local utility routine that creates a mapping from the global column
100    number to the local number in the off-diagonal part of the local
101    storage of the matrix.  This is done in a non-scalable way since the
102    length of colmap equals the number of block columns in the global matrix.
103 */
104 #undef __FUNCT__
105 #define __FUNCT__ "CreateColmap_MPIBAIJ_Private"
106 static int CreateColmap_MPIBAIJ_Private(Mat mat)
107 {
108   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
109   Mat_SeqBAIJ *B = (Mat_SeqBAIJ*)baij->B->data;
110   int         nbs = B->nbs,i,bs=B->bs,ierr;
111 
112   PetscFunctionBegin;
113 #if defined (PETSC_USE_CTABLE)
114   ierr = PetscTableCreate(baij->nbs,&baij->colmap);CHKERRQ(ierr);
115   for (i=0; i<nbs; i++){
116     ierr = PetscTableAdd(baij->colmap,baij->garray[i]+1,i*bs+1);CHKERRQ(ierr);
117   }
118 #else
119   ierr = PetscMalloc((baij->Nbs+1)*sizeof(int),&baij->colmap);CHKERRQ(ierr);
120   PetscLogObjectMemory(mat,baij->Nbs*sizeof(int));
121   ierr = PetscMemzero(baij->colmap,baij->Nbs*sizeof(int));CHKERRQ(ierr);
122   for (i=0; i<nbs; i++) baij->colmap[baij->garray[i]] = i*bs+1;
123 #endif
124   PetscFunctionReturn(0);
125 }
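
/*
   Worked example (illustrative numbers): with bs = 2 and garray = {3,7,9},
   the non-CTABLE branch above yields

       colmap[3] = 0*2+1 = 1,  colmap[7] = 1*2+1 = 3,  colmap[9] = 2*2+1 = 5

   i.e. colmap[g] is 1 + bs*(local block column of global block column g),
   with 0 meaning g does not occur in the off-diagonal part.
*/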
126 
127 #define CHUNKSIZE  10
128 
129 #define  MatSetValues_SeqBAIJ_A_Private(row,col,value,addv) \
130 { \
131  \
132     brow = row/bs;  \
133     rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
134     rmax = aimax[brow]; nrow = ailen[brow]; \
135       bcol = col/bs; \
136       ridx = row % bs; cidx = col % bs; \
137       low = 0; high = nrow; \
138       while (high-low > 3) { \
139         t = (low+high)/2; \
140         if (rp[t] > bcol) high = t; \
141         else              low  = t; \
142       } \
143       for (_i=low; _i<high; _i++) { \
144         if (rp[_i] > bcol) break; \
145         if (rp[_i] == bcol) { \
146           bap  = ap +  bs2*_i + bs*cidx + ridx; \
147           if (addv == ADD_VALUES) *bap += value;  \
148           else                    *bap  = value;  \
149           goto a_noinsert; \
150         } \
151       } \
152       if (a->nonew == 1) goto a_noinsert; \
153       else if (a->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) into matrix", row, col); \
154       if (nrow >= rmax) { \
155         /* there is no extra room in row, therefore enlarge */ \
156         int       new_nz = ai[a->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
157         MatScalar *new_a; \
158  \
159         if (a->nonew == -2) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) in the matrix", row, col); \
160  \
161         /* malloc new storage space */ \
162         len     = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(a->mbs+1)*sizeof(int); \
163         ierr = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
164         new_j   = (int*)(new_a + bs2*new_nz); \
165         new_i   = new_j + new_nz; \
166  \
167         /* copy over old data into new slots */ \
168         for (ii=0; ii<brow+1; ii++) {new_i[ii] = ai[ii];} \
169         for (ii=brow+1; ii<a->mbs+1; ii++) {new_i[ii] = ai[ii]+CHUNKSIZE;} \
170         ierr = PetscMemcpy(new_j,aj,(ai[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
171         len = (new_nz - CHUNKSIZE - ai[brow] - nrow); \
172         ierr = PetscMemcpy(new_j+ai[brow]+nrow+CHUNKSIZE,aj+ai[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
173         ierr = PetscMemcpy(new_a,aa,(ai[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
174         ierr = PetscMemzero(new_a+bs2*(ai[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
175         ierr = PetscMemcpy(new_a+bs2*(ai[brow]+nrow+CHUNKSIZE), \
176                     aa+bs2*(ai[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
177         /* free up old matrix storage */ \
178         ierr = PetscFree(a->a);CHKERRQ(ierr);  \
179         if (!a->singlemalloc) { \
180           ierr = PetscFree(a->i);CHKERRQ(ierr); \
181           ierr = PetscFree(a->j);CHKERRQ(ierr);\
182         } \
183         aa = a->a = new_a; ai = a->i = new_i; aj = a->j = new_j;  \
184         a->singlemalloc = PETSC_TRUE; \
185  \
186         rp   = aj + ai[brow]; ap = aa + bs2*ai[brow]; \
187         rmax = aimax[brow] = aimax[brow] + CHUNKSIZE; \
188         PetscLogObjectMemory(A,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
189         a->maxnz += bs2*CHUNKSIZE; \
190         a->reallocs++; \
191         a->nz++; \
192       } \
193       N = nrow++ - 1;  \
194       /* shift up all the later entries in this row */ \
195       for (ii=N; ii>=_i; ii--) { \
196         rp[ii+1] = rp[ii]; \
197         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
198       } \
199       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr); }  \
200       rp[_i]                      = bcol;  \
201       ap[bs2*_i + bs*cidx + ridx] = value;  \
202       a_noinsert:; \
203     ailen[brow] = nrow; \
204 }
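
/*
   Storage-layout note (added for clarity; it mirrors the realloc code in
   the macro above): the single PetscMalloc of len bytes holds the three
   CSR-style arrays back to back,

       new_a : bs2*new_nz   MatScalars  (block values)
       new_j : new_nz       ints        (block column indices)
       new_i : a->mbs+1     ints        (row offsets)

   which is why singlemalloc is set afterwards and only a->a needs freeing
   on the next reallocation.
*/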
205 
206 #define  MatSetValues_SeqBAIJ_B_Private(row,col,value,addv) \
207 { \
208     brow = row/bs;  \
209     rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
210     rmax = bimax[brow]; nrow = bilen[brow]; \
211       bcol = col/bs; \
212       ridx = row % bs; cidx = col % bs; \
213       low = 0; high = nrow; \
214       while (high-low > 3) { \
215         t = (low+high)/2; \
216         if (rp[t] > bcol) high = t; \
217         else              low  = t; \
218       } \
219       for (_i=low; _i<high; _i++) { \
220         if (rp[_i] > bcol) break; \
221         if (rp[_i] == bcol) { \
222           bap  = ap +  bs2*_i + bs*cidx + ridx; \
223           if (addv == ADD_VALUES) *bap += value;  \
224           else                    *bap  = value;  \
225           goto b_noinsert; \
226         } \
227       } \
228       if (b->nonew == 1) goto b_noinsert; \
229       else if (b->nonew == -1) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) into matrix", row, col); \
230       if (nrow >= rmax) { \
231         /* there is no extra room in row, therefore enlarge */ \
232         int       new_nz = bi[b->mbs] + CHUNKSIZE,len,*new_i,*new_j; \
233         MatScalar *new_a; \
234  \
235         if (b->nonew == -2) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Inserting a new nonzero (%d, %d) in the matrix", row, col); \
236  \
237         /* malloc new storage space */ \
238         len     = new_nz*(sizeof(int)+bs2*sizeof(MatScalar))+(b->mbs+1)*sizeof(int); \
239         ierr    = PetscMalloc(len,&new_a);CHKERRQ(ierr); \
240         new_j   = (int*)(new_a + bs2*new_nz); \
241         new_i   = new_j + new_nz; \
242  \
243         /* copy over old data into new slots */ \
244         for (ii=0; ii<brow+1; ii++) {new_i[ii] = bi[ii];} \
245         for (ii=brow+1; ii<b->mbs+1; ii++) {new_i[ii] = bi[ii]+CHUNKSIZE;} \
246         ierr = PetscMemcpy(new_j,bj,(bi[brow]+nrow)*sizeof(int));CHKERRQ(ierr); \
247         len  = (new_nz - CHUNKSIZE - bi[brow] - nrow); \
248         ierr = PetscMemcpy(new_j+bi[brow]+nrow+CHUNKSIZE,bj+bi[brow]+nrow,len*sizeof(int));CHKERRQ(ierr); \
249         ierr = PetscMemcpy(new_a,ba,(bi[brow]+nrow)*bs2*sizeof(MatScalar));CHKERRQ(ierr); \
250         ierr = PetscMemzero(new_a+bs2*(bi[brow]+nrow),bs2*CHUNKSIZE*sizeof(MatScalar));CHKERRQ(ierr); \
251         ierr = PetscMemcpy(new_a+bs2*(bi[brow]+nrow+CHUNKSIZE), \
252                     ba+bs2*(bi[brow]+nrow),bs2*len*sizeof(MatScalar));CHKERRQ(ierr);  \
253         /* free up old matrix storage */ \
254         ierr = PetscFree(b->a);CHKERRQ(ierr);  \
255         if (!b->singlemalloc) { \
256           ierr = PetscFree(b->i);CHKERRQ(ierr); \
257           ierr = PetscFree(b->j);CHKERRQ(ierr); \
258         } \
259         ba = b->a = new_a; bi = b->i = new_i; bj = b->j = new_j;  \
260         b->singlemalloc = PETSC_TRUE; \
261  \
262         rp   = bj + bi[brow]; ap = ba + bs2*bi[brow]; \
263         rmax = bimax[brow] = bimax[brow] + CHUNKSIZE; \
264         PetscLogObjectMemory(B,CHUNKSIZE*(sizeof(int) + bs2*sizeof(MatScalar))); \
265         b->maxnz += bs2*CHUNKSIZE; \
266         b->reallocs++; \
267         b->nz++; \
268       } \
269       N = nrow++ - 1;  \
270       /* shift up all the later entries in this row */ \
271       for (ii=N; ii>=_i; ii--) { \
272         rp[ii+1] = rp[ii]; \
273         ierr = PetscMemcpy(ap+bs2*(ii+1),ap+bs2*(ii),bs2*sizeof(MatScalar));CHKERRQ(ierr); \
274       } \
275       if (N>=_i) { ierr = PetscMemzero(ap+bs2*_i,bs2*sizeof(MatScalar));CHKERRQ(ierr);}  \
276       rp[_i]                      = bcol;  \
277       ap[bs2*_i + bs*cidx + ridx] = value;  \
278       b_noinsert:; \
279     bilen[brow] = nrow; \
280 }
281 
282 #if defined(PETSC_USE_MAT_SINGLE)
283 #undef __FUNCT__
284 #define __FUNCT__ "MatSetValues_MPIBAIJ"
285 int MatSetValues_MPIBAIJ(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
286 {
287   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
288   int         ierr,i,N = m*n;
289   MatScalar   *vsingle;
290 
291   PetscFunctionBegin;
292   if (N > b->setvalueslen) {
293     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
294     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
295     b->setvalueslen  = N;
296   }
297   vsingle = b->setvaluescopy;
298 
299   for (i=0; i<N; i++) {
300     vsingle[i] = v[i];
301   }
302   ierr = MatSetValues_MPIBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
303   PetscFunctionReturn(0);
304 }
305 
306 #undef __FUNCT__
307 #define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ"
308 int MatSetValuesBlocked_MPIBAIJ(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
309 {
310   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
311   int         ierr,i,N = m*n*b->bs2;
312   MatScalar   *vsingle;
313 
314   PetscFunctionBegin;
315   if (N > b->setvalueslen) {
316     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
317     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
318     b->setvalueslen  = N;
319   }
320   vsingle = b->setvaluescopy;
321   for (i=0; i<N; i++) {
322     vsingle[i] = v[i];
323   }
324   ierr = MatSetValuesBlocked_MPIBAIJ_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
325   PetscFunctionReturn(0);
326 }
327 
328 #undef __FUNCT__
329 #define __FUNCT__ "MatSetValues_MPIBAIJ_HT"
330 int MatSetValues_MPIBAIJ_HT(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
331 {
332   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
333   int         ierr,i,N = m*n;
334   MatScalar   *vsingle;
335 
336   PetscFunctionBegin;
337   if (N > b->setvalueslen) {
338     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
339     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
340     b->setvalueslen  = N;
341   }
342   vsingle = b->setvaluescopy;
343   for (i=0; i<N; i++) {
344     vsingle[i] = v[i];
345   }
346   ierr = MatSetValues_MPIBAIJ_HT_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
347   PetscFunctionReturn(0);
348 }
349 
350 #undef __FUNCT__
351 #define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ_HT"
352 int MatSetValuesBlocked_MPIBAIJ_HT(Mat mat,int m,const int im[],int n,const int in[],const PetscScalar v[],InsertMode addv)
353 {
354   Mat_MPIBAIJ *b = (Mat_MPIBAIJ*)mat->data;
355   int         ierr,i,N = m*n*b->bs2;
356   MatScalar   *vsingle;
357 
358   PetscFunctionBegin;
359   if (N > b->setvalueslen) {
360     if (b->setvaluescopy) {ierr = PetscFree(b->setvaluescopy);CHKERRQ(ierr);}
361     ierr = PetscMalloc(N*sizeof(MatScalar),&b->setvaluescopy);CHKERRQ(ierr);
362     b->setvalueslen  = N;
363   }
364   vsingle = b->setvaluescopy;
365   for (i=0; i<N; i++) {
366     vsingle[i] = v[i];
367   }
368   ierr = MatSetValuesBlocked_MPIBAIJ_HT_MatScalar(mat,m,im,n,in,vsingle,addv);CHKERRQ(ierr);
369   PetscFunctionReturn(0);
370 }
371 #endif
372 
373 #undef __FUNCT__
374 #define __FUNCT__ "MatSetValues_MPIBAIJ_MatScalar"
375 int MatSetValues_MPIBAIJ_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
376 {
377   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
378   MatScalar   value;
379   PetscTruth  roworiented = baij->roworiented;
380   int         ierr,i,j,row,col;
381   int         rstart_orig=baij->rstart_bs;
382   int         rend_orig=baij->rend_bs,cstart_orig=baij->cstart_bs;
383   int         cend_orig=baij->cend_bs,bs=baij->bs;
384 
385   /* Some Variables required in the macro */
386   Mat         A = baij->A;
387   Mat_SeqBAIJ *a = (Mat_SeqBAIJ*)(A)->data;
388   int         *aimax=a->imax,*ai=a->i,*ailen=a->ilen,*aj=a->j;
389   MatScalar   *aa=a->a;
390 
391   Mat         B = baij->B;
392   Mat_SeqBAIJ *b = (Mat_SeqBAIJ*)(B)->data;
393   int         *bimax=b->imax,*bi=b->i,*bilen=b->ilen,*bj=b->j;
394   MatScalar   *ba=b->a;
395 
396   int         *rp,ii,nrow,_i,rmax,N,brow,bcol;
397   int         low,high,t,ridx,cidx,bs2=a->bs2;
398   MatScalar   *ap,*bap;
399 
400   PetscFunctionBegin;
401   for (i=0; i<m; i++) {
402     if (im[i] < 0) continue;
403 #if defined(PETSC_USE_BOPT_g)
404     if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",im[i],mat->M-1);
405 #endif
406     if (im[i] >= rstart_orig && im[i] < rend_orig) {
407       row = im[i] - rstart_orig;
408       for (j=0; j<n; j++) {
409         if (in[j] >= cstart_orig && in[j] < cend_orig){
410           col = in[j] - cstart_orig;
411           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
412           MatSetValues_SeqBAIJ_A_Private(row,col,value,addv);
413           /* ierr = MatSetValues_SeqBAIJ(baij->A,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
414         } else if (in[j] < 0) continue;
415 #if defined(PETSC_USE_BOPT_g)
416         else if (in[j] >= mat->N) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",in[j],mat->N-1);}
417 #endif
418         else {
419           if (mat->was_assembled) {
420             if (!baij->colmap) {
421               ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
422             }
423 #if defined (PETSC_USE_CTABLE)
424             ierr = PetscTableFind(baij->colmap,in[j]/bs + 1,&col);CHKERRQ(ierr);
425             col  = col - 1;
426 #else
427             col = baij->colmap[in[j]/bs] - 1;
428 #endif
429             if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
430               ierr = DisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
431               col =  in[j];
432               /* Reinitialize the variables required by MatSetValues_SeqBAIJ_B_Private() */
433               B = baij->B;
434               b = (Mat_SeqBAIJ*)(B)->data;
435               bimax=b->imax;bi=b->i;bilen=b->ilen;bj=b->j;
436               ba=b->a;
437             } else col += in[j]%bs;
438           } else col = in[j];
439           if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
440           MatSetValues_SeqBAIJ_B_Private(row,col,value,addv);
441           /* ierr = MatSetValues_SeqBAIJ(baij->B,1,&row,1,&col,&value,addv);CHKERRQ(ierr); */
442         }
443       }
444     } else {
445       if (!baij->donotstash) {
446         if (roworiented) {
447           ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);CHKERRQ(ierr);
448         } else {
449           ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);CHKERRQ(ierr);
450         }
451       }
452     }
453   }
454   PetscFunctionReturn(0);
455 }
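
/*
   Caller-side sketch (illustrative example, not part of the original file):
   entries whose rows live on other processes are stashed above and shipped
   during assembly, so the standard usage is

       PetscScalar one = 1.0;
       ierr = MatSetValues(mat,1,&i,1,&j,&one,ADD_VALUES);CHKERRQ(ierr);
       ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   with i,j global indices; negative indices are silently ignored.
*/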
456 
457 #undef __FUNCT__
458 #define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ_MatScalar"
459 int MatSetValuesBlocked_MPIBAIJ_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
460 {
461   Mat_MPIBAIJ     *baij = (Mat_MPIBAIJ*)mat->data;
462   const MatScalar *value;
463   MatScalar       *barray=baij->barray;
464   PetscTruth      roworiented = baij->roworiented;
465   int             ierr,i,j,ii,jj,row,col,rstart=baij->rstart;
466   int             rend=baij->rend,cstart=baij->cstart,stepval;
467   int             cend=baij->cend,bs=baij->bs,bs2=baij->bs2;
468 
469   PetscFunctionBegin;
470   if(!barray) {
471     ierr         = PetscMalloc(bs2*sizeof(MatScalar),&barray);CHKERRQ(ierr);
472     baij->barray = barray;
473   }
474 
475   if (roworiented) {
476     stepval = (n-1)*bs;
477   } else {
478     stepval = (m-1)*bs;
479   }
480   for (i=0; i<m; i++) {
481     if (im[i] < 0) continue;
482 #if defined(PETSC_USE_BOPT_g)
483     if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large, row %d max %d",im[i],baij->Mbs-1);
484 #endif
485     if (im[i] >= rstart && im[i] < rend) {
486       row = im[i] - rstart;
487       for (j=0; j<n; j++) {
488         /* If only one block (n == 1 row-oriented, or m == 1 column-oriented) the data is contiguous; no copy is required */
489         if ((roworiented) && (n == 1)) {
490           barray = (MatScalar*)v + i*bs2;
491         } else if((!roworiented) && (m == 1)) {
492           barray = (MatScalar*)v + j*bs2;
493         } else { /* Here a copy is required */
494           if (roworiented) {
495             value = v + i*(stepval+bs)*bs + j*bs;
496           } else {
497             value = v + j*(stepval+bs)*bs + i*bs;
498           }
499           for (ii=0; ii<bs; ii++,value+=stepval) {
500             for (jj=0; jj<bs; jj++) {
501               *barray++  = *value++;
502             }
503           }
504           barray -=bs2;
505         }
506 
507         if (in[j] >= cstart && in[j] < cend){
508           col  = in[j] - cstart;
509           ierr = MatSetValuesBlocked_SeqBAIJ_MatScalar(baij->A,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
510         }
511         else if (in[j] < 0) continue;
512 #if defined(PETSC_USE_BOPT_g)
513         else if (in[j] >= baij->Nbs) {SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large, col %d max %d",in[j],baij->Nbs-1);}
514 #endif
515         else {
516           if (mat->was_assembled) {
517             if (!baij->colmap) {
518               ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
519             }
520 
521 #if defined(PETSC_USE_BOPT_g)
522 #if defined (PETSC_USE_CTABLE)
523             { int data;
524               ierr = PetscTableFind(baij->colmap,in[j]+1,&data);CHKERRQ(ierr);
525               if ((data - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
526             }
527 #else
528             if ((baij->colmap[in[j]] - 1) % bs) SETERRQ(PETSC_ERR_PLIB,"Incorrect colmap");
529 #endif
530 #endif
531 #if defined (PETSC_USE_CTABLE)
532 	    ierr = PetscTableFind(baij->colmap,in[j]+1,&col);CHKERRQ(ierr);
533             col  = (col - 1)/bs;
534 #else
535             col = (baij->colmap[in[j]] - 1)/bs;
536 #endif
537             if (col < 0 && !((Mat_SeqBAIJ*)(baij->A->data))->nonew) {
538               ierr = DisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
539               col =  in[j];
540             }
541           }
542           else col = in[j];
543           ierr = MatSetValuesBlocked_SeqBAIJ_MatScalar(baij->B,1,&row,1,&col,barray,addv);CHKERRQ(ierr);
544         }
545       }
546     } else {
547       if (!baij->donotstash) {
548         if (roworiented) {
549           ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
550         } else {
551           ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
552         }
553       }
554     }
555   }
556   PetscFunctionReturn(0);
557 }
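
/*
   Layout example for the roworiented case (illustrative, bs = 2, n = 2
   blocks in one block row): v stores the blocks interleaved row by row,

       v[] = { a00 a01 b00 b01
               a10 a11 b10 b11 }

   so block a is not contiguous in v. With stepval = (n-1)*bs = 2 the copy
   loop above gathers one bs x bs block into the contiguous scratch array
   barray before calling MatSetValuesBlocked_SeqBAIJ_MatScalar().
*/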
558 
559 #define HASH_KEY 0.6180339887
560 #define HASH(size,key,tmp) (tmp = (key)*HASH_KEY,(int)((size)*(tmp-(int)tmp)))
561 /* #define HASH(size,key) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
562 /* #define HASH(size,key,tmp) ((int)((size)*fmod(((key)*HASH_KEY),1))) */
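
/*
   Worked example of the multiplicative hash (illustrative numbers only):
   with size = 11 and key = 7,

       tmp  = 7*0.6180339887 = 4.3262...
       HASH = (int)(11*(4.3262... - 4)) = (int)3.58... = 3

   i.e. the key is spread by the golden-ratio constant and the fractional
   part is scaled to a slot in [0,size).
*/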
563 #undef __FUNCT__
564 #define __FUNCT__ "MatSetValues_MPIBAIJ_HT_MatScalar"
565 int MatSetValues_MPIBAIJ_HT_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
566 {
567   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
568   PetscTruth  roworiented = baij->roworiented;
569   int         ierr,i,j,row,col;
570   int         rstart_orig=baij->rstart_bs;
571   int         rend_orig=baij->rend_bs,Nbs=baij->Nbs;
572   int         h1,key,size=baij->ht_size,bs=baij->bs,*HT=baij->ht,idx;
573   PetscReal   tmp;
574   MatScalar   **HD = baij->hd,value;
575 #if defined(PETSC_USE_BOPT_g)
576   int         total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
577 #endif
578 
579   PetscFunctionBegin;
580 
581   for (i=0; i<m; i++) {
582 #if defined(PETSC_USE_BOPT_g)
583     if (im[i] < 0) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Negative row");
584     if (im[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",im[i],mat->M-1);
585 #endif
586     row = im[i];
587     if (row >= rstart_orig && row < rend_orig) {
588       for (j=0; j<n; j++) {
589         col = in[j];
590         if (roworiented) value = v[i*n+j]; else value = v[i+j*m];
591         /* Look up into the Hash Table */
592         key = (row/bs)*Nbs+(col/bs)+1;
593         h1  = HASH(size,key,tmp);
594 
595 
596         idx = h1;
597 #if defined(PETSC_USE_BOPT_g)
598         insert_ct++;
599         total_ct++;
600         if (HT[idx] != key) {
601           for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
602           if (idx == size) {
603             for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
604             if (idx == h1) {
605               SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%d,%d) has no entry in the hash table", row, col);
606             }
607           }
608         }
609 #else
610         if (HT[idx] != key) {
611           for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
612           if (idx == size) {
613             for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
614             if (idx == h1) {
615               SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%d,%d) has no entry in the hash table", row, col);
616             }
617           }
618         }
619 #endif
620         /* A HASH table entry is found, so insert the values at the correct address */
621         if (addv == ADD_VALUES) *(HD[idx]+ (col % bs)*bs + (row % bs)) += value;
622         else                    *(HD[idx]+ (col % bs)*bs + (row % bs))  = value;
623       }
624     } else {
625       if (!baij->donotstash) {
626         if (roworiented) {
627           ierr = MatStashValuesRow_Private(&mat->stash,im[i],n,in,v+i*n);CHKERRQ(ierr);
628         } else {
629           ierr = MatStashValuesCol_Private(&mat->stash,im[i],n,in,v+i,m);CHKERRQ(ierr);
630         }
631       }
632     }
633   }
634 #if defined(PETSC_USE_BOPT_g)
635   baij->ht_total_ct = total_ct;
636   baij->ht_insert_ct = insert_ct;
637 #endif
638   PetscFunctionReturn(0);
639 }
640 
641 #undef __FUNCT__
642 #define __FUNCT__ "MatSetValuesBlocked_MPIBAIJ_HT_MatScalar"
643 int MatSetValuesBlocked_MPIBAIJ_HT_MatScalar(Mat mat,int m,const int im[],int n,const int in[],const MatScalar v[],InsertMode addv)
644 {
645   Mat_MPIBAIJ     *baij = (Mat_MPIBAIJ*)mat->data;
646   PetscTruth      roworiented = baij->roworiented;
647   int             ierr,i,j,ii,jj,row,col;
648   int             rstart=baij->rstart ;
649   int             rend=baij->rend,stepval,bs=baij->bs,bs2=baij->bs2;
650   int             h1,key,size=baij->ht_size,idx,*HT=baij->ht,Nbs=baij->Nbs;
651   PetscReal       tmp;
652   MatScalar       **HD = baij->hd,*baij_a;
653   const MatScalar *v_t,*value;
654 #if defined(PETSC_USE_BOPT_g)
655   int             total_ct=baij->ht_total_ct,insert_ct=baij->ht_insert_ct;
656 #endif
657 
658   PetscFunctionBegin;
659 
660   if (roworiented) {
661     stepval = (n-1)*bs;
662   } else {
663     stepval = (m-1)*bs;
664   }
665   for (i=0; i<m; i++) {
666 #if defined(PETSC_USE_BOPT_g)
667     if (im[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %d",im[i]);
668     if (im[i] >= baij->Mbs) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",im[i],baij->Mbs-1);
669 #endif
670     row   = im[i];
671     v_t   = v + i*bs2;
672     if (row >= rstart && row < rend) {
673       for (j=0; j<n; j++) {
674         col = in[j];
675 
676         /* Look up into the Hash Table */
677         key = row*Nbs+col+1;
678         h1  = HASH(size,key,tmp);
679 
680         idx = h1;
681 #if defined(PETSC_USE_BOPT_g)
682         total_ct++;
683         insert_ct++;
684         if (HT[idx] != key) {
685           for (idx=h1; (idx<size) && (HT[idx]!=key); idx++,total_ct++);
686           if (idx == size) {
687             for (idx=0; (idx<h1) && (HT[idx]!=key); idx++,total_ct++);
688             if (idx == h1) {
689               SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%d,%d) has no entry in the hash table", row, col);
690             }
691           }
692         }
693 #else
694         if (HT[idx] != key) {
695           for (idx=h1; (idx<size) && (HT[idx]!=key); idx++);
696           if (idx == size) {
697             for (idx=0; (idx<h1) && (HT[idx]!=key); idx++);
698             if (idx == h1) {
699               SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"(%d,%d) has no entry in the hash table", row, col);
700             }
701           }
702         }
703 #endif
704         baij_a = HD[idx];
705         if (roworiented) {
706           /*value = v + i*(stepval+bs)*bs + j*bs;*/
707           /* value = v + (i*(stepval+bs)+j)*bs; */
708           value = v_t;
709           v_t  += bs;
710           if (addv == ADD_VALUES) {
711             for (ii=0; ii<bs; ii++,value+=stepval) {
712               for (jj=ii; jj<bs2; jj+=bs) {
713                 baij_a[jj]  += *value++;
714               }
715             }
716           } else {
717             for (ii=0; ii<bs; ii++,value+=stepval) {
718               for (jj=ii; jj<bs2; jj+=bs) {
719                 baij_a[jj]  = *value++;
720               }
721             }
722           }
723         } else {
724           value = v + j*(stepval+bs)*bs + i*bs;
725           if (addv == ADD_VALUES) {
726             for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
727               for (jj=0; jj<bs; jj++) {
728                 baij_a[jj]  += *value++;
729               }
730             }
731           } else {
732             for (ii=0; ii<bs; ii++,value+=stepval,baij_a+=bs) {
733               for (jj=0; jj<bs; jj++) {
734                 baij_a[jj]  = *value++;
735               }
736             }
737           }
738         }
739       }
740     } else {
741       if (!baij->donotstash) {
742         if (roworiented) {
743           ierr = MatStashValuesRowBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
744         } else {
745           ierr = MatStashValuesColBlocked_Private(&mat->bstash,im[i],n,in,v,m,n,i);CHKERRQ(ierr);
746         }
747       }
748     }
749   }
750 #if defined(PETSC_USE_BOPT_g)
751   baij->ht_total_ct = total_ct;
752   baij->ht_insert_ct = insert_ct;
753 #endif
754   PetscFunctionReturn(0);
755 }
756 
757 #undef __FUNCT__
758 #define __FUNCT__ "MatGetValues_MPIBAIJ"
759 int MatGetValues_MPIBAIJ(Mat mat,int m,const int idxm[],int n,const int idxn[],PetscScalar v[])
760 {
761   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
762   int        bs=baij->bs,ierr,i,j,bsrstart = baij->rstart*bs,bsrend = baij->rend*bs;
763   int        bscstart = baij->cstart*bs,bscend = baij->cend*bs,row,col,data;
764 
765   PetscFunctionBegin;
766   for (i=0; i<m; i++) {
767     if (idxm[i] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative row: %d",idxm[i]);
768     if (idxm[i] >= mat->M) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Row too large: row %d max %d",idxm[i],mat->M-1);
769     if (idxm[i] >= bsrstart && idxm[i] < bsrend) {
770       row = idxm[i] - bsrstart;
771       for (j=0; j<n; j++) {
772         if (idxn[j] < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"Negative column: %d",idxn[j]);
773         if (idxn[j] >= mat->N) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Column too large: col %d max %d",idxn[j],mat->N-1);
774         if (idxn[j] >= bscstart && idxn[j] < bscend){
775           col = idxn[j] - bscstart;
776           ierr = MatGetValues_SeqBAIJ(baij->A,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
777         } else {
778           if (!baij->colmap) {
779             ierr = CreateColmap_MPIBAIJ_Private(mat);CHKERRQ(ierr);
780           }
781 #if defined (PETSC_USE_CTABLE)
782           ierr = PetscTableFind(baij->colmap,idxn[j]/bs+1,&data);CHKERRQ(ierr);
783           data--;
784 #else
785           data = baij->colmap[idxn[j]/bs]-1;
786 #endif
787           if((data < 0) || (baij->garray[data/bs] != idxn[j]/bs)) *(v+i*n+j) = 0.0;
788           else {
789             col  = data + idxn[j]%bs;
790             ierr = MatGetValues_SeqBAIJ(baij->B,1,&row,1,&col,v+i*n+j);CHKERRQ(ierr);
791           }
792         }
793       }
794     } else {
795       SETERRQ(PETSC_ERR_SUP,"Only local values currently supported");
796     }
797   }
798   PetscFunctionReturn(0);
799 }
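
/*
   Usage sketch (illustrative example): only locally owned rows may be
   queried, so a typical call is

       PetscScalar val;
       ierr = MatGetValues(mat,1,&i,1,&j,&val);CHKERRQ(ierr);

   with rstart*bs <= i < rend*bs; any global column j is allowed, and
   entries outside the stored nonzero pattern come back as 0.0.
*/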
800 
801 #undef __FUNCT__
802 #define __FUNCT__ "MatNorm_MPIBAIJ"
803 int MatNorm_MPIBAIJ(Mat mat,NormType type,PetscReal *nrm)
804 {
805   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
806   Mat_SeqBAIJ *amat = (Mat_SeqBAIJ*)baij->A->data,*bmat = (Mat_SeqBAIJ*)baij->B->data;
807   int        ierr,i,bs2=baij->bs2;
808   PetscReal  sum = 0.0;
809   MatScalar  *v;
810 
811   PetscFunctionBegin;
812   if (baij->size == 1) {
813     ierr =  MatNorm(baij->A,type,nrm);CHKERRQ(ierr);
814   } else {
815     if (type == NORM_FROBENIUS) {
816       v = amat->a;
817       for (i=0; i<amat->nz*bs2; i++) {
818 #if defined(PETSC_USE_COMPLEX)
819         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
820 #else
821         sum += (*v)*(*v); v++;
822 #endif
823       }
824       v = bmat->a;
825       for (i=0; i<bmat->nz*bs2; i++) {
826 #if defined(PETSC_USE_COMPLEX)
827         sum += PetscRealPart(PetscConj(*v)*(*v)); v++;
828 #else
829         sum += (*v)*(*v); v++;
830 #endif
831       }
832       ierr = MPI_Allreduce(&sum,nrm,1,MPIU_REAL,MPI_SUM,mat->comm);CHKERRQ(ierr);
833       *nrm = sqrt(*nrm);
834     } else {
835       SETERRQ(PETSC_ERR_SUP,"No support for this norm yet");
836     }
837   }
838   PetscFunctionReturn(0);
839 }
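
/*
   The Frobenius branch above computes

       ||A||_F = sqrt( sum_ij |a_ij|^2 )

   by accumulating |.|^2 over the local diagonal (A) and off-diagonal (B)
   parts and combining the partial sums across processes with
   MPI_Allreduce() before taking the square root.
*/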
840 
841 
842 /*
843   Creates and fills the hash table.
844   The table is created only once.
845   If new entries need to be added to the matrix
846   then the hash table has to be destroyed and
847   recreated.
848 */
849 #undef __FUNCT__
850 #define __FUNCT__ "MatCreateHashTable_MPIBAIJ_Private"
851 int MatCreateHashTable_MPIBAIJ_Private(Mat mat,PetscReal factor)
852 {
853   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
854   Mat         A = baij->A,B=baij->B;
855   Mat_SeqBAIJ *a=(Mat_SeqBAIJ *)A->data,*b=(Mat_SeqBAIJ *)B->data;
856   int         i,j,k,nz=a->nz+b->nz,h1,*ai=a->i,*aj=a->j,*bi=b->i,*bj=b->j;
857   int         size,bs2=baij->bs2,rstart=baij->rstart,ierr;
858   int         cstart=baij->cstart,*garray=baij->garray,row,col,Nbs=baij->Nbs;
859   int         *HT,key;
860   MatScalar   **HD;
861   PetscReal   tmp;
862 #if defined(PETSC_USE_BOPT_g)
863   int         ct=0,max=0;
864 #endif
865 
866   PetscFunctionBegin;
867   baij->ht_size=(int)(factor*nz);
868   size = baij->ht_size;
869 
870   if (baij->ht) {
871     PetscFunctionReturn(0);
872   }
873 
874   /* Allocate Memory for Hash Table */
875   ierr     = PetscMalloc((size)*(sizeof(int)+sizeof(MatScalar*))+1,&baij->hd);CHKERRQ(ierr);
876   baij->ht = (int*)(baij->hd + size);
877   HD       = baij->hd;
878   HT       = baij->ht;
879 
880 
881   ierr = PetscMemzero(HD,size*(sizeof(int)+sizeof(MatScalar*)));CHKERRQ(ierr);
882 
883 
884   /* Loop Over A */
885   for (i=0; i<a->mbs; i++) {
886     for (j=ai[i]; j<ai[i+1]; j++) {
887       row = i+rstart;
888       col = aj[j]+cstart;
889 
890       key = row*Nbs + col + 1;
891       h1  = HASH(size,key,tmp);
892       for (k=0; k<size; k++){
893         if (HT[(h1+k)%size] == 0) {
894           HT[(h1+k)%size] = key;
895           HD[(h1+k)%size] = a->a + j*bs2;
896           break;
897 #if defined(PETSC_USE_BOPT_g)
898         } else {
899           ct++;
900 #endif
901         }
902       }
903 #if defined(PETSC_USE_BOPT_g)
904       if (k> max) max = k;
905 #endif
906     }
907   }
908   /* Loop Over B */
909   for (i=0; i<b->mbs; i++) {
910     for (j=bi[i]; j<bi[i+1]; j++) {
911       row = i+rstart;
912       col = garray[bj[j]];
913       key = row*Nbs + col + 1;
914       h1  = HASH(size,key,tmp);
915       for (k=0; k<size; k++){
916         if (HT[(h1+k)%size] == 0) {
917           HT[(h1+k)%size] = key;
918           HD[(h1+k)%size] = b->a + j*bs2;
919           break;
920 #if defined(PETSC_USE_BOPT_g)
921         } else {
922           ct++;
923 #endif
924         }
925       }
926 #if defined(PETSC_USE_BOPT_g)
927       if (k> max) max = k;
928 #endif
929     }
930   }
931 
932   /* Print Summary */
933 #if defined(PETSC_USE_BOPT_g)
934   for (i=0,j=0; i<size; i++) {
935     if (HT[i]) {j++;}
936   }
937   PetscLogInfo(0,"MatCreateHashTable_MPIBAIJ_Private: Average Search = %5.2f, max search = %d\n",(j == 0)? 0.0:((PetscReal)(ct+j))/j,max);
938 #endif
939   PetscFunctionReturn(0);
940 }
941 
942 #undef __FUNCT__
943 #define __FUNCT__ "MatAssemblyBegin_MPIBAIJ"
944 int MatAssemblyBegin_MPIBAIJ(Mat mat,MatAssemblyType mode)
945 {
946   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
947   int         ierr,nstash,reallocs;
948   InsertMode  addv;
949 
950   PetscFunctionBegin;
951   if (baij->donotstash) {
952     PetscFunctionReturn(0);
953   }
954 
955   /* make sure all processors are either in INSERTMODE or ADDMODE */
956   ierr = MPI_Allreduce(&mat->insertmode,&addv,1,MPI_INT,MPI_BOR,mat->comm);CHKERRQ(ierr);
957   if (addv == (ADD_VALUES|INSERT_VALUES)) {
958     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Some processors inserted values while others added");
959   }
960   mat->insertmode = addv; /* in case this processor had no cache */
961 
962   ierr = MatStashScatterBegin_Private(&mat->stash,baij->rowners_bs);CHKERRQ(ierr);
963   ierr = MatStashScatterBegin_Private(&mat->bstash,baij->rowners);CHKERRQ(ierr);
964   ierr = MatStashGetInfo_Private(&mat->stash,&nstash,&reallocs);CHKERRQ(ierr);
965   PetscLogInfo(0,"MatAssemblyBegin_MPIBAIJ:Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
966   ierr = MatStashGetInfo_Private(&mat->bstash,&nstash,&reallocs);CHKERRQ(ierr);
967   PetscLogInfo(0,"MatAssemblyBegin_MPIBAIJ:Block-Stash has %d entries, uses %d mallocs.\n",nstash,reallocs);
968   PetscFunctionReturn(0);
969 }
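
/*
   Protocol sketch (standard PETSc usage, shown for orientation): the pair
   must be called collectively on all processes after setting values,

       ierr = MatAssemblyBegin(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
       ierr = MatAssemblyEnd(mat,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

   Begin() starts shipping the stashed off-process entries; End() below
   drains them via MatStashScatterGetMesg_Private() and finishes the local
   assemblies.
*/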
970 
971 #undef __FUNCT__
972 #define __FUNCT__ "MatAssemblyEnd_MPIBAIJ"
973 int MatAssemblyEnd_MPIBAIJ(Mat mat,MatAssemblyType mode)
974 {
975   Mat_MPIBAIJ *baij=(Mat_MPIBAIJ*)mat->data;
976   Mat_SeqBAIJ *a=(Mat_SeqBAIJ*)baij->A->data,*b=(Mat_SeqBAIJ*)baij->B->data;
977   int         i,j,rstart,ncols,n,ierr,flg,bs2=baij->bs2;
978   int         *row,*col,other_disassembled;
979   PetscTruth  r1,r2,r3;
980   MatScalar   *val;
981   InsertMode  addv = mat->insertmode;
982 
983   PetscFunctionBegin;
984   if (!baij->donotstash) {
985     while (1) {
986       ierr = MatStashScatterGetMesg_Private(&mat->stash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
987       if (!flg) break;
988 
989       for (i=0; i<n;) {
990         /* Now identify the consecutive vals belonging to the same row */
991         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
992         if (j < n) ncols = j-i;
993         else       ncols = n-i;
994         /* Now assemble all these values with a single function call */
995         ierr = MatSetValues_MPIBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i,addv);CHKERRQ(ierr);
996         i = j;
997       }
998     }
999     ierr = MatStashScatterEnd_Private(&mat->stash);CHKERRQ(ierr);
1000     /* Now process the block-stash. Since the values are stashed column-oriented,
1001        temporarily clear the roworiented flags, and after MatSetValuesBlocked()
1002        restore the original flags */
1003     r1 = baij->roworiented;
1004     r2 = a->roworiented;
1005     r3 = b->roworiented;
1006     baij->roworiented = PETSC_FALSE;
1007     a->roworiented    = PETSC_FALSE;
1008     b->roworiented    = PETSC_FALSE;
1009     while (1) {
1010       ierr = MatStashScatterGetMesg_Private(&mat->bstash,&n,&row,&col,&val,&flg);CHKERRQ(ierr);
1011       if (!flg) break;
1012 
1013       for (i=0; i<n;) {
1014         /* Now identify the consecutive vals belonging to the same row */
1015         for (j=i,rstart=row[j]; j<n; j++) { if (row[j] != rstart) break; }
1016         if (j < n) ncols = j-i;
1017         else       ncols = n-i;
1018         ierr = MatSetValuesBlocked_MPIBAIJ_MatScalar(mat,1,row+i,ncols,col+i,val+i*bs2,addv);CHKERRQ(ierr);
1019         i = j;
1020       }
1021     }
1022     ierr = MatStashScatterEnd_Private(&mat->bstash);CHKERRQ(ierr);
1023     baij->roworiented = r1;
1024     a->roworiented    = r2;
1025     b->roworiented    = r3;
1026   }
1027 
1028   ierr = MatAssemblyBegin(baij->A,mode);CHKERRQ(ierr);
1029   ierr = MatAssemblyEnd(baij->A,mode);CHKERRQ(ierr);
1030 
1031   /* determine if any processor has disassembled; if so we must
1032      also disassemble ourselves, in order that we may reassemble. */
1033   /*
1034      if nonzero structure of submatrix B cannot change then we know that
1035      no processor disassembled thus we can skip this stuff
1036   */
1037   if (!((Mat_SeqBAIJ*)baij->B->data)->nonew)  {
1038     ierr = MPI_Allreduce(&mat->was_assembled,&other_disassembled,1,MPI_INT,MPI_PROD,mat->comm);CHKERRQ(ierr);
1039     if (mat->was_assembled && !other_disassembled) {
1040       ierr = DisAssemble_MPIBAIJ(mat);CHKERRQ(ierr);
1041     }
1042   }
1043 
1044   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) {
1045     ierr = MatSetUpMultiply_MPIBAIJ(mat);CHKERRQ(ierr);
1046   }
1047   ierr = MatAssemblyBegin(baij->B,mode);CHKERRQ(ierr);
1048   ierr = MatAssemblyEnd(baij->B,mode);CHKERRQ(ierr);
1049 
1050 #if defined(PETSC_USE_BOPT_g)
1051   if (baij->ht && mode== MAT_FINAL_ASSEMBLY) {
1052     PetscLogInfo(0,"MatAssemblyEnd_MPIBAIJ:Average Hash Table Search in MatSetValues = %5.2f\n",((PetscReal)baij->ht_total_ct)/baij->ht_insert_ct);
1053     baij->ht_total_ct  = 0;
1054     baij->ht_insert_ct = 0;
1055   }
1056 #endif
1057   if (baij->ht_flag && !baij->ht && mode == MAT_FINAL_ASSEMBLY) {
1058     ierr = MatCreateHashTable_MPIBAIJ_Private(mat,baij->ht_fact);CHKERRQ(ierr);
1059     mat->ops->setvalues        = MatSetValues_MPIBAIJ_HT;
1060     mat->ops->setvaluesblocked = MatSetValuesBlocked_MPIBAIJ_HT;
1061   }
1062 
1063   if (baij->rowvalues) {
1064     ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);
1065     baij->rowvalues = 0;
1066   }
1067   PetscFunctionReturn(0);
1068 }
1069 
1070 #undef __FUNCT__
1071 #define __FUNCT__ "MatView_MPIBAIJ_ASCIIorDraworSocket"
1072 static int MatView_MPIBAIJ_ASCIIorDraworSocket(Mat mat,PetscViewer viewer)
1073 {
1074   Mat_MPIBAIJ       *baij = (Mat_MPIBAIJ*)mat->data;
1075   int               ierr,bs = baij->bs,size = baij->size,rank = baij->rank;
1076   PetscTruth        isascii,isdraw;
1077   PetscViewer       sviewer;
1078   PetscViewerFormat format;
1079 
1080   PetscFunctionBegin;
1081 
1082   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
1083   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
1084   if (isascii) {
1085     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
1086     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
1087       MatInfo info;
1088       ierr = MPI_Comm_rank(mat->comm,&rank);CHKERRQ(ierr);
1089       ierr = MatGetInfo(mat,MAT_LOCAL,&info);CHKERRQ(ierr);
1090       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Local rows %d nz %d nz alloced %d bs %d mem %d\n",
1091               rank,mat->m,(int)info.nz_used*bs,(int)info.nz_allocated*bs,
1092               baij->bs,(int)info.memory);CHKERRQ(ierr);
1093       ierr = MatGetInfo(baij->A,MAT_LOCAL,&info);CHKERRQ(ierr);
1094       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] on-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
1095       ierr = MatGetInfo(baij->B,MAT_LOCAL,&info);CHKERRQ(ierr);
1096       ierr = PetscViewerASCIISynchronizedPrintf(viewer,"[%d] off-diagonal part: nz %d \n",rank,(int)info.nz_used*bs);CHKERRQ(ierr);
1097       ierr = PetscViewerFlush(viewer);CHKERRQ(ierr);
1098       ierr = VecScatterView(baij->Mvctx,viewer);CHKERRQ(ierr);
1099       PetscFunctionReturn(0);
1100     } else if (format == PETSC_VIEWER_ASCII_INFO) {
1101       ierr = PetscViewerASCIIPrintf(viewer,"  block size is %d\n",bs);CHKERRQ(ierr);
1102       PetscFunctionReturn(0);
1103     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
1104       PetscFunctionReturn(0);
1105     }
1106   }
1107 
1108   if (isdraw) {
1109     PetscDraw       draw;
1110     PetscTruth isnull;
1111     ierr = PetscViewerDrawGetDraw(viewer,0,&draw);CHKERRQ(ierr);
1112     ierr = PetscDrawIsNull(draw,&isnull);CHKERRQ(ierr); if (isnull) PetscFunctionReturn(0);
1113   }
1114 
1115   if (size == 1) {
1116     ierr = PetscObjectSetName((PetscObject)baij->A,mat->name);CHKERRQ(ierr);
1117     ierr = MatView(baij->A,viewer);CHKERRQ(ierr);
1118   } else {
1119     /* assemble the entire matrix onto first processor. */
1120     Mat         A;
1121     Mat_SeqBAIJ *Aloc;
1122     int         M = mat->M,N = mat->N,*ai,*aj,col,i,j,k,*rvals,mbs = baij->mbs;
1123     MatScalar   *a;
1124 
1125     if (!rank) {
1126       ierr = MatCreateMPIBAIJ(mat->comm,baij->bs,M,N,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
1127     } else {
1128       ierr = MatCreateMPIBAIJ(mat->comm,baij->bs,0,0,M,N,0,PETSC_NULL,0,PETSC_NULL,&A);CHKERRQ(ierr);
1129     }
1130     PetscLogObjectParent(mat,A);
1131 
1132     /* copy over the A part */
1133     Aloc = (Mat_SeqBAIJ*)baij->A->data;
1134     ai   = Aloc->i; aj = Aloc->j; a = Aloc->a;
1135     ierr = PetscMalloc(bs*sizeof(int),&rvals);CHKERRQ(ierr);
1136 
1137     for (i=0; i<mbs; i++) {
1138       rvals[0] = bs*(baij->rstart + i);
1139       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1140       for (j=ai[i]; j<ai[i+1]; j++) {
1141         col = (baij->cstart+aj[j])*bs;
1142         for (k=0; k<bs; k++) {
1143           ierr = MatSetValues_MPIBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
1144           col++; a += bs;
1145         }
1146       }
1147     }
1148     /* copy over the B part */
1149     Aloc = (Mat_SeqBAIJ*)baij->B->data;
1150     ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1151     for (i=0; i<mbs; i++) {
1152       rvals[0] = bs*(baij->rstart + i);
1153       for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1154       for (j=ai[i]; j<ai[i+1]; j++) {
1155         col = baij->garray[aj[j]]*bs;
1156         for (k=0; k<bs; k++) {
1157           ierr = MatSetValues_MPIBAIJ_MatScalar(A,bs,rvals,1,&col,a,INSERT_VALUES);CHKERRQ(ierr);
1158           col++; a += bs;
1159         }
1160       }
1161     }
1162     ierr = PetscFree(rvals);CHKERRQ(ierr);
1163     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1164     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1165     /*
1166        Everyone has to call to draw the matrix since the graphics waits are
1167        synchronized across all processors that share the PetscDraw object
1168     */
1169     ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
1170     if (!rank) {
1171       ierr = PetscObjectSetName((PetscObject)((Mat_MPIBAIJ*)(A->data))->A,mat->name);CHKERRQ(ierr);
1172       ierr = MatView(((Mat_MPIBAIJ*)(A->data))->A,sviewer);CHKERRQ(ierr);
1173     }
1174     ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
1175     ierr = MatDestroy(A);CHKERRQ(ierr);
1176   }
1177   PetscFunctionReturn(0);
1178 }
1179 
1180 #undef __FUNCT__
1181 #define __FUNCT__ "MatView_MPIBAIJ"
1182 int MatView_MPIBAIJ(Mat mat,PetscViewer viewer)
1183 {
1184   int        ierr;
1185   PetscTruth isascii,isdraw,issocket,isbinary;
1186 
1187   PetscFunctionBegin;
1188   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);CHKERRQ(ierr);
1189   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_DRAW,&isdraw);CHKERRQ(ierr);
1190   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_SOCKET,&issocket);CHKERRQ(ierr);
1191   ierr = PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_BINARY,&isbinary);CHKERRQ(ierr);
1192   if (isascii || isdraw || issocket || isbinary) {
1193     ierr = MatView_MPIBAIJ_ASCIIorDraworSocket(mat,viewer);CHKERRQ(ierr);
1194   } else {
1195     SETERRQ1(1,"Viewer type %s not supported by MPIBAIJ matrices",((PetscObject)viewer)->type_name);
1196   }
1197   PetscFunctionReturn(0);
1198 }
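
/*
   Usage sketch (illustrative; assumes this version's PetscViewerSetFormat()
   interface): to obtain the per-process summary printed by the
   ASCII_INFO_DETAIL branch above,

       ierr = PetscViewerSetFormat(PETSC_VIEWER_STDOUT_WORLD,
                                   PETSC_VIEWER_ASCII_INFO_DETAIL);CHKERRQ(ierr);
       ierr = MatView(mat,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
*/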
1199 
1200 #undef __FUNCT__
1201 #define __FUNCT__ "MatDestroy_MPIBAIJ"
1202 int MatDestroy_MPIBAIJ(Mat mat)
1203 {
1204   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1205   int         ierr;
1206 
1207   PetscFunctionBegin;
1208 #if defined(PETSC_USE_LOG)
1209   PetscLogObjectState((PetscObject)mat,"Rows=%d,Cols=%d",mat->M,mat->N);
1210 #endif
1211   ierr = MatStashDestroy_Private(&mat->stash);CHKERRQ(ierr);
1212   ierr = MatStashDestroy_Private(&mat->bstash);CHKERRQ(ierr);
1213   ierr = PetscFree(baij->rowners);CHKERRQ(ierr);
1214   ierr = MatDestroy(baij->A);CHKERRQ(ierr);
1215   ierr = MatDestroy(baij->B);CHKERRQ(ierr);
1216 #if defined (PETSC_USE_CTABLE)
1217   if (baij->colmap) {ierr = PetscTableDelete(baij->colmap);CHKERRQ(ierr);}
1218 #else
1219   if (baij->colmap) {ierr = PetscFree(baij->colmap);CHKERRQ(ierr);}
1220 #endif
1221   if (baij->garray) {ierr = PetscFree(baij->garray);CHKERRQ(ierr);}
1222   if (baij->lvec)   {ierr = VecDestroy(baij->lvec);CHKERRQ(ierr);}
1223   if (baij->Mvctx)  {ierr = VecScatterDestroy(baij->Mvctx);CHKERRQ(ierr);}
1224   if (baij->rowvalues) {ierr = PetscFree(baij->rowvalues);CHKERRQ(ierr);}
1225   if (baij->barray) {ierr = PetscFree(baij->barray);CHKERRQ(ierr);}
1226   if (baij->hd) {ierr = PetscFree(baij->hd);CHKERRQ(ierr);}
1227 #if defined(PETSC_USE_MAT_SINGLE)
1228   if (baij->setvaluescopy) {ierr = PetscFree(baij->setvaluescopy);CHKERRQ(ierr);}
1229 #endif
1230   ierr = PetscFree(baij);CHKERRQ(ierr);
1231   PetscFunctionReturn(0);
1232 }
1233 
1234 #undef __FUNCT__
1235 #define __FUNCT__ "MatMult_MPIBAIJ"
1236 int MatMult_MPIBAIJ(Mat A,Vec xx,Vec yy)
1237 {
1238   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1239   int         ierr,nt;
1240 
1241   PetscFunctionBegin;
1242   ierr = VecGetLocalSize(xx,&nt);CHKERRQ(ierr);
1243   if (nt != A->n) {
1244     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and xx");
1245   }
1246   ierr = VecGetLocalSize(yy,&nt);CHKERRQ(ierr);
1247   if (nt != A->m) {
1248     SETERRQ(PETSC_ERR_ARG_SIZ,"Incompatible partition of A and yy");
1249   }
1250   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
1251   ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
1252   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
1253   ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
1254   ierr = VecScatterPostRecvs(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
1255   PetscFunctionReturn(0);
1256 }
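
/*
   The sequence above overlaps communication with computation:

       y  = A_d * x_local     (computed while the ghost values of x are
                               still in flight)
       y += B_o * x_ghost     (after VecScatterEnd() completes)

   where A_d is the diagonal block a->A and B_o the off-diagonal block a->B
   applied to the gathered ghost vector a->lvec.
*/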
1257 
1258 #undef __FUNCT__
1259 #define __FUNCT__ "MatMultAdd_MPIBAIJ"
1260 int MatMultAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1261 {
1262   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1263   int        ierr;
1264 
1265   PetscFunctionBegin;
1266   ierr = VecScatterBegin(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
1267   ierr = (*a->A->ops->multadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
1268   ierr = VecScatterEnd(xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD,a->Mvctx);CHKERRQ(ierr);
1269   ierr = (*a->B->ops->multadd)(a->B,a->lvec,zz,zz);CHKERRQ(ierr);
1270   PetscFunctionReturn(0);
1271 }
1272 
1273 #undef __FUNCT__
1274 #define __FUNCT__ "MatMultTranspose_MPIBAIJ"
1275 int MatMultTranspose_MPIBAIJ(Mat A,Vec xx,Vec yy)
1276 {
1277   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1278   int         ierr;
1279 
1280   PetscFunctionBegin;
1281   /* do nondiagonal part */
1282   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
1283   /* send it on its way */
1284   ierr = VecScatterBegin(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
1285   /* do local part */
1286   ierr = (*a->A->ops->multtranspose)(a->A,xx,yy);CHKERRQ(ierr);
1287   /* receive remote parts: note this assumes the values are not actually */
1288   /* inserted in yy until the next line, which is true for this implementation */
1289   /* but may not hold for every VecScatter implementation. */
1290   ierr = VecScatterEnd(a->lvec,yy,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
1291   PetscFunctionReturn(0);
1292 }
1293 
1294 #undef __FUNCT__
1295 #define __FUNCT__ "MatMultTransposeAdd_MPIBAIJ"
1296 int MatMultTransposeAdd_MPIBAIJ(Mat A,Vec xx,Vec yy,Vec zz)
1297 {
1298   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1299   int         ierr;
1300 
1301   PetscFunctionBegin;
1302   /* do nondiagonal part */
1303   ierr = (*a->B->ops->multtranspose)(a->B,xx,a->lvec);CHKERRQ(ierr);
1304   /* send it on its way */
1305   ierr = VecScatterBegin(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
1306   /* do local part */
1307   ierr = (*a->A->ops->multtransposeadd)(a->A,xx,yy,zz);CHKERRQ(ierr);
1308   /* receive remote parts: note this assumes the values are not actually */
1309   /* inserted in zz until the next line, which is true for this implementation */
1310   /* but may not hold for every VecScatter implementation. */
1311   ierr = VecScatterEnd(a->lvec,zz,ADD_VALUES,SCATTER_REVERSE,a->Mvctx);CHKERRQ(ierr);
1312   PetscFunctionReturn(0);
1313 }
1314 
1315 /*
1316   This only works correctly for square matrices where the subblock A->A is the
1317    diagonal block
1318 */
1319 #undef __FUNCT__
1320 #define __FUNCT__ "MatGetDiagonal_MPIBAIJ"
1321 int MatGetDiagonal_MPIBAIJ(Mat A,Vec v)
1322 {
1323   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1324   int         ierr;
1325 
1326   PetscFunctionBegin;
1327   if (A->M != A->N) SETERRQ(PETSC_ERR_SUP,"Supports only square matrix where A->A is diag block");
1328   ierr = MatGetDiagonal(a->A,v);CHKERRQ(ierr);
1329   PetscFunctionReturn(0);
1330 }
1331 
1332 #undef __FUNCT__
1333 #define __FUNCT__ "MatScale_MPIBAIJ"
1334 int MatScale_MPIBAIJ(const PetscScalar *aa,Mat A)
1335 {
1336   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1337   int         ierr;
1338 
1339   PetscFunctionBegin;
1340   ierr = MatScale(aa,a->A);CHKERRQ(ierr);
1341   ierr = MatScale(aa,a->B);CHKERRQ(ierr);
1342   PetscFunctionReturn(0);
1343 }
1344 
1345 #undef __FUNCT__
1346 #define __FUNCT__ "MatGetRow_MPIBAIJ"
1347 int MatGetRow_MPIBAIJ(Mat matin,int row,int *nz,int **idx,PetscScalar **v)
1348 {
1349   Mat_MPIBAIJ  *mat = (Mat_MPIBAIJ*)matin->data;
1350   PetscScalar  *vworkA,*vworkB,**pvA,**pvB,*v_p;
1351   int          bs = mat->bs,bs2 = mat->bs2,i,ierr,*cworkA,*cworkB,**pcA,**pcB;
1352   int          nztot,nzA,nzB,lrow,brstart = mat->rstart*bs,brend = mat->rend*bs;
1353   int          *cmap,*idx_p,cstart = mat->cstart;
1354 
1355   PetscFunctionBegin;
1356   if (mat->getrowactive == PETSC_TRUE) SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Already active");
1357   mat->getrowactive = PETSC_TRUE;
1358 
1359   if (!mat->rowvalues && (idx || v)) {
1360     /*
1361         allocate enough space to hold information from the longest row.
1362     */
1363     Mat_SeqBAIJ *Aa = (Mat_SeqBAIJ*)mat->A->data,*Ba = (Mat_SeqBAIJ*)mat->B->data;
1364     int     max = 1,mbs = mat->mbs,tmp;
1365     for (i=0; i<mbs; i++) {
1366       tmp = Aa->i[i+1] - Aa->i[i] + Ba->i[i+1] - Ba->i[i];
1367       if (max < tmp) { max = tmp; }
1368     }
1369     ierr = PetscMalloc(max*bs2*(sizeof(int)+sizeof(PetscScalar)),&mat->rowvalues);CHKERRQ(ierr);
1370     mat->rowindices = (int*)(mat->rowvalues + max*bs2);
1371   }
1372 
1373   if (row < brstart || row >= brend) SETERRQ(PETSC_ERR_SUP,"Only local rows");
1374   lrow = row - brstart;
1375 
1376   pvA = &vworkA; pcA = &cworkA; pvB = &vworkB; pcB = &cworkB;
1377   if (!v)   {pvA = 0; pvB = 0;}
1378   if (!idx) {pcA = 0; if (!v) pcB = 0;}
1379   ierr = (*mat->A->ops->getrow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1380   ierr = (*mat->B->ops->getrow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1381   nztot = nzA + nzB;
1382 
1383   cmap  = mat->garray;
1384   if (v  || idx) {
1385     if (nztot) {
1386       /* Sort by increasing column numbers, assuming A and B already sorted */
1387       int imark = -1;
1388       if (v) {
1389         *v = v_p = mat->rowvalues;
1390         for (i=0; i<nzB; i++) {
1391           if (cmap[cworkB[i]/bs] < cstart)   v_p[i] = vworkB[i];
1392           else break;
1393         }
1394         imark = i;
1395         for (i=0; i<nzA; i++)     v_p[imark+i] = vworkA[i];
1396         for (i=imark; i<nzB; i++) v_p[nzA+i]   = vworkB[i];
1397       }
1398       if (idx) {
1399         *idx = idx_p = mat->rowindices;
1400         if (imark > -1) {
1401           for (i=0; i<imark; i++) {
1402             idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1403           }
1404         } else {
1405           for (i=0; i<nzB; i++) {
1406             if (cmap[cworkB[i]/bs] < cstart)
1407               idx_p[i] = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1408             else break;
1409           }
1410           imark = i;
1411         }
1412         for (i=0; i<nzA; i++)     idx_p[imark+i] = cstart*bs + cworkA[i];
1413         for (i=imark; i<nzB; i++) idx_p[nzA+i]   = cmap[cworkB[i]/bs]*bs + cworkB[i]%bs;
1414       }
1415     } else {
1416       if (idx) *idx = 0;
1417       if (v)   *v   = 0;
1418     }
1419   }
1420   *nz = nztot;
1421   ierr = (*mat->A->ops->restorerow)(mat->A,lrow,&nzA,pcA,pvA);CHKERRQ(ierr);
1422   ierr = (*mat->B->ops->restorerow)(mat->B,lrow,&nzB,pcB,pvB);CHKERRQ(ierr);
1423   PetscFunctionReturn(0);
1424 }
1425 
1426 #undef __FUNCT__
1427 #define __FUNCT__ "MatRestoreRow_MPIBAIJ"
1428 int MatRestoreRow_MPIBAIJ(Mat mat,int row,int *nz,int **idx,PetscScalar **v)
1429 {
1430   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1431 
1432   PetscFunctionBegin;
1433   if (baij->getrowactive == PETSC_FALSE) {
1434     SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"MatGetRow not called");
1435   }
1436   baij->getrowactive = PETSC_FALSE;
1437   PetscFunctionReturn(0);
1438 }
1439 
1440 #undef __FUNCT__
1441 #define __FUNCT__ "MatGetBlockSize_MPIBAIJ"
1442 int MatGetBlockSize_MPIBAIJ(Mat mat,int *bs)
1443 {
1444   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1445 
1446   PetscFunctionBegin;
1447   *bs = baij->bs;
1448   PetscFunctionReturn(0);
1449 }
1450 
1451 #undef __FUNCT__
1452 #define __FUNCT__ "MatZeroEntries_MPIBAIJ"
1453 int MatZeroEntries_MPIBAIJ(Mat A)
1454 {
1455   Mat_MPIBAIJ *l = (Mat_MPIBAIJ*)A->data;
1456   int         ierr;
1457 
1458   PetscFunctionBegin;
1459   ierr = MatZeroEntries(l->A);CHKERRQ(ierr);
1460   ierr = MatZeroEntries(l->B);CHKERRQ(ierr);
1461   PetscFunctionReturn(0);
1462 }
1463 
1464 #undef __FUNCT__
1465 #define __FUNCT__ "MatGetInfo_MPIBAIJ"
1466 int MatGetInfo_MPIBAIJ(Mat matin,MatInfoType flag,MatInfo *info)
1467 {
1468   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)matin->data;
1469   Mat         A = a->A,B = a->B;
1470   int         ierr;
1471   PetscReal   isend[5],irecv[5];
1472 
1473   PetscFunctionBegin;
1474   info->block_size     = (PetscReal)a->bs;
1475   ierr = MatGetInfo(A,MAT_LOCAL,info);CHKERRQ(ierr);
1476   isend[0] = info->nz_used; isend[1] = info->nz_allocated; isend[2] = info->nz_unneeded;
1477   isend[3] = info->memory;  isend[4] = info->mallocs;
1478   ierr = MatGetInfo(B,MAT_LOCAL,info);CHKERRQ(ierr);
1479   isend[0] += info->nz_used; isend[1] += info->nz_allocated; isend[2] += info->nz_unneeded;
1480   isend[3] += info->memory;  isend[4] += info->mallocs;
1481   if (flag == MAT_LOCAL) {
1482     info->nz_used      = isend[0];
1483     info->nz_allocated = isend[1];
1484     info->nz_unneeded  = isend[2];
1485     info->memory       = isend[3];
1486     info->mallocs      = isend[4];
1487   } else if (flag == MAT_GLOBAL_MAX) {
1488     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_MAX,matin->comm);CHKERRQ(ierr);
1489     info->nz_used      = irecv[0];
1490     info->nz_allocated = irecv[1];
1491     info->nz_unneeded  = irecv[2];
1492     info->memory       = irecv[3];
1493     info->mallocs      = irecv[4];
1494   } else if (flag == MAT_GLOBAL_SUM) {
1495     ierr = MPI_Allreduce(isend,irecv,5,MPIU_REAL,MPI_SUM,matin->comm);CHKERRQ(ierr);
1496     info->nz_used      = irecv[0];
1497     info->nz_allocated = irecv[1];
1498     info->nz_unneeded  = irecv[2];
1499     info->memory       = irecv[3];
1500     info->mallocs      = irecv[4];
1501   } else {
1502     SETERRQ1(1,"Unknown MatInfoType argument %d",flag);
1503   }
1504   info->rows_global       = (PetscReal)A->M;
1505   info->columns_global    = (PetscReal)A->N;
1506   info->rows_local        = (PetscReal)A->m;
1507   info->columns_local     = (PetscReal)A->N;
1508   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
1509   info->fill_ratio_needed = 0;
1510   info->factor_mallocs    = 0;
1511   PetscFunctionReturn(0);
1512 }
1513 
1514 #undef __FUNCT__
1515 #define __FUNCT__ "MatSetOption_MPIBAIJ"
1516 int MatSetOption_MPIBAIJ(Mat A,MatOption op)
1517 {
1518   Mat_MPIBAIJ *a = (Mat_MPIBAIJ*)A->data;
1519   int         ierr;
1520 
1521   PetscFunctionBegin;
1522   switch (op) {
1523   case MAT_NO_NEW_NONZERO_LOCATIONS:
1524   case MAT_YES_NEW_NONZERO_LOCATIONS:
1525   case MAT_COLUMNS_UNSORTED:
1526   case MAT_COLUMNS_SORTED:
1527   case MAT_NEW_NONZERO_ALLOCATION_ERR:
1528   case MAT_KEEP_ZEROED_ROWS:
1529   case MAT_NEW_NONZERO_LOCATION_ERR:
1530     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1531     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1532     break;
1533   case MAT_ROW_ORIENTED:
1534     a->roworiented = PETSC_TRUE;
1535     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1536     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1537     break;
1538   case MAT_ROWS_SORTED:
1539   case MAT_ROWS_UNSORTED:
1540   case MAT_YES_NEW_DIAGONALS:
1541     PetscLogInfo(A,"Info:MatSetOption_MPIBAIJ:Option ignored\n");
1542     break;
1543   case MAT_COLUMN_ORIENTED:
1544     a->roworiented = PETSC_FALSE;
1545     ierr = MatSetOption(a->A,op);CHKERRQ(ierr);
1546     ierr = MatSetOption(a->B,op);CHKERRQ(ierr);
1547     break;
1548   case MAT_IGNORE_OFF_PROC_ENTRIES:
1549     a->donotstash = PETSC_TRUE;
1550     break;
1551   case MAT_NO_NEW_DIAGONALS:
1552     SETERRQ(PETSC_ERR_SUP,"MAT_NO_NEW_DIAGONALS");
1553   case MAT_USE_HASH_TABLE:
1554     a->ht_flag = PETSC_TRUE;
1555     break;
1556   default:
1557     SETERRQ(PETSC_ERR_SUP,"unknown option");
1558   }
1559   PetscFunctionReturn(0);
1560 }
1561 
1562 #undef __FUNCT__
1563 #define __FUNCT__ "MatTranspose_MPIBAIJ"
1564 int MatTranspose_MPIBAIJ(Mat A,Mat *matout)
1565 {
1566   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)A->data;
1567   Mat_SeqBAIJ *Aloc;
1568   Mat         B;
1569   int         ierr,M=A->M,N=A->N,*ai,*aj,i,*rvals,j,k,col;
1570   int         bs=baij->bs,mbs=baij->mbs;
1571   MatScalar   *a;
1572 
1573   PetscFunctionBegin;
1574   if (!matout && M != N) SETERRQ(PETSC_ERR_ARG_SIZ,"Square matrix only for in-place");
1575   ierr = MatCreateMPIBAIJ(A->comm,baij->bs,A->n,A->m,N,M,0,PETSC_NULL,0,PETSC_NULL,&B);CHKERRQ(ierr);
1576 
1577   /* copy over the A part */
1578   Aloc = (Mat_SeqBAIJ*)baij->A->data;
1579   ai   = Aloc->i; aj = Aloc->j; a = Aloc->a;
1580   ierr = PetscMalloc(bs*sizeof(int),&rvals);CHKERRQ(ierr);
1581 
1582   for (i=0; i<mbs; i++) {
1583     rvals[0] = bs*(baij->rstart + i);
1584     for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1585     for (j=ai[i]; j<ai[i+1]; j++) {
1586       col = (baij->cstart+aj[j])*bs;
1587       for (k=0; k<bs; k++) {
1588         ierr = MatSetValues_MPIBAIJ_MatScalar(B,1,&col,bs,rvals,a,INSERT_VALUES);CHKERRQ(ierr);
1589         col++; a += bs;
1590       }
1591     }
1592   }
1593   /* copy over the B part */
1594   Aloc = (Mat_SeqBAIJ*)baij->B->data;
1595   ai = Aloc->i; aj = Aloc->j; a = Aloc->a;
1596   for (i=0; i<mbs; i++) {
1597     rvals[0] = bs*(baij->rstart + i);
1598     for (j=1; j<bs; j++) { rvals[j] = rvals[j-1] + 1; }
1599     for (j=ai[i]; j<ai[i+1]; j++) {
1600       col = baij->garray[aj[j]]*bs;
1601       for (k=0; k<bs; k++) {
1602         ierr = MatSetValues_MPIBAIJ_MatScalar(B,1,&col,bs,rvals,a,INSERT_VALUES);CHKERRQ(ierr);
1603         col++; a += bs;
1604       }
1605     }
1606   }
1607   ierr = PetscFree(rvals);CHKERRQ(ierr);
1608   ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1609   ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1610 
1611   if (matout) {
1612     *matout = B;
1613   } else {
1614     ierr = MatHeaderCopy(A,B);CHKERRQ(ierr);
1615   }
1616   PetscFunctionReturn(0);
1617 }
1618 
1619 #undef __FUNCT__
1620 #define __FUNCT__ "MatDiagonalScale_MPIBAIJ"
1621 int MatDiagonalScale_MPIBAIJ(Mat mat,Vec ll,Vec rr)
1622 {
1623   Mat_MPIBAIJ *baij = (Mat_MPIBAIJ*)mat->data;
1624   Mat         a = baij->A,b = baij->B;
1625   int         ierr,s1,s2,s3;
1626 
1627   PetscFunctionBegin;
1628   ierr = MatGetLocalSize(mat,&s2,&s3);CHKERRQ(ierr);
1629   if (rr) {
1630     ierr = VecGetLocalSize(rr,&s1);CHKERRQ(ierr);
1631     if (s1!=s3) SETERRQ(PETSC_ERR_ARG_SIZ,"right vector non-conforming local size");
1632     /* Overlap communication with computation. */
1633     ierr = VecScatterBegin(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1634   }
1635   if (ll) {
1636     ierr = VecGetLocalSize(ll,&s1);CHKERRQ(ierr);
1637     if (s1!=s2) SETERRQ(PETSC_ERR_ARG_SIZ,"left vector non-conforming local size");
1638     ierr = (*b->ops->diagonalscale)(b,ll,PETSC_NULL);CHKERRQ(ierr);
1639   }
1640   /* scale  the diagonal block */
1641   ierr = (*a->ops->diagonalscale)(a,ll,rr);CHKERRQ(ierr);
1642 
1643   if (rr) {
1644     /* Do a scatter end and then right scale the off-diagonal block */
1645     ierr = VecScatterEnd(rr,baij->lvec,INSERT_VALUES,SCATTER_FORWARD,baij->Mvctx);CHKERRQ(ierr);
1646     ierr = (*b->ops->diagonalscale)(b,PETSC_NULL,baij->lvec);CHKERRQ(ierr);
1647   }
1648 
1649   PetscFunctionReturn(0);
1650 }
1651 
1652 #undef __FUNCT__
1653 #define __FUNCT__ "MatZeroRows_MPIBAIJ"
1654 int MatZeroRows_MPIBAIJ(Mat A,IS is,const PetscScalar *diag)
1655 {
1656   Mat_MPIBAIJ    *l = (Mat_MPIBAIJ*)A->data;
1657   int            i,ierr,N,*rows,*owners = l->rowners,size = l->size;
1658   int            *nprocs,j,idx,nsends,row;
1659   int            nmax,*svalues,*starts,*owner,nrecvs,rank = l->rank;
1660   int            *rvalues,tag = A->tag,count,base,slen,n,*source;
1661   int            *lens,imdex,*lrows,*values,bs=l->bs,rstart_bs=l->rstart_bs;
1662   MPI_Comm       comm = A->comm;
1663   MPI_Request    *send_waits,*recv_waits;
1664   MPI_Status     recv_status,*send_status;
1665   IS             istmp;
1666   PetscTruth     found;
1667 
1668   PetscFunctionBegin;
1669   ierr = ISGetLocalSize(is,&N);CHKERRQ(ierr);
1670   ierr = ISGetIndices(is,&rows);CHKERRQ(ierr);
1671 
1672   /*  first count number of contributors to each processor */
1673   ierr  = PetscMalloc(2*size*sizeof(int),&nprocs);CHKERRQ(ierr);
1674   ierr  = PetscMemzero(nprocs,2*size*sizeof(int));CHKERRQ(ierr);
1675   ierr  = PetscMalloc((N+1)*sizeof(int),&owner);CHKERRQ(ierr); /* see note*/
1676   for (i=0; i<N; i++) {
1677     idx   = rows[i];
1678     found = PETSC_FALSE;
1679     for (j=0; j<size; j++) {
1680       if (idx >= owners[j]*bs && idx < owners[j+1]*bs) {
1681         nprocs[2*j]++; nprocs[2*j+1] = 1; owner[i] = j; found = PETSC_TRUE; break;
1682       }
1683     }
1684     if (!found) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Index out of range");
1685   }
1686   nsends = 0;  for (i=0; i<size; i++) { nsends += nprocs[2*i+1];}
1687 
1688   /* inform other processors of number of messages and max length*/
1689   ierr = PetscMaxSum(comm,nprocs,&nmax,&nrecvs);CHKERRQ(ierr);
1690 
1691   /* post receives:   */
1692   ierr = PetscMalloc((nrecvs+1)*(nmax+1)*sizeof(int),&rvalues);CHKERRQ(ierr);
1693   ierr = PetscMalloc((nrecvs+1)*sizeof(MPI_Request),&recv_waits);CHKERRQ(ierr);
1694   for (i=0; i<nrecvs; i++) {
1695     ierr = MPI_Irecv(rvalues+nmax*i,nmax,MPI_INT,MPI_ANY_SOURCE,tag,comm,recv_waits+i);CHKERRQ(ierr);
1696   }
1697 
1698   /* do sends:
1699      1) starts[i] gives the starting index in svalues for stuff going to
1700      the ith processor
1701   */
1702   ierr = PetscMalloc((N+1)*sizeof(int),&svalues);CHKERRQ(ierr);
1703   ierr = PetscMalloc((nsends+1)*sizeof(MPI_Request),&send_waits);CHKERRQ(ierr);
1704   ierr = PetscMalloc((size+1)*sizeof(int),&starts);CHKERRQ(ierr);
1705   starts[0]  = 0;
1706   for (i=1; i<size; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1707   for (i=0; i<N; i++) {
1708     svalues[starts[owner[i]]++] = rows[i];
1709   }
1710   ierr = ISRestoreIndices(is,&rows);CHKERRQ(ierr);
1711 
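  /* starts[] was consumed by the packing loop above, so rebuild it before posting the sends */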
1712   starts[0] = 0;
1713   for (i=1; i<size+1; i++) { starts[i] = starts[i-1] + nprocs[2*i-2];}
1714   count = 0;
1715   for (i=0; i<size; i++) {
1716     if (nprocs[2*i+1]) {
1717       ierr = MPI_Isend(svalues+starts[i],nprocs[2*i],MPI_INT,i,tag,comm,send_waits+count++);CHKERRQ(ierr);
1718     }
1719   }
1720   ierr = PetscFree(starts);CHKERRQ(ierr);
1721 
1722   base = owners[rank]*bs;
1723 
1724   /*  wait on receives */
1725   ierr   = PetscMalloc(2*(nrecvs+1)*sizeof(int),&lens);CHKERRQ(ierr);
1726   source = lens + nrecvs;
1727   count  = nrecvs; slen = 0;
1728   while (count) {
1729     ierr = MPI_Waitany(nrecvs,recv_waits,&imdex,&recv_status);CHKERRQ(ierr);
1730     /* unpack receives into our local space */
1731     ierr = MPI_Get_count(&recv_status,MPI_INT,&n);CHKERRQ(ierr);
1732     source[imdex]  = recv_status.MPI_SOURCE;
1733     lens[imdex]    = n;
1734     slen          += n;
1735     count--;
1736   }
1737   ierr = PetscFree(recv_waits);CHKERRQ(ierr);
1738 
1739   /* move the data into the send scatter */
1740   ierr = PetscMalloc((slen+1)*sizeof(int),&lrows);CHKERRQ(ierr);
1741   count = 0;
1742   for (i=0; i<nrecvs; i++) {
1743     values = rvalues + i*nmax;
1744     for (j=0; j<lens[i]; j++) {
1745       lrows[count++] = values[j] - base;
1746     }
1747   }
1748   ierr = PetscFree(rvalues);CHKERRQ(ierr);
1749   ierr = PetscFree(lens);CHKERRQ(ierr);
1750   ierr = PetscFree(owner);CHKERRQ(ierr);
1751   ierr = PetscFree(nprocs);CHKERRQ(ierr);
1752 
1753   /* actually zap the local rows */
1754   ierr = ISCreateGeneral(PETSC_COMM_SELF,slen,lrows,&istmp);CHKERRQ(ierr);
1755   PetscLogObjectParent(A,istmp);
1756 
1757   /*
1758         Zero the required rows. If the "diagonal block" of the matrix
1759      is square and the user wishes to set the diagonal, we use separate
1760      code so that MatSetValues() is not called for each diagonal entry,
1761      which would allocate new memory with many mallocs and slow things down.
1762 
1763        Contributed by: Mathew Knepley
1764   */
1765   /* must zero l->B before l->A because the (diag) case below may put values into l->B*/
1766   ierr = MatZeroRows_SeqBAIJ(l->B,istmp,0);CHKERRQ(ierr);
1767   if (diag && (l->A->M == l->A->N)) {
1768     ierr = MatZeroRows_SeqBAIJ(l->A,istmp,diag);CHKERRQ(ierr);
1769   } else if (diag) {
1770     ierr = MatZeroRows_SeqBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1771     if (((Mat_SeqBAIJ*)l->A->data)->nonew) {
1772       SETERRQ(PETSC_ERR_SUP,"MatZeroRows() on rectangular matrices cannot be used with the Mat options \n\
1773 MAT_NO_NEW_NONZERO_LOCATIONS,MAT_NEW_NONZERO_LOCATION_ERR,MAT_NEW_NONZERO_ALLOCATION_ERR");
1774     }
1775     for (i=0; i<slen; i++) {
1776       row  = lrows[i] + rstart_bs;
1777       ierr = MatSetValues(A,1,&row,1,&row,diag,INSERT_VALUES);CHKERRQ(ierr);
1778     }
1779     ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1780     ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1781   } else {
1782     ierr = MatZeroRows_SeqBAIJ(l->A,istmp,0);CHKERRQ(ierr);
1783   }
1784 
1785   ierr = ISDestroy(istmp);CHKERRQ(ierr);
1786   ierr = PetscFree(lrows);CHKERRQ(ierr);
1787 
1788   /* wait on sends */
1789   if (nsends) {
1790     ierr = PetscMalloc(nsends*sizeof(MPI_Status),&send_status);CHKERRQ(ierr);
1791     ierr = MPI_Waitall(nsends,send_waits,send_status);CHKERRQ(ierr);
1792     ierr = PetscFree(send_status);CHKERRQ(ierr);
1793   }
1794   ierr = PetscFree(send_waits);CHKERRQ(ierr);
1795   ierr = PetscFree(svalues);CHKERRQ(ierr);
1796 
1797   PetscFunctionReturn(0);
1798 }
1799 
1800 #undef __FUNCT__
1801 #define __FUNCT__ "MatPrintHelp_MPIBAIJ"
1802 int MatPrintHelp_MPIBAIJ(Mat A)
1803 {
1804   Mat_MPIBAIJ *a   = (Mat_MPIBAIJ*)A->data;
1805   MPI_Comm    comm = A->comm;
1806   static int  called = 0;
1807   int         ierr;
1808 
1809   PetscFunctionBegin;
1810   if (!a->rank) {
1811     ierr = MatPrintHelp_SeqBAIJ(a->A);CHKERRQ(ierr);
1812   }
1813   if (called) {PetscFunctionReturn(0);} else called = 1;
1814   ierr = (*PetscHelpPrintf)(comm," Options for MATMPIBAIJ matrix format (the defaults):\n");CHKERRQ(ierr);
1815   ierr = (*PetscHelpPrintf)(comm,"  -mat_use_hash_table <factor>: Use hashtable for efficient matrix assembly\n");CHKERRQ(ierr);
1816   PetscFunctionReturn(0);
1817 }
1818 
1819 #undef __FUNCT__
1820 #define __FUNCT__ "MatSetUnfactored_MPIBAIJ"
1821 int MatSetUnfactored_MPIBAIJ(Mat A)
1822 {
1823   Mat_MPIBAIJ *a   = (Mat_MPIBAIJ*)A->data;
1824   int         ierr;
1825 
1826   PetscFunctionBegin;
1827   ierr = MatSetUnfactored(a->A);CHKERRQ(ierr);
1828   PetscFunctionReturn(0);
1829 }
1830 
1831 static int MatDuplicate_MPIBAIJ(Mat,MatDuplicateOption,Mat *);
1832 
1833 #undef __FUNCT__
1834 #define __FUNCT__ "MatEqual_MPIBAIJ"
1835 int MatEqual_MPIBAIJ(Mat A,Mat B,PetscTruth *flag)
1836 {
1837   Mat_MPIBAIJ *matB = (Mat_MPIBAIJ*)B->data,*matA = (Mat_MPIBAIJ*)A->data;
1838   Mat         a,b,c,d;
1839   PetscTruth  flg;
1840   int         ierr;
1841 
1842   PetscFunctionBegin;
1843   a = matA->A; b = matA->B;
1844   c = matB->A; d = matB->B;
1845 
1846   ierr = MatEqual(a,c,&flg);CHKERRQ(ierr);
1847   if (flg == PETSC_TRUE) {
1848     ierr = MatEqual(b,d,&flg);CHKERRQ(ierr);
1849   }
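  /* combine the local results: the matrices are equal only if both the
     diagonal and off-diagonal blocks match on every process */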
1850   ierr = MPI_Allreduce(&flg,flag,1,MPI_INT,MPI_LAND,A->comm);CHKERRQ(ierr);
1851   PetscFunctionReturn(0);
1852 }
1853 
1854 
1855 #undef __FUNCT__
1856 #define __FUNCT__ "MatSetUpPreallocation_MPIBAIJ"
1857 int MatSetUpPreallocation_MPIBAIJ(Mat A)
1858 {
1859   int        ierr;
1860 
1861   PetscFunctionBegin;
1862   ierr =  MatMPIBAIJSetPreallocation(A,1,PETSC_DEFAULT,0,PETSC_DEFAULT,0);CHKERRQ(ierr);
1863   PetscFunctionReturn(0);
1864 }
1865 
1866 /* -------------------------------------------------------------------*/
1867 static struct _MatOps MatOps_Values = {
1868        MatSetValues_MPIBAIJ,
1869        MatGetRow_MPIBAIJ,
1870        MatRestoreRow_MPIBAIJ,
1871        MatMult_MPIBAIJ,
1872 /* 4*/ MatMultAdd_MPIBAIJ,
1873        MatMultTranspose_MPIBAIJ,
1874        MatMultTransposeAdd_MPIBAIJ,
1875        0,
1876        0,
1877        0,
1878 /*10*/ 0,
1879        0,
1880        0,
1881        0,
1882        MatTranspose_MPIBAIJ,
1883 /*15*/ MatGetInfo_MPIBAIJ,
1884        MatEqual_MPIBAIJ,
1885        MatGetDiagonal_MPIBAIJ,
1886        MatDiagonalScale_MPIBAIJ,
1887        MatNorm_MPIBAIJ,
1888 /*20*/ MatAssemblyBegin_MPIBAIJ,
1889        MatAssemblyEnd_MPIBAIJ,
1890        0,
1891        MatSetOption_MPIBAIJ,
1892        MatZeroEntries_MPIBAIJ,
1893 /*25*/ MatZeroRows_MPIBAIJ,
1894        0,
1895        0,
1896        0,
1897        0,
1898 /*30*/ MatSetUpPreallocation_MPIBAIJ,
1899        0,
1900        0,
1901        0,
1902        0,
1903 /*35*/ MatDuplicate_MPIBAIJ,
1904        0,
1905        0,
1906        0,
1907        0,
1908 /*40*/ 0,
1909        MatGetSubMatrices_MPIBAIJ,
1910        MatIncreaseOverlap_MPIBAIJ,
1911        MatGetValues_MPIBAIJ,
1912        0,
1913 /*45*/ MatPrintHelp_MPIBAIJ,
1914        MatScale_MPIBAIJ,
1915        0,
1916        0,
1917        0,
1918 /*50*/ MatGetBlockSize_MPIBAIJ,
1919        0,
1920        0,
1921        0,
1922        0,
1923 /*55*/ 0,
1924        0,
1925        MatSetUnfactored_MPIBAIJ,
1926        0,
1927        MatSetValuesBlocked_MPIBAIJ,
1928 /*60*/ 0,
1929        MatDestroy_MPIBAIJ,
1930        MatView_MPIBAIJ,
1931        MatGetPetscMaps_Petsc,
1932        0,
1933 /*65*/ 0,
1934        0,
1935        0,
1936        0,
1937        0,
1938 /*70*/ MatGetRowMax_MPIBAIJ,
1939        0,
1940        0,
1941        0,
1942        0,
1943 /*75*/ 0,
1944        0,
1945        0,
1946        0,
1947        0,
1948 /*80*/ 0,
1949        0,
1950        0,
1951        0,
1952        0,
1953 /*85*/ MatLoad_MPIBAIJ
1954 };
1955 
1956 
1957 EXTERN_C_BEGIN
1958 #undef __FUNCT__
1959 #define __FUNCT__ "MatGetDiagonalBlock_MPIBAIJ"
1960 int MatGetDiagonalBlock_MPIBAIJ(Mat A,PetscTruth *iscopy,MatReuse reuse,Mat *a)
1961 {
1962   PetscFunctionBegin;
1963   *a      = ((Mat_MPIBAIJ *)A->data)->A;
1964   *iscopy = PETSC_FALSE;
1965   PetscFunctionReturn(0);
1966 }
1967 EXTERN_C_END
1968 
1969 EXTERN_C_BEGIN
1970 #undef __FUNCT__
1971 #define __FUNCT__ "MatMPIBAIJSetPreallocation_MPIBAIJ"
1972 int MatMPIBAIJSetPreallocation_MPIBAIJ(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
1973 {
1974   Mat_MPIBAIJ  *b;
1975   int          ierr,i;
1976 
1977   PetscFunctionBegin;
1978   B->preallocated = PETSC_TRUE;
1979   ierr = PetscOptionsGetInt(PETSC_NULL,"-mat_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
1980 
1981   if (bs < 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Invalid block size specified, must be positive");
1982   if (d_nz == PETSC_DEFAULT || d_nz == PETSC_DECIDE) d_nz = 5;
1983   if (o_nz == PETSC_DEFAULT || o_nz == PETSC_DECIDE) o_nz = 2;
1984   if (d_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"d_nz cannot be less than 0: value %d",d_nz);
1985   if (o_nz < 0) SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE,"o_nz cannot be less than 0: value %d",o_nz);
1986   if (d_nnz) {
1987     for (i=0; i<B->m/bs; i++) {
1988       if (d_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"d_nnz cannot be less than 0: local row %d value %d",i,d_nnz[i]);
1989     }
1990   }
1991   if (o_nnz) {
1992     for (i=0; i<B->m/bs; i++) {
1993       if (o_nnz[i] < 0) SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"o_nnz cannot be less than 0: local row %d value %d",i,o_nnz[i]);
1994     }
1995   }
1996 
1997   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->m,&B->M);CHKERRQ(ierr);
1998   ierr = PetscSplitOwnershipBlock(B->comm,bs,&B->n,&B->N);CHKERRQ(ierr);
1999   ierr = PetscMapCreateMPI(B->comm,B->m,B->M,&B->rmap);CHKERRQ(ierr);
2000   ierr = PetscMapCreateMPI(B->comm,B->n,B->N,&B->cmap);CHKERRQ(ierr);
2001 
2002   b = (Mat_MPIBAIJ*)B->data;
2003   b->bs  = bs;
2004   b->bs2 = bs*bs;
2005   b->mbs = B->m/bs;
2006   b->nbs = B->n/bs;
2007   b->Mbs = B->M/bs;
2008   b->Nbs = B->N/bs;
2009 
2010   ierr = MPI_Allgather(&b->mbs,1,MPI_INT,b->rowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
2011   b->rowners[0]    = 0;
2012   for (i=2; i<=b->size; i++) {
2013     b->rowners[i] += b->rowners[i-1];
2014   }
2015   b->rstart    = b->rowners[b->rank];
2016   b->rend      = b->rowners[b->rank+1];
2017 
2018   ierr = MPI_Allgather(&b->nbs,1,MPI_INT,b->cowners+1,1,MPI_INT,B->comm);CHKERRQ(ierr);
2019   b->cowners[0] = 0;
2020   for (i=2; i<=b->size; i++) {
2021     b->cowners[i] += b->cowners[i-1];
2022   }
2023   b->cstart    = b->cowners[b->rank];
2024   b->cend      = b->cowners[b->rank+1];
2025 
2026   for (i=0; i<=b->size; i++) {
2027     b->rowners_bs[i] = b->rowners[i]*bs;
2028   }
2029   b->rstart_bs = b->rstart*bs;
2030   b->rend_bs   = b->rend*bs;
2031   b->cstart_bs = b->cstart*bs;
2032   b->cend_bs   = b->cend*bs;
2033 
2034   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->n,d_nz,d_nnz,&b->A);CHKERRQ(ierr);
2035   PetscLogObjectParent(B,b->A);
2036   ierr = MatCreateSeqBAIJ(PETSC_COMM_SELF,bs,B->m,B->N,o_nz,o_nnz,&b->B);CHKERRQ(ierr);
2037   PetscLogObjectParent(B,b->B);
2038   ierr = MatStashCreate_Private(B->comm,bs,&B->bstash);CHKERRQ(ierr);
2039 
2040   PetscFunctionReturn(0);
2041 }
2042 EXTERN_C_END
2043 
2044 EXTERN_C_BEGIN
2045 EXTERN int MatDiagonalScaleLocal_MPIBAIJ(Mat,Vec);
2046 EXTERN int MatSetHashTableFactor_MPIBAIJ(Mat,PetscReal);
2047 EXTERN_C_END
2048 
2049 /*MC
2050    MATMPIBAIJ - MATMPIBAIJ = "mpibaij" - A matrix type to be used for distributed block sparse matrices.
2051 
2052    Options Database Keys:
2053 . -mat_type mpibaij - sets the matrix type to "mpibaij" during a call to MatSetFromOptions()
2054 
2055   Level: beginner
2056 
2057 .seealso: MatCreateMPIBAIJ
2058 M*/
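
/*
   A minimal usage sketch (the communicator and the global sizes M and N are
   illustrative placeholders):

     Mat A;
     ierr = MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,M,N,&A);CHKERRQ(ierr);
     ierr = MatSetFromOptions(A);CHKERRQ(ierr);

   then run with -mat_type mpibaij to select this format at runtime
*/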
2059 
2060 EXTERN_C_BEGIN
2061 #undef __FUNCT__
2062 #define __FUNCT__ "MatCreate_MPIBAIJ"
2063 int MatCreate_MPIBAIJ(Mat B)
2064 {
2065   Mat_MPIBAIJ  *b;
2066   int          ierr;
2067   PetscTruth   flg;
2068 
2069   PetscFunctionBegin;
2070 
2071   ierr = PetscNew(Mat_MPIBAIJ,&b);CHKERRQ(ierr);
2072   B->data = (void*)b;
2073 
2074   ierr    = PetscMemzero(b,sizeof(Mat_MPIBAIJ));CHKERRQ(ierr);
2075   ierr    = PetscMemcpy(B->ops,&MatOps_Values,sizeof(struct _MatOps));CHKERRQ(ierr);
2076   B->mapping    = 0;
2077   B->factor     = 0;
2078   B->assembled  = PETSC_FALSE;
2079 
2080   B->insertmode = NOT_SET_VALUES;
2081   ierr = MPI_Comm_rank(B->comm,&b->rank);CHKERRQ(ierr);
2082   ierr = MPI_Comm_size(B->comm,&b->size);CHKERRQ(ierr);
2083 
2084   /* build local table of row and column ownerships */
2085   ierr          = PetscMalloc(3*(b->size+2)*sizeof(int),&b->rowners);CHKERRQ(ierr);
2086   PetscLogObjectMemory(B,3*(b->size+2)*sizeof(int)+sizeof(struct _p_Mat)+sizeof(Mat_MPIBAIJ));
2087   b->cowners    = b->rowners + b->size + 2;
2088   b->rowners_bs = b->cowners + b->size + 2;
2089 
2090   /* build cache for off array entries formed */
2091   ierr = MatStashCreate_Private(B->comm,1,&B->stash);CHKERRQ(ierr);
2092   b->donotstash  = PETSC_FALSE;
2093   b->colmap      = PETSC_NULL;
2094   b->garray      = PETSC_NULL;
2095   b->roworiented = PETSC_TRUE;
2096 
2097 #if defined(PETSC_USE_MAT_SINGLE)
2098   /* stuff for MatSetValues_XXX in single precision */
2099   b->setvalueslen     = 0;
2100   b->setvaluescopy    = PETSC_NULL;
2101 #endif
2102 
2103   /* stuff used in block assembly */
2104   b->barray       = 0;
2105 
2106   /* stuff used for matrix vector multiply */
2107   b->lvec         = 0;
2108   b->Mvctx        = 0;
2109 
2110   /* stuff for MatGetRow() */
2111   b->rowindices   = 0;
2112   b->rowvalues    = 0;
2113   b->getrowactive = PETSC_FALSE;
2114 
2115   /* hash table stuff */
2116   b->ht           = 0;
2117   b->hd           = 0;
2118   b->ht_size      = 0;
2119   b->ht_flag      = PETSC_FALSE;
2120   b->ht_fact      = 0;
2121   b->ht_total_ct  = 0;
2122   b->ht_insert_ct = 0;
2123 
2124   ierr = PetscOptionsHasName(PETSC_NULL,"-mat_use_hash_table",&flg);CHKERRQ(ierr);
2125   if (flg) {
2126     PetscReal fact = 1.39;
2127     ierr = MatSetOption(B,MAT_USE_HASH_TABLE);CHKERRQ(ierr);
2128     ierr = PetscOptionsGetReal(PETSC_NULL,"-mat_use_hash_table",&fact,PETSC_NULL);CHKERRQ(ierr);
2129     if (fact <= 1.0) fact = 1.39;
2130     ierr = MatMPIBAIJSetHashTableFactor(B,fact);CHKERRQ(ierr);
2131     PetscLogInfo(0,"MatCreateMPIBAIJ:Hash table Factor used %5.2f\n",fact);
2132   }
2133   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatStoreValues_C",
2134                                      "MatStoreValues_MPIBAIJ",
2135                                      MatStoreValues_MPIBAIJ);CHKERRQ(ierr);
2136   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatRetrieveValues_C",
2137                                      "MatRetrieveValues_MPIBAIJ",
2138                                      MatRetrieveValues_MPIBAIJ);CHKERRQ(ierr);
2139   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatGetDiagonalBlock_C",
2140                                      "MatGetDiagonalBlock_MPIBAIJ",
2141                                      MatGetDiagonalBlock_MPIBAIJ);CHKERRQ(ierr);
2142   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPIBAIJSetPreallocation_C",
2143                                      "MatMPIBAIJSetPreallocation_MPIBAIJ",
2144                                      MatMPIBAIJSetPreallocation_MPIBAIJ);CHKERRQ(ierr);
2145   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatDiagonalScaleLocal_C",
2146                                      "MatDiagonalScaleLocal_MPIBAIJ",
2147                                      MatDiagonalScaleLocal_MPIBAIJ);CHKERRQ(ierr);
2148   ierr = PetscObjectComposeFunctionDynamic((PetscObject)B,"MatSetHashTableFactor_C",
2149                                      "MatSetHashTableFactor_MPIBAIJ",
2150                                      MatSetHashTableFactor_MPIBAIJ);CHKERRQ(ierr);
2151   PetscFunctionReturn(0);
2152 }
2153 EXTERN_C_END
2154 
2155 /*MC
2156    MATBAIJ - MATBAIJ = "baij" - A matrix type to be used for block sparse matrices.
2157 
2158    This matrix type is identical to MATSEQBAIJ when constructed with a single process communicator,
2159    and MATMPIBAIJ otherwise.
2160 
2161    Options Database Keys:
2162 . -mat_type baij - sets the matrix type to "baij" during a call to MatSetFromOptions()
2163 
2164   Level: beginner
2165 
2166 .seealso: MatCreateMPIBAIJ,MATSEQBAIJ,MATMPIBAIJ
2167 M*/
2168 
2169 EXTERN_C_BEGIN
2170 #undef __FUNCT__
2171 #define __FUNCT__ "MatCreate_BAIJ"
2172 int MatCreate_BAIJ(Mat A) {
2173   int ierr,size;
2174 
2175   PetscFunctionBegin;
2176   ierr = PetscObjectChangeTypeName((PetscObject)A,MATBAIJ);CHKERRQ(ierr);
2177   ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
2178   if (size == 1) {
2179     ierr = MatSetType(A,MATSEQBAIJ);CHKERRQ(ierr);
2180   } else {
2181     ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
2182   }
2183   PetscFunctionReturn(0);
2184 }
2185 EXTERN_C_END
2186 
2187 #undef __FUNCT__
2188 #define __FUNCT__ "MatMPIBAIJSetPreallocation"
2189 /*@C
2190    MatMPIBAIJSetPreallocation - Preallocates memory for a sparse parallel matrix in block AIJ
2191    format (block compressed row).  For good matrix assembly performance
2192    the user should preallocate the matrix storage by setting the parameters
2193    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
2194    performance can be increased by more than a factor of 50.
2195 
2196    Collective on Mat
2197 
2198    Input Parameters:
2199 +  A - the matrix
2200 .  bs   - size of block
2201 .  d_nz  - number of block nonzeros per block row in diagonal portion of local
2202            submatrix  (same for all local rows)
2203 .  d_nnz - array containing the number of block nonzeros in the various block rows
2204            in the diagonal portion of the local submatrix (possibly different for each block
2205            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
2206 .  o_nz  - number of block nonzeros per block row in the off-diagonal portion of local
2207            submatrix (same for all local rows).
2208 -  o_nnz - array containing the number of block nonzeros in the various block rows of the
2209            off-diagonal portion of the local submatrix (possibly different for
2210            each block row) or PETSC_NULL.
2211 
2215    Options Database Keys:
2216 +   -mat_no_unroll - uses code that does not unroll the loops in the
2217                      block calculations (much slower)
2218 -   -mat_block_size - size of the blocks to use
2219 
2220    Notes:
2221    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
2222    then it must be used on all processors that share the object for that argument.
2223 
2224    Storage Information:
2225    For a square global matrix we define each processor's diagonal portion
2226    to be its local rows and the corresponding columns (a square submatrix);
2227    each processor's off-diagonal portion encompasses the remainder of the
2228    local matrix (a rectangular submatrix).
2229 
2230    The user can specify preallocated storage for the diagonal part of
2231    the local submatrix with either d_nz or d_nnz (not both).  Set
2232    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
2233    memory allocation.  Likewise, specify preallocated storage for the
2234    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
2235 
2236    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
2237    the figure below we depict these three local rows and all columns (0-11).
2238 
2239 .vb
2240            0 1 2 3 4 5 6 7 8 9 10 11
2241           -------------------
2242    row 3  |  o o o d d d o o o o o o
2243    row 4  |  o o o d d d o o o o o o
2244    row 5  |  o o o d d d o o o o o o
2245           -------------------
2246 .ve
2247 
2248    Thus, any entries in the d locations are stored in the d (diagonal)
2249    submatrix, and any entries in the o locations are stored in the
2250    o (off-diagonal) submatrix.  Note that the d and the o submatrices are
2251    stored simply in the MATSEQBAIJ format for compressed row storage.
2252 
2253    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
2254    and o_nz should indicate the number of block nonzeros per row in the o matrix.
2255    In general, for PDE problems in which most nonzeros are near the diagonal,
2256    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
2257    or you will get TERRIBLE performance; see the users' manual chapter on
2258    matrices.
2259 
2260    Level: intermediate
2261 
2262 .keywords: matrix, block, aij, compressed row, sparse, parallel
2263 
2264 .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatCreateMPIBAIJ()
2265 @*/
2266 int MatMPIBAIJSetPreallocation(Mat B,int bs,int d_nz,const int d_nnz[],int o_nz,const int o_nnz[])
2267 {
2268   int ierr,(*f)(Mat,int,int,const int[],int,const int[]);
2269 
2270   PetscFunctionBegin;
2271   ierr = PetscObjectQueryFunction((PetscObject)B,"MatMPIBAIJSetPreallocation_C",(void (**)(void))&f);CHKERRQ(ierr);
2272   if (f) {
2273     ierr = (*f)(B,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
2274   }
2275   PetscFunctionReturn(0);
2276 }
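
/*
   A minimal sketch of the intended call sequence (the block size and nonzero
   counts here are illustrative, not recommendations):

     Mat A;
     ierr = MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,M,N,&A);CHKERRQ(ierr);
     ierr = MatSetType(A,MATMPIBAIJ);CHKERRQ(ierr);
     ierr = MatMPIBAIJSetPreallocation(A,2,5,PETSC_NULL,2,PETSC_NULL);CHKERRQ(ierr);
*/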
2277 
2278 #undef __FUNCT__
2279 #define __FUNCT__ "MatCreateMPIBAIJ"
2280 /*@C
2281    MatCreateMPIBAIJ - Creates a sparse parallel matrix in block AIJ format
2282    (block compressed row).  For good matrix assembly performance
2283    the user should preallocate the matrix storage by setting the parameters
2284    d_nz (or d_nnz) and o_nz (or o_nnz).  By setting these parameters accurately,
2285    performance can be increased by more than a factor of 50.
2286 
2287    Collective on MPI_Comm
2288 
2289    Input Parameters:
2290 +  comm - MPI communicator
2291 .  bs   - size of block
2292 .  m - number of local rows (or PETSC_DECIDE to have calculated if M is given)
2293            This value should be the same as the local size used in creating the
2294            y vector for the matrix-vector product y = Ax.
2295 .  n - number of local columns (or PETSC_DECIDE to have calculated if N is given)
2296            This value should be the same as the local size used in creating the
2297            x vector for the matrix-vector product y = Ax.
2298 .  M - number of global rows (or PETSC_DETERMINE to have calculated if m is given)
2299 .  N - number of global columns (or PETSC_DETERMINE to have calculated if n is given)
2300 .  d_nz  - number of nonzero blocks per block row in diagonal portion of local
2301            submatrix  (same for all local rows)
2302 .  d_nnz - array containing the number of nonzero blocks in the various block rows
2303            in the diagonal portion of the local submatrix (possibly different for each block
2304            row) or PETSC_NULL.  You must leave room for the diagonal entry even if it is zero.
2305 .  o_nz  - number of nonzero blocks per block row in the off-diagonal portion of local
2306            submatrix (same for all local rows).
2307 -  o_nnz - array containing the number of nonzero blocks in the various block rows of the
2308            off-diagonal portion of the local submatrix (possibly different for
2309            each block row) or PETSC_NULL.
2310 
2311    Output Parameter:
2312 .  A - the matrix
2313 
2314    Options Database Keys:
2315 +   -mat_no_unroll - uses code that does not unroll the loops in the
2316                      block calculations (much slower)
2317 -   -mat_block_size - size of the blocks to use
2318 
2319    Notes:
2320    A nonzero block is any block that has 1 or more nonzeros in it
2321 
2322    The user MUST specify either the local or global matrix dimensions
2323    (possibly both).
2324 
2325    If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one processor
2326    then it must be used on all processors that share the object for that argument.
2327 
2328    Storage Information:
2329    For a square global matrix we define each processor's diagonal portion
2330    to be its local rows and the corresponding columns (a square submatrix);
2331    each processor's off-diagonal portion encompasses the remainder of the
2332    local matrix (a rectangular submatrix).
2333 
2334    The user can specify preallocated storage for the diagonal part of
2335    the local submatrix with either d_nz or d_nnz (not both).  Set
2336    d_nz=PETSC_DEFAULT and d_nnz=PETSC_NULL for PETSc to control dynamic
2337    memory allocation.  Likewise, specify preallocated storage for the
2338    off-diagonal part of the local submatrix with o_nz or o_nnz (not both).
2339 
2340    Consider a processor that owns rows 3, 4 and 5 of a parallel matrix. In
2341    the figure below we depict these three local rows and all columns (0-11).
2342 
2343 .vb
2344            0 1 2 3 4 5 6 7 8 9 10 11
2345           -------------------
2346    row 3  |  o o o d d d o o o o o o
2347    row 4  |  o o o d d d o o o o o o
2348    row 5  |  o o o d d d o o o o o o
2349           -------------------
2350 .ve
2351 
2352    Thus, any entries in the d locations are stored in the d (diagonal)
2353    submatrix, and any entries in the o locations are stored in the
2354    o (off-diagonal) submatrix.  Note that the d and the o submatrices are
2355    stored simply in the MATSEQBAIJ format for compressed row storage.
2356 
2357    Now d_nz should indicate the number of block nonzeros per row in the d matrix,
2358    and o_nz should indicate the number of block nonzeros per row in the o matrix.
2359    In general, for PDE problems in which most nonzeros are near the diagonal,
2360    one expects d_nz >> o_nz.   For large problems you MUST preallocate memory
2361    or you will get TERRIBLE performance; see the users' manual chapter on
2362    matrices.
2363 
2364    Level: intermediate
2365 
2366 .keywords: matrix, block, aij, compressed row, sparse, parallel
2367 
2368 .seealso: MatCreate(), MatCreateSeqBAIJ(), MatSetValues(), MatMPIBAIJSetPreallocation()
2369 @*/
2370 int MatCreateMPIBAIJ(MPI_Comm comm,int bs,int m,int n,int M,int N,int d_nz,const int d_nnz[],int o_nz,const int o_nnz[],Mat *A)
2371 {
2372   int ierr,size;
2373 
2374   PetscFunctionBegin;
2375   ierr = MatCreate(comm,m,n,M,N,A);CHKERRQ(ierr);
2376   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2377   if (size > 1) {
2378     ierr = MatSetType(*A,MATMPIBAIJ);CHKERRQ(ierr);
2379     ierr = MatMPIBAIJSetPreallocation(*A,bs,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
2380   } else {
2381     ierr = MatSetType(*A,MATSEQBAIJ);CHKERRQ(ierr);
2382     ierr = MatSeqBAIJSetPreallocation(*A,bs,d_nz,d_nnz);CHKERRQ(ierr);
2383   }
2384   PetscFunctionReturn(0);
2385 }
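
/*
   A minimal sketch (the sizes and counts are illustrative): create a matrix of
   global size 100x100 with 2x2 blocks, preallocating 5 diagonal and 2
   off-diagonal block nonzeros per block row:

     Mat A;
     ierr = MatCreateMPIBAIJ(PETSC_COMM_WORLD,2,PETSC_DECIDE,PETSC_DECIDE,
                             100,100,5,PETSC_NULL,2,PETSC_NULL,&A);CHKERRQ(ierr);
*/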
2386 
2387 #undef __FUNCT__
2388 #define __FUNCT__ "MatDuplicate_MPIBAIJ"
2389 static int MatDuplicate_MPIBAIJ(Mat matin,MatDuplicateOption cpvalues,Mat *newmat)
2390 {
2391   Mat         mat;
2392   Mat_MPIBAIJ *a,*oldmat = (Mat_MPIBAIJ*)matin->data;
2393   int         ierr,len=0;
2394 
2395   PetscFunctionBegin;
2396   *newmat       = 0;
2397   ierr = MatCreate(matin->comm,matin->m,matin->n,matin->M,matin->N,&mat);CHKERRQ(ierr);
2398   ierr = MatSetType(mat,MATMPIBAIJ);CHKERRQ(ierr);
2399   mat->preallocated = PETSC_TRUE;
2400   mat->assembled    = PETSC_TRUE;
2401   a      = (Mat_MPIBAIJ*)mat->data;
2402   a->bs  = oldmat->bs;
2403   a->bs2 = oldmat->bs2;
2404   a->mbs = oldmat->mbs;
2405   a->nbs = oldmat->nbs;
2406   a->Mbs = oldmat->Mbs;
2407   a->Nbs = oldmat->Nbs;
2408 
2409   a->rstart       = oldmat->rstart;
2410   a->rend         = oldmat->rend;
2411   a->cstart       = oldmat->cstart;
2412   a->cend         = oldmat->cend;
2413   a->size         = oldmat->size;
2414   a->rank         = oldmat->rank;
2415   a->donotstash   = oldmat->donotstash;
2416   a->roworiented  = oldmat->roworiented;
2417   a->rowindices   = 0;
2418   a->rowvalues    = 0;
2419   a->getrowactive = PETSC_FALSE;
2420   a->barray       = 0;
2421   a->rstart_bs    = oldmat->rstart_bs;
2422   a->rend_bs      = oldmat->rend_bs;
2423   a->cstart_bs    = oldmat->cstart_bs;
2424   a->cend_bs      = oldmat->cend_bs;
2425 
2426   /* hash table stuff */
2427   a->ht           = 0;
2428   a->hd           = 0;
2429   a->ht_size      = 0;
2430   a->ht_flag      = oldmat->ht_flag;
2431   a->ht_fact      = oldmat->ht_fact;
2432   a->ht_total_ct  = 0;
2433   a->ht_insert_ct = 0;
2434 
2435   ierr = PetscMemcpy(a->rowners,oldmat->rowners,3*(a->size+2)*sizeof(int));CHKERRQ(ierr);
2436   ierr = MatStashCreate_Private(matin->comm,1,&mat->stash);CHKERRQ(ierr);
2437   ierr = MatStashCreate_Private(matin->comm,oldmat->bs,&mat->bstash);CHKERRQ(ierr);
2438   if (oldmat->colmap) {
2439 #if defined (PETSC_USE_CTABLE)
2440   ierr = PetscTableCreateCopy(oldmat->colmap,&a->colmap);CHKERRQ(ierr);
2441 #else
2442   ierr = PetscMalloc((a->Nbs)*sizeof(int),&a->colmap);CHKERRQ(ierr);
2443   PetscLogObjectMemory(mat,(a->Nbs)*sizeof(int));
2444   ierr = PetscMemcpy(a->colmap,oldmat->colmap,(a->Nbs)*sizeof(int));CHKERRQ(ierr);
2445 #endif
2446   } else a->colmap = 0;
2447   if (oldmat->garray && (len = ((Mat_SeqBAIJ*)(oldmat->B->data))->nbs)) {
2448     ierr = PetscMalloc(len*sizeof(int),&a->garray);CHKERRQ(ierr);
2449     PetscLogObjectMemory(mat,len*sizeof(int));
2450     ierr = PetscMemcpy(a->garray,oldmat->garray,len*sizeof(int));CHKERRQ(ierr);
2451   } else a->garray = 0;
2452 
2453   ierr =  VecDuplicate(oldmat->lvec,&a->lvec);CHKERRQ(ierr);
2454   PetscLogObjectParent(mat,a->lvec);
2455   ierr =  VecScatterCopy(oldmat->Mvctx,&a->Mvctx);CHKERRQ(ierr);
2456 
2457   PetscLogObjectParent(mat,a->Mvctx);
2458   ierr =  MatDuplicate(oldmat->A,cpvalues,&a->A);CHKERRQ(ierr);
2459   PetscLogObjectParent(mat,a->A);
2460   ierr =  MatDuplicate(oldmat->B,cpvalues,&a->B);CHKERRQ(ierr);
2461   PetscLogObjectParent(mat,a->B);
2462   ierr = PetscFListDuplicate(matin->qlist,&mat->qlist);CHKERRQ(ierr);
2463   *newmat = mat;
2464   PetscFunctionReturn(0);
2465 }
2466 
2467 #include "petscsys.h"
2468 
2469 #undef __FUNCT__
2470 #define __FUNCT__ "MatLoad_MPIBAIJ"
2471 int MatLoad_MPIBAIJ(PetscViewer viewer,MatType type,Mat *newmat)
2472 {
2473   Mat          A;
2474   int          i,nz,ierr,j,rstart,rend,fd;
2475   PetscScalar  *vals,*buf;
2476   MPI_Comm     comm = ((PetscObject)viewer)->comm;
2477   MPI_Status   status;
2478   int          header[4],rank,size,*rowlengths = 0,M,N,m,*rowners,*browners,maxnz,*cols;
2479   int          *locrowlens,*sndcounts = 0,*procsnz = 0,jj,*mycols,*ibuf;
2480   int          tag = ((PetscObject)viewer)->tag,bs=1,Mbs,mbs,extra_rows;
2481   int          *dlens,*odlens,*mask,*masked1,*masked2,rowcount,odcount;
2482   int          dcount,kmax,k,nzcount,tmp;
2483 
2484   PetscFunctionBegin;
2485   ierr = PetscOptionsGetInt(PETSC_NULL,"-matload_block_size",&bs,PETSC_NULL);CHKERRQ(ierr);
2486 
2487   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
2488   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
2489   if (!rank) {
2490     ierr = PetscViewerBinaryGetDescriptor(viewer,&fd);CHKERRQ(ierr);
2491     ierr = PetscBinaryRead(fd,(char *)header,4,PETSC_INT);CHKERRQ(ierr);
2492     if (header[0] != MAT_FILE_COOKIE) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"not a matrix object");
2493     if (header[3] < 0) {
2494       SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"Matrix stored in special format, cannot load as MPIBAIJ");
2495     }
2496   }
2497 
2498   ierr = MPI_Bcast(header+1,3,MPI_INT,0,comm);CHKERRQ(ierr);
2499   M = header[1]; N = header[2];
2500 
2501   if (M != N) SETERRQ(PETSC_ERR_SUP,"Can only do square matrices");
2502 
2503   /*
2504      This code adds extra rows to make sure the number of rows is
2505      divisible by the blocksize
2506   */
2507   Mbs        = M/bs;
2508   extra_rows = bs - M + bs*(Mbs);
2509   if (extra_rows == bs) extra_rows = 0;
2510   else                  Mbs++;
2511   if (extra_rows && !rank) {
2512     PetscLogInfo(0,"MatLoad_MPIBAIJ:Padding loaded matrix to match blocksize\n");
2513   }
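  /* e.g. with M = 10 and bs = 4: Mbs = 10/4 = 2 and extra_rows = 4 - 10 + 8 = 2,
     so Mbs becomes 3 and the matrix is padded to 12 rows; each padded row is
     given a single 1.0 on the diagonal when the values are read in below */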
2514 
2515   /* determine ownership of all rows */
2516   mbs        = Mbs/size + ((Mbs % size) > rank);
2517   m          = mbs*bs;
2518   ierr       = PetscMalloc(2*(size+2)*sizeof(int),&rowners);CHKERRQ(ierr);
2519   browners   = rowners + size + 1;
2520   ierr       = MPI_Allgather(&mbs,1,MPI_INT,rowners+1,1,MPI_INT,comm);CHKERRQ(ierr);
2521   rowners[0] = 0;
2522   for (i=2; i<=size; i++) rowners[i] += rowners[i-1];
2523   for (i=0; i<=size;  i++) browners[i] = rowners[i]*bs;
2524   rstart = rowners[rank];
2525   rend   = rowners[rank+1];
2526 
2527   /* distribute row lengths to all processors */
2528   ierr = PetscMalloc((rend-rstart)*bs*sizeof(int),&locrowlens);CHKERRQ(ierr);
2529   if (!rank) {
2530     ierr = PetscMalloc((M+extra_rows)*sizeof(int),&rowlengths);CHKERRQ(ierr);
2531     ierr = PetscBinaryRead(fd,rowlengths,M,PETSC_INT);CHKERRQ(ierr);
2532     for (i=0; i<extra_rows; i++) rowlengths[M+i] = 1;
2533     ierr = PetscMalloc(size*sizeof(int),&sndcounts);CHKERRQ(ierr);
2534     for (i=0; i<size; i++) sndcounts[i] = browners[i+1] - browners[i];
2535     ierr = MPI_Scatterv(rowlengths,sndcounts,browners,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2536     ierr = PetscFree(sndcounts);CHKERRQ(ierr);
2537   } else {
2538     ierr = MPI_Scatterv(0,0,0,MPI_INT,locrowlens,(rend-rstart)*bs,MPI_INT,0,comm);CHKERRQ(ierr);
2539   }
2540 
2541   if (!rank) {
2542     /* calculate the number of nonzeros on each processor */
2543     ierr = PetscMalloc(size*sizeof(int),&procsnz);CHKERRQ(ierr);
2544     ierr = PetscMemzero(procsnz,size*sizeof(int));CHKERRQ(ierr);
2545     for (i=0; i<size; i++) {
2546       for (j=rowners[i]*bs; j< rowners[i+1]*bs; j++) {
2547         procsnz[i] += rowlengths[j];
2548       }
2549     }
2550     ierr = PetscFree(rowlengths);CHKERRQ(ierr);
2551 
2552     /* determine max buffer needed and allocate it */
2553     maxnz = 0;
2554     for (i=0; i<size; i++) {
2555       maxnz = PetscMax(maxnz,procsnz[i]);
2556     }
2557     ierr = PetscMalloc(maxnz*sizeof(int),&cols);CHKERRQ(ierr);
2558 
2559     /* read in my part of the matrix column indices  */
2560     nz     = procsnz[0];
2561     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2562     mycols = ibuf;
2563     if (size == 1)  nz -= extra_rows;
2564     ierr = PetscBinaryRead(fd,mycols,nz,PETSC_INT);CHKERRQ(ierr);
2565     if (size == 1)  for (i=0; i< extra_rows; i++) { mycols[nz+i] = M+i; }
2566 
2567     /* read in each remaining processor's part (except the last) and ship it off */
2568     for (i=1; i<size-1; i++) {
2569       nz   = procsnz[i];
2570       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2571       ierr = MPI_Send(cols,nz,MPI_INT,i,tag,comm);CHKERRQ(ierr);
2572     }
2573     /* read in the stuff for the last proc */
2574     if (size != 1) {
2575       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2576       ierr = PetscBinaryRead(fd,cols,nz,PETSC_INT);CHKERRQ(ierr);
2577       for (i=0; i<extra_rows; i++) cols[nz+i] = M+i;
2578       ierr = MPI_Send(cols,nz+extra_rows,MPI_INT,size-1,tag,comm);CHKERRQ(ierr);
2579     }
2580     ierr = PetscFree(cols);CHKERRQ(ierr);
2581   } else {
2582     /* determine buffer space needed for message */
2583     nz = 0;
2584     for (i=0; i<m; i++) {
2585       nz += locrowlens[i];
2586     }
2587     ierr   = PetscMalloc(nz*sizeof(int),&ibuf);CHKERRQ(ierr);
2588     mycols = ibuf;
2589     /* receive message of column indices*/
2590     ierr = MPI_Recv(mycols,nz,MPI_INT,0,tag,comm,&status);CHKERRQ(ierr);
2591     ierr = MPI_Get_count(&status,MPI_INT,&maxnz);CHKERRQ(ierr);
2592     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
2593   }
2594 
2595   /* loop over local rows, determining number of off-diagonal entries */
2596   ierr     = PetscMalloc(2*(rend-rstart+1)*sizeof(int),&dlens);CHKERRQ(ierr);
2597   odlens   = dlens + (rend-rstart);
2598   ierr     = PetscMalloc(3*Mbs*sizeof(int),&mask);CHKERRQ(ierr);
2599   ierr     = PetscMemzero(mask,3*Mbs*sizeof(int));CHKERRQ(ierr);
2600   masked1  = mask    + Mbs;
2601   masked2  = masked1 + Mbs;
2602   rowcount = 0; nzcount = 0;
2603   for (i=0; i<mbs; i++) {
2604     dcount  = 0;
2605     odcount = 0;
2606     for (j=0; j<bs; j++) {
2607       kmax = locrowlens[rowcount];
2608       for (k=0; k<kmax; k++) {
2609         tmp = mycols[nzcount++]/bs;
2610         if (!mask[tmp]) {
2611           mask[tmp] = 1;
2612           if (tmp < rstart || tmp >= rend) masked2[odcount++] = tmp;
2613           else masked1[dcount++] = tmp;
2614         }
2615       }
2616       rowcount++;
2617     }
2618 
2619     dlens[i]  = dcount;
2620     odlens[i] = odcount;
2621 
2622     /* zero out the mask elements we set */
2623     for (j=0; j<dcount; j++) mask[masked1[j]] = 0;
2624     for (j=0; j<odcount; j++) mask[masked2[j]] = 0;
2625   }
2626 
2627   /* create our matrix */
2628   ierr = MatCreate(comm,m,m,M+extra_rows,N+extra_rows,&A);CHKERRQ(ierr);
2629   ierr = MatSetType(A,type);CHKERRQ(ierr);
2630   ierr = MatMPIBAIJSetPreallocation(A,bs,0,dlens,0,odlens);CHKERRQ(ierr);
2631 
2632   /* why isn't this called as: ierr = MatSetOption(A,MAT_COLUMNS_SORTED);CHKERRQ(ierr); ? */
2633   MatSetOption(A,MAT_COLUMNS_SORTED);
2634 
2635   if (!rank) {
2636     ierr = PetscMalloc(maxnz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2637     /* read in my part of the matrix numerical values  */
2638     nz = procsnz[0];
2639     vals = buf;
2640     mycols = ibuf;
2641     if (size == 1)  nz -= extra_rows;
2642     ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2643     if (size == 1)  for (i=0; i< extra_rows; i++) { vals[nz+i] = 1.0; }
2644 
2645     /* insert into matrix */
2646     jj      = rstart*bs;
2647     for (i=0; i<m; i++) {
2648       ierr = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2649       mycols += locrowlens[i];
2650       vals   += locrowlens[i];
2651       jj++;
2652     }
2653     /* read in the other processors' parts (except the last one) and ship them out */
2654     for (i=1; i<size-1; i++) {
2655       nz   = procsnz[i];
2656       vals = buf;
2657       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2658       ierr = MPI_Send(vals,nz,MPIU_SCALAR,i,A->tag,comm);CHKERRQ(ierr);
2659     }
2660     /* the last proc */
2661     if (size != 1) {
2662       nz   = procsnz[size-1] - extra_rows;  /* the extra rows are not on the disk */
2663       vals = buf;
2664       ierr = PetscBinaryRead(fd,vals,nz,PETSC_SCALAR);CHKERRQ(ierr);
2665       for (i=0; i<extra_rows; i++) vals[nz+i] = 1.0;
2666       ierr = MPI_Send(vals,nz+extra_rows,MPIU_SCALAR,size-1,A->tag,comm);CHKERRQ(ierr);
2667     }
2668     ierr = PetscFree(procsnz);CHKERRQ(ierr);
2669   } else {
2670     /* receive numeric values */
2671     ierr = PetscMalloc(nz*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
2672 
2673     /* receive message of values*/
2674     vals   = buf;
2675     mycols = ibuf;
2676     ierr   = MPI_Recv(vals,nz,MPIU_SCALAR,0,A->tag,comm,&status);CHKERRQ(ierr);
2677     ierr   = MPI_Get_count(&status,MPIU_SCALAR,&maxnz);CHKERRQ(ierr);
2678     if (maxnz != nz) SETERRQ(PETSC_ERR_FILE_UNEXPECTED,"something is wrong with the file");
2679 
2680     /* insert into matrix */
2681     jj      = rstart*bs;
2682     for (i=0; i<m; i++) {
2683       ierr    = MatSetValues(A,1,&jj,locrowlens[i],mycols,vals,INSERT_VALUES);CHKERRQ(ierr);
2684       mycols += locrowlens[i];
2685       vals   += locrowlens[i];
2686       jj++;
2687     }
2688   }
2689   ierr = PetscFree(locrowlens);CHKERRQ(ierr);
2690   ierr = PetscFree(buf);CHKERRQ(ierr);
2691   ierr = PetscFree(ibuf);CHKERRQ(ierr);
2692   ierr = PetscFree(rowners);CHKERRQ(ierr);
2693   ierr = PetscFree(dlens);CHKERRQ(ierr);
2694   ierr = PetscFree(mask);CHKERRQ(ierr);
2695   ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2696   ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
2697 
2698   *newmat = A;
2699   PetscFunctionReturn(0);
2700 }
2701 
2702 #undef __FUNCT__
2703 #define __FUNCT__ "MatMPIBAIJSetHashTableFactor"
2704 /*@
2705    MatMPIBAIJSetHashTableFactor - Sets the factor used to compute the size of the hash table.
2706 
2707    Input Parameters:
2708 +  mat  - the matrix
2709 -  fact - the factor used to size the hash table
2710 
2711    Collective on Mat
2712 
2713    Level: advanced
2714 
2715   Notes:
2716    This can also be set by the command line option: -mat_use_hash_table fact
2717 
2718 .keywords: matrix, hashtable, factor, HT
2719 
2720 .seealso: MatSetOption()
2721 @*/
2722 int MatMPIBAIJSetHashTableFactor(Mat mat,PetscReal fact)
2723 {
2724   int ierr,(*f)(Mat,PetscReal);
2725 
2726   PetscFunctionBegin;
2727   ierr = PetscObjectQueryFunction((PetscObject)mat,"MatSetHashTableFactor_C",(void (**)(void))&f);CHKERRQ(ierr);
2728   if (f) {
2729     ierr = (*f)(mat,fact);CHKERRQ(ierr);
2730   }
2731   PetscFunctionReturn(0);
2732 }
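
/*
   A minimal usage sketch (the factor value shown is the default used by
   MatCreate_MPIBAIJ and is purely illustrative):

     ierr = MatMPIBAIJSetHashTableFactor(A,1.39);CHKERRQ(ierr);

   or, equivalently, on the command line: -mat_use_hash_table 1.39
*/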
2733 
2734 #undef __FUNCT__
2735 #define __FUNCT__ "MatSetHashTableFactor_MPIBAIJ"
2736 int MatSetHashTableFactor_MPIBAIJ(Mat mat,PetscReal fact)
2737 {
2738   Mat_MPIBAIJ *baij;
2739 
2740   PetscFunctionBegin;
2741   PetscValidHeaderSpecific(mat,MAT_COOKIE);
2742   baij = (Mat_MPIBAIJ*)mat->data;
2743   baij->ht_fact = fact;
2744   PetscFunctionReturn(0);
2745 }
2746 
2747 #undef __FUNCT__
2748 #define __FUNCT__ "MatMPIBAIJGetSeqBAIJ"
2749 int MatMPIBAIJGetSeqBAIJ(Mat A,Mat *Ad,Mat *Ao,int *colmap[])
2750 {
2751   Mat_MPIBAIJ *a = (Mat_MPIBAIJ *)A->data;
2752   PetscFunctionBegin;
2753   *Ad     = a->A;
2754   *Ao     = a->B;
2755   *colmap = a->garray;
2756   PetscFunctionReturn(0);
2757 }
2758