/* src/mat/impls/sell/mpi/mpisell.c */
1 #include <../src/mat/impls/aij/mpi/mpiaij.h>   /*I "petscmat.h" I*/
2 #include <../src/mat/impls/sell/mpi/mpisell.h> /*I "petscmat.h" I*/
3 #include <petsc/private/vecimpl.h>
4 #include <petsc/private/isimpl.h>
5 #include <petscblaslapack.h>
6 #include <petscsf.h>
7 
8 /*MC
9    MATSELL - MATSELL = "sell" - A matrix type to be used for sparse matrices.
10 
11    This matrix type is identical to `MATSEQSELL` when constructed with a single process communicator,
12    and `MATMPISELL` otherwise.  As a result, for single process communicators,
13    `MatSeqSELLSetPreallocation()` is supported, and similarly `MatMPISELLSetPreallocation()` is supported
14    for communicators controlling multiple processes.  It is recommended that you call both of
15    the above preallocation routines for simplicity.
16 
17    Options Database Keys:
18 . -mat_type sell - sets the matrix type to `MATSELL` during a call to `MatSetFromOptions()`
19 
20   Level: beginner
21 
22 .seealso: `Mat`, `MATAIJ`, `MATBAIJ`, `MATSBAIJ`, `MatCreateSELL()`, `MatCreateSeqSELL()`, `MATSEQSELL`, `MATMPISELL`
23 M*/
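
/*
   A minimal creation sketch (not taken from the PETSc examples; `n` and the preallocation
   counts below are placeholders):

     Mat A;
     PetscCall(MatCreate(PETSC_COMM_WORLD, &A));
     PetscCall(MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, n, n));
     PetscCall(MatSetType(A, MATSELL));
     PetscCall(MatSeqSELLSetPreallocation(A, 5, NULL));          // used when the communicator has one process
     PetscCall(MatMPISELLSetPreallocation(A, 5, NULL, 2, NULL)); // used when it has more than one
     // ...set values, then MatAssemblyBegin()/MatAssemblyEnd()...
*/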
24 
25 static PetscErrorCode MatDiagonalSet_MPISELL(Mat Y, Vec D, InsertMode is)
26 {
27   Mat_MPISELL *sell = (Mat_MPISELL *)Y->data;
28 
29   PetscFunctionBegin;
30   if (Y->assembled && Y->rmap->rstart == Y->cmap->rstart && Y->rmap->rend == Y->cmap->rend) {
31     PetscCall(MatDiagonalSet(sell->A, D, is));
32   } else {
33     PetscCall(MatDiagonalSet_Default(Y, D, is));
34   }
35   PetscFunctionReturn(PETSC_SUCCESS);
36 }
37 
38 /*
39   Local utility routine that creates a mapping from the global column
40 number to the local column number in the off-diagonal part of the local
41 storage of the matrix. When PETSC_USE_CTABLE is defined this is scalable at
42 a slightly higher hash-table cost; without it, it is not scalable (each process
43 holds an order-N integer array) but access is faster.
44 */
45 PetscErrorCode MatCreateColmap_MPISELL_Private(Mat mat)
46 {
47   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
48   PetscInt     n    = sell->B->cmap->n, i;
49 
50   PetscFunctionBegin;
51   PetscCheck(sell->garray, PETSC_COMM_SELF, PETSC_ERR_PLIB, "MPISELL Matrix was assembled but is missing garray");
52 #if defined(PETSC_USE_CTABLE)
53   PetscCall(PetscHMapICreateWithSize(n, &sell->colmap));
54   for (i = 0; i < n; i++) PetscCall(PetscHMapISet(sell->colmap, sell->garray[i] + 1, i + 1));
55 #else
56   PetscCall(PetscCalloc1(mat->cmap->N + 1, &sell->colmap));
57   for (i = 0; i < n; i++) sell->colmap[sell->garray[i]] = i + 1;
58 #endif
59   PetscFunctionReturn(PETSC_SUCCESS);
60 }
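
/*
   A lookup sketch mirroring how the map built above is consulted elsewhere in this file
   (gcol is a hypothetical global column index; lcol becomes the local column in B, or -1 if absent):

     PetscInt lcol;
   #if defined(PETSC_USE_CTABLE)
     PetscCall(PetscHMapIGetWithDefault(sell->colmap, gcol + 1, 0, &lcol));
     lcol--;                          // stored shifted by one so that 0 can mean "not present"
   #else
     lcol = sell->colmap[gcol] - 1;   // dense order-N array: constant-time access
   #endif
*/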
61 
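/* Insert (or add, for ADD_VALUES) one value into a row of the diagonal SeqSELL block A:
   search the row for the column, update in place if found, otherwise make room for a new
   nonzero (reallocating the slice when necessary) and record the new row length. */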
62 #define MatSetValues_SeqSELL_A_Private(row, col, value, addv, orow, ocol) \
63   { \
64     if (col <= lastcol1) low1 = 0; \
65     else high1 = nrow1; \
66     lastcol1 = col; \
67     while (high1 - low1 > 5) { \
68       t = (low1 + high1) / 2; \
69       if (cp1[sliceheight * t] > col) high1 = t; \
70       else low1 = t; \
71     } \
72     for (_i = low1; _i < high1; _i++) { \
73       if (cp1[sliceheight * _i] > col) break; \
74       if (cp1[sliceheight * _i] == col) { \
75         if (addv == ADD_VALUES) vp1[sliceheight * _i] += value; \
76         else vp1[sliceheight * _i] = value; \
77         inserted = PETSC_TRUE; \
78         goto a_noinsert; \
79       } \
80     } \
81     if (value == 0.0 && ignorezeroentries) { \
82       low1  = 0; \
83       high1 = nrow1; \
84       goto a_noinsert; \
85     } \
86     if (nonew == 1) { \
87       low1  = 0; \
88       high1 = nrow1; \
89       goto a_noinsert; \
90     } \
91     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
92     MatSeqXSELLReallocateSELL(A, am, 1, nrow1, a->sliidx, a->sliceheight, row / sliceheight, row, col, a->colidx, a->val, cp1, vp1, nonew, MatScalar); \
93     /* shift up all the later entries in this row */ \
94     for (ii = nrow1 - 1; ii >= _i; ii--) { \
95       cp1[sliceheight * (ii + 1)] = cp1[sliceheight * ii]; \
96       vp1[sliceheight * (ii + 1)] = vp1[sliceheight * ii]; \
97     } \
98     cp1[sliceheight * _i] = col; \
99     vp1[sliceheight * _i] = value; \
100     a->nz++; \
101     nrow1++; \
102   a_noinsert:; \
103     a->rlen[row] = nrow1; \
104   }
105 
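/* Same as MatSetValues_SeqSELL_A_Private(), but for the off-diagonal SeqSELL block B. */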
106 #define MatSetValues_SeqSELL_B_Private(row, col, value, addv, orow, ocol) \
107   { \
108     if (col <= lastcol2) low2 = 0; \
109     else high2 = nrow2; \
110     lastcol2 = col; \
111     while (high2 - low2 > 5) { \
112       t = (low2 + high2) / 2; \
113       if (cp2[sliceheight * t] > col) high2 = t; \
114       else low2 = t; \
115     } \
116     for (_i = low2; _i < high2; _i++) { \
117       if (cp2[sliceheight * _i] > col) break; \
118       if (cp2[sliceheight * _i] == col) { \
119         if (addv == ADD_VALUES) vp2[sliceheight * _i] += value; \
120         else vp2[sliceheight * _i] = value; \
121         inserted = PETSC_TRUE; \
122         goto b_noinsert; \
123       } \
124     } \
125     if (value == 0.0 && ignorezeroentries) { \
126       low2  = 0; \
127       high2 = nrow2; \
128       goto b_noinsert; \
129     } \
130     if (nonew == 1) { \
131       low2  = 0; \
132       high2 = nrow2; \
133       goto b_noinsert; \
134     } \
135     PetscCheck(nonew != -1, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", orow, ocol); \
136     MatSeqXSELLReallocateSELL(B, bm, 1, nrow2, b->sliidx, b->sliceheight, row / sliceheight, row, col, b->colidx, b->val, cp2, vp2, nonew, MatScalar); \
137     /* shift up all the later entries in this row */ \
138     for (ii = nrow2 - 1; ii >= _i; ii--) { \
139       cp2[sliceheight * (ii + 1)] = cp2[sliceheight * ii]; \
140       vp2[sliceheight * (ii + 1)] = vp2[sliceheight * ii]; \
141     } \
142     cp2[sliceheight * _i] = col; \
143     vp2[sliceheight * _i] = value; \
144     b->nz++; \
145     nrow2++; \
146   b_noinsert:; \
147     b->rlen[row] = nrow2; \
148   }
149 
150 static PetscErrorCode MatSetValues_MPISELL(Mat mat, PetscInt m, const PetscInt im[], PetscInt n, const PetscInt in[], const PetscScalar v[], InsertMode addv)
151 {
152   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
153   PetscScalar  value;
154   PetscInt     i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend, shift1, shift2;
155   PetscInt     cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
156   PetscBool    roworiented = sell->roworiented;
157 
158   /* Some variables required by the MatSetValue_SeqSELL_Private() macro */
159   Mat          A                 = sell->A;
160   Mat_SeqSELL *a                 = (Mat_SeqSELL *)A->data;
161   PetscBool    ignorezeroentries = a->ignorezeroentries, found;
162   Mat          B                 = sell->B;
163   Mat_SeqSELL *b                 = (Mat_SeqSELL *)B->data;
164   PetscInt    *cp1, *cp2, ii, _i, nrow1, nrow2, low1, high1, low2, high2, t, lastcol1, lastcol2, sliceheight = a->sliceheight;
165   MatScalar   *vp1, *vp2;
166 
167   PetscFunctionBegin;
168   for (i = 0; i < m; i++) {
169     if (im[i] < 0) continue;
170     PetscCheck(im[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, im[i], mat->rmap->N - 1);
171     if (im[i] >= rstart && im[i] < rend) {
172       row      = im[i] - rstart;
173       lastcol1 = -1;
174       shift1   = a->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
175       cp1      = PetscSafePointerPlusOffset(a->colidx, shift1);
176       vp1      = PetscSafePointerPlusOffset(a->val, shift1);
177       nrow1    = a->rlen[row];
178       low1     = 0;
179       high1    = nrow1;
180       lastcol2 = -1;
181       shift2   = b->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
182       cp2      = PetscSafePointerPlusOffset(b->colidx, shift2);
183       vp2      = PetscSafePointerPlusOffset(b->val, shift2);
184       nrow2    = b->rlen[row];
185       low2     = 0;
186       high2    = nrow2;
187 
188       for (j = 0; j < n; j++) {
189         if (roworiented) value = v[i * n + j];
190         else value = v[i + j * m];
191         if (ignorezeroentries && value == 0.0 && (addv == ADD_VALUES)) continue;
192         if (in[j] >= cstart && in[j] < cend) {
193           col = in[j] - cstart;
194           MatSetValue_SeqSELL_Private(A, row, col, value, addv, im[i], in[j], cp1, vp1, lastcol1, low1, high1); /* set one value */
195 #if defined(PETSC_HAVE_CUDA)
196           if (A->offloadmask != PETSC_OFFLOAD_UNALLOCATED && found) A->offloadmask = PETSC_OFFLOAD_CPU;
197 #endif
198         } else if (in[j] < 0) {
199           continue;
200         } else {
201           PetscCheck(in[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, in[j], mat->cmap->N - 1);
202           if (mat->was_assembled) {
203             if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
204 #if defined(PETSC_USE_CTABLE)
205             PetscCall(PetscHMapIGetWithDefault(sell->colmap, in[j] + 1, 0, &col));
206             col--;
207 #else
208             col = sell->colmap[in[j]] - 1;
209 #endif
210             if (col < 0 && !((Mat_SeqSELL *)sell->B->data)->nonew) {
211               PetscCall(MatDisAssemble_MPISELL(mat));
212               col = in[j];
213               /* Reinitialize the variables required by MatSetValue_SeqSELL_Private() for the B part */
214               B      = sell->B;
215               b      = (Mat_SeqSELL *)B->data;
216               shift2 = b->sliidx[row / sliceheight] + (row % sliceheight); /* starting index of the row */
217               cp2    = b->colidx + shift2;
218               vp2    = b->val + shift2;
219               nrow2  = b->rlen[row];
220               low2   = 0;
221               high2  = nrow2;
222               found  = PETSC_FALSE;
223             } else {
224               PetscCheck(col >= 0, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Inserting a new nonzero at global row/column (%" PetscInt_FMT ", %" PetscInt_FMT ") into matrix", im[i], in[j]);
225             }
226           } else col = in[j];
227           MatSetValue_SeqSELL_Private(B, row, col, value, addv, im[i], in[j], cp2, vp2, lastcol2, low2, high2); /* set one value */
228 #if defined(PETSC_HAVE_CUDA)
229           if (B->offloadmask != PETSC_OFFLOAD_UNALLOCATED && found) B->offloadmask = PETSC_OFFLOAD_CPU;
230 #endif
231         }
232       }
233     } else {
234       PetscCheck(!mat->nooffprocentries, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONG, "Setting off process row %" PetscInt_FMT " even though MatSetOption(,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE) was set", im[i]);
235       if (!sell->donotstash) {
236         mat->assembled = PETSC_FALSE;
237         if (roworiented) {
238           PetscCall(MatStashValuesRow_Private(&mat->stash, im[i], n, in, v + i * n, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
239         } else {
240           PetscCall(MatStashValuesCol_Private(&mat->stash, im[i], n, in, v + i, m, (PetscBool)(ignorezeroentries && (addv == ADD_VALUES))));
241         }
242       }
243     }
244   }
245   PetscFunctionReturn(PETSC_SUCCESS);
246 }
247 
248 static PetscErrorCode MatGetValues_MPISELL(Mat mat, PetscInt m, const PetscInt idxm[], PetscInt n, const PetscInt idxn[], PetscScalar v[])
249 {
250   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
251   PetscInt     i, j, rstart = mat->rmap->rstart, rend = mat->rmap->rend;
252   PetscInt     cstart = mat->cmap->rstart, cend = mat->cmap->rend, row, col;
253 
254   PetscFunctionBegin;
255   for (i = 0; i < m; i++) {
256     if (idxm[i] < 0) continue; /* negative row */
257     PetscCheck(idxm[i] < mat->rmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Row too large: row %" PetscInt_FMT " max %" PetscInt_FMT, idxm[i], mat->rmap->N - 1);
258     PetscCheck(idxm[i] >= rstart && idxm[i] < rend, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only local values currently supported");
259     row = idxm[i] - rstart;
260     for (j = 0; j < n; j++) {
261       if (idxn[j] < 0) continue; /* negative column */
262       PetscCheck(idxn[j] < mat->cmap->N, PETSC_COMM_SELF, PETSC_ERR_ARG_OUTOFRANGE, "Column too large: col %" PetscInt_FMT " max %" PetscInt_FMT, idxn[j], mat->cmap->N - 1);
263       if (idxn[j] >= cstart && idxn[j] < cend) {
264         col = idxn[j] - cstart;
265         PetscCall(MatGetValues(sell->A, 1, &row, 1, &col, v + i * n + j));
266       } else {
267         if (!sell->colmap) PetscCall(MatCreateColmap_MPISELL_Private(mat));
268 #if defined(PETSC_USE_CTABLE)
269         PetscCall(PetscHMapIGetWithDefault(sell->colmap, idxn[j] + 1, 0, &col));
270         col--;
271 #else
272         col = sell->colmap[idxn[j]] - 1;
273 #endif
274         if (col < 0 || sell->garray[col] != idxn[j]) *(v + i * n + j) = 0.0;
275         else PetscCall(MatGetValues(sell->B, 1, &row, 1, &col, v + i * n + j));
276       }
277     }
278   }
279   PetscFunctionReturn(PETSC_SUCCESS);
280 }
281 
282 static PetscErrorCode MatAssemblyBegin_MPISELL(Mat mat, MatAssemblyType mode)
283 {
284   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
285   PetscInt     nstash, reallocs;
286 
287   PetscFunctionBegin;
288   if (sell->donotstash || mat->nooffprocentries) PetscFunctionReturn(PETSC_SUCCESS);
289 
290   PetscCall(MatStashScatterBegin_Private(mat, &mat->stash, mat->rmap->range));
291   PetscCall(MatStashGetInfo_Private(&mat->stash, &nstash, &reallocs));
292   PetscCall(PetscInfo(sell->A, "Stash has %" PetscInt_FMT " entries, uses %" PetscInt_FMT " mallocs.\n", nstash, reallocs));
293   PetscFunctionReturn(PETSC_SUCCESS);
294 }
295 
296 PetscErrorCode MatAssemblyEnd_MPISELL(Mat mat, MatAssemblyType mode)
297 {
298   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
299   PetscMPIInt  n;
300   PetscInt     i, flg;
301   PetscInt    *row, *col;
302   PetscScalar *val;
303   PetscBool    all_assembled;
304   /* do not use 'b = (Mat_SeqSELL*)sell->B->data' as B can be reset in disassembly */
305   PetscFunctionBegin;
306   if (!sell->donotstash && !mat->nooffprocentries) {
307     while (1) {
308       PetscCall(MatStashScatterGetMesg_Private(&mat->stash, &n, &row, &col, &val, &flg));
309       if (!flg) break;
310 
311       for (i = 0; i < n; i++) { /* assemble one by one */
312         PetscCall(MatSetValues_MPISELL(mat, 1, row + i, 1, col + i, val + i, mat->insertmode));
313       }
314     }
315     PetscCall(MatStashScatterEnd_Private(&mat->stash));
316   }
317 #if defined(PETSC_HAVE_CUDA)
318   if (mat->offloadmask == PETSC_OFFLOAD_CPU) sell->A->offloadmask = PETSC_OFFLOAD_CPU;
319 #endif
320   PetscCall(MatAssemblyBegin(sell->A, mode));
321   PetscCall(MatAssemblyEnd(sell->A, mode));
322 
323   /*
324      Determine if any process has disassembled; if so, we must also
325      disassemble ourselves so that we can reassemble.
326   */
327   /*
328      If the nonzero structure of submatrix B cannot change then we know that
329      no process disassembled, so we can skip this step.
330   */
331   if (!((Mat_SeqSELL *)sell->B->data)->nonew) {
332     PetscCallMPI(MPIU_Allreduce(&mat->was_assembled, &all_assembled, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)mat)));
333     if (mat->was_assembled && !all_assembled) PetscCall(MatDisAssemble_MPISELL(mat));
334   }
335   if (!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) PetscCall(MatSetUpMultiply_MPISELL(mat));
336 #if defined(PETSC_HAVE_CUDA)
337   if (mat->offloadmask == PETSC_OFFLOAD_CPU && sell->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) sell->B->offloadmask = PETSC_OFFLOAD_CPU;
338 #endif
339   PetscCall(MatAssemblyBegin(sell->B, mode));
340   PetscCall(MatAssemblyEnd(sell->B, mode));
341   PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
342   sell->rowvalues = NULL;
343   PetscCall(VecDestroy(&sell->diag));
344 
345   /* if no new nonzero locations are allowed in matrix then only set the matrix state the first time through */
346   if ((!mat->was_assembled && mode == MAT_FINAL_ASSEMBLY) || !((Mat_SeqSELL *)sell->A->data)->nonew) {
347     PetscObjectState state = sell->A->nonzerostate + sell->B->nonzerostate;
348     PetscCallMPI(MPIU_Allreduce(&state, &mat->nonzerostate, 1, MPIU_INT64, MPI_SUM, PetscObjectComm((PetscObject)mat)));
349   }
350 #if defined(PETSC_HAVE_CUDA)
351   mat->offloadmask = PETSC_OFFLOAD_BOTH;
352 #endif
353   PetscFunctionReturn(PETSC_SUCCESS);
354 }
355 
356 static PetscErrorCode MatZeroEntries_MPISELL(Mat A)
357 {
358   Mat_MPISELL *l = (Mat_MPISELL *)A->data;
359 
360   PetscFunctionBegin;
361   PetscCall(MatZeroEntries(l->A));
362   PetscCall(MatZeroEntries(l->B));
363   PetscFunctionReturn(PETSC_SUCCESS);
364 }
365 
366 static PetscErrorCode MatMult_MPISELL(Mat A, Vec xx, Vec yy)
367 {
368   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
369   PetscInt     nt;
370 
371   PetscFunctionBegin;
372   PetscCall(VecGetLocalSize(xx, &nt));
373   PetscCheck(nt == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Incompatible partition of A (%" PetscInt_FMT ") and xx (%" PetscInt_FMT ")", A->cmap->n, nt);
374   PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
375   PetscCall((*a->A->ops->mult)(a->A, xx, yy));
376   PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
377   PetscCall((*a->B->ops->multadd)(a->B, a->lvec, yy, yy));
378   PetscFunctionReturn(PETSC_SUCCESS);
379 }
380 
381 static PetscErrorCode MatMultDiagonalBlock_MPISELL(Mat A, Vec bb, Vec xx)
382 {
383   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
384 
385   PetscFunctionBegin;
386   PetscCall(MatMultDiagonalBlock(a->A, bb, xx));
387   PetscFunctionReturn(PETSC_SUCCESS);
388 }
389 
390 static PetscErrorCode MatMultAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
391 {
392   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
393 
394   PetscFunctionBegin;
395   PetscCall(VecScatterBegin(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
396   PetscCall((*a->A->ops->multadd)(a->A, xx, yy, zz));
397   PetscCall(VecScatterEnd(a->Mvctx, xx, a->lvec, INSERT_VALUES, SCATTER_FORWARD));
398   PetscCall((*a->B->ops->multadd)(a->B, a->lvec, zz, zz));
399   PetscFunctionReturn(PETSC_SUCCESS);
400 }
401 
402 static PetscErrorCode MatMultTranspose_MPISELL(Mat A, Vec xx, Vec yy)
403 {
404   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
405 
406   PetscFunctionBegin;
407   /* do nondiagonal part */
408   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
409   /* do local part */
410   PetscCall((*a->A->ops->multtranspose)(a->A, xx, yy));
411   /* add partial results together */
412   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
413   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, yy, ADD_VALUES, SCATTER_REVERSE));
414   PetscFunctionReturn(PETSC_SUCCESS);
415 }
416 
417 static PetscErrorCode MatIsTranspose_MPISELL(Mat Amat, Mat Bmat, PetscReal tol, PetscBool *f)
418 {
419   MPI_Comm     comm;
420   Mat_MPISELL *Asell = (Mat_MPISELL *)Amat->data, *Bsell;
421   Mat          Adia  = Asell->A, Bdia, Aoff, Boff, *Aoffs, *Boffs;
422   IS           Me, Notme;
423   PetscInt     M, N, first, last, *notme, i;
424   PetscMPIInt  size;
425 
426   PetscFunctionBegin;
427   /* Easy test: the diagonal blocks must be transposes of each other */
428   Bsell = (Mat_MPISELL *)Bmat->data;
429   Bdia  = Bsell->A;
430   PetscCall(MatIsTranspose(Adia, Bdia, tol, f));
431   if (!*f) PetscFunctionReturn(PETSC_SUCCESS);
432   PetscCall(PetscObjectGetComm((PetscObject)Amat, &comm));
433   PetscCallMPI(MPI_Comm_size(comm, &size));
434   if (size == 1) PetscFunctionReturn(PETSC_SUCCESS);
435 
436   /* Hard test: off-diagonal block. This takes a MatCreateSubMatrix. */
437   PetscCall(MatGetSize(Amat, &M, &N));
438   PetscCall(MatGetOwnershipRange(Amat, &first, &last));
439   PetscCall(PetscMalloc1(N - last + first, &notme));
440   for (i = 0; i < first; i++) notme[i] = i;
441   for (i = last; i < M; i++) notme[i - last + first] = i;
442   PetscCall(ISCreateGeneral(MPI_COMM_SELF, N - last + first, notme, PETSC_COPY_VALUES, &Notme));
443   PetscCall(ISCreateStride(MPI_COMM_SELF, last - first, first, 1, &Me));
444   PetscCall(MatCreateSubMatrices(Amat, 1, &Me, &Notme, MAT_INITIAL_MATRIX, &Aoffs));
445   Aoff = Aoffs[0];
446   PetscCall(MatCreateSubMatrices(Bmat, 1, &Notme, &Me, MAT_INITIAL_MATRIX, &Boffs));
447   Boff = Boffs[0];
448   PetscCall(MatIsTranspose(Aoff, Boff, tol, f));
449   PetscCall(MatDestroyMatrices(1, &Aoffs));
450   PetscCall(MatDestroyMatrices(1, &Boffs));
451   PetscCall(ISDestroy(&Me));
452   PetscCall(ISDestroy(&Notme));
453   PetscCall(PetscFree(notme));
454   PetscFunctionReturn(PETSC_SUCCESS);
455 }
456 
457 static PetscErrorCode MatMultTransposeAdd_MPISELL(Mat A, Vec xx, Vec yy, Vec zz)
458 {
459   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
460 
461   PetscFunctionBegin;
462   /* do nondiagonal part */
463   PetscCall((*a->B->ops->multtranspose)(a->B, xx, a->lvec));
464   /* do local part */
465   PetscCall((*a->A->ops->multtransposeadd)(a->A, xx, yy, zz));
466   /* add partial results together */
467   PetscCall(VecScatterBegin(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
468   PetscCall(VecScatterEnd(a->Mvctx, a->lvec, zz, ADD_VALUES, SCATTER_REVERSE));
469   PetscFunctionReturn(PETSC_SUCCESS);
470 }
471 
472 /*
473   This only works correctly for square matrices where the subblock A->A is the
474    diagonal block
475 */
476 static PetscErrorCode MatGetDiagonal_MPISELL(Mat A, Vec v)
477 {
478   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
479 
480   PetscFunctionBegin;
481   PetscCheck(A->rmap->N == A->cmap->N, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Supports only square matrix where A->A is diag block");
482   PetscCheck(A->rmap->rstart == A->cmap->rstart && A->rmap->rend == A->cmap->rend, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "row partition must equal col partition");
483   PetscCall(MatGetDiagonal(a->A, v));
484   PetscFunctionReturn(PETSC_SUCCESS);
485 }
486 
487 static PetscErrorCode MatScale_MPISELL(Mat A, PetscScalar aa)
488 {
489   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
490 
491   PetscFunctionBegin;
492   PetscCall(MatScale(a->A, aa));
493   PetscCall(MatScale(a->B, aa));
494   PetscFunctionReturn(PETSC_SUCCESS);
495 }
496 
497 PetscErrorCode MatDestroy_MPISELL(Mat mat)
498 {
499   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
500 
501   PetscFunctionBegin;
502   PetscCall(PetscLogObjectState((PetscObject)mat, "Rows=%" PetscInt_FMT ", Cols=%" PetscInt_FMT, mat->rmap->N, mat->cmap->N));
503   PetscCall(MatStashDestroy_Private(&mat->stash));
504   PetscCall(VecDestroy(&sell->diag));
505   PetscCall(MatDestroy(&sell->A));
506   PetscCall(MatDestroy(&sell->B));
507 #if defined(PETSC_USE_CTABLE)
508   PetscCall(PetscHMapIDestroy(&sell->colmap));
509 #else
510   PetscCall(PetscFree(sell->colmap));
511 #endif
512   PetscCall(PetscFree(sell->garray));
513   PetscCall(VecDestroy(&sell->lvec));
514   PetscCall(VecScatterDestroy(&sell->Mvctx));
515   PetscCall(PetscFree2(sell->rowvalues, sell->rowindices));
516   PetscCall(PetscFree(sell->ld));
517   PetscCall(PetscFree(mat->data));
518 
519   PetscCall(PetscObjectChangeTypeName((PetscObject)mat, NULL));
520   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatStoreValues_C", NULL));
521   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatRetrieveValues_C", NULL));
522   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatIsTranspose_C", NULL));
523   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatMPISELLSetPreallocation_C", NULL));
524   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpisell_mpiaij_C", NULL));
525 #if defined(PETSC_HAVE_CUDA)
526   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatConvert_mpisell_mpisellcuda_C", NULL));
527 #endif
528   PetscCall(PetscObjectComposeFunction((PetscObject)mat, "MatDiagonalScaleLocal_C", NULL));
529   PetscFunctionReturn(PETSC_SUCCESS);
530 }
531 
532 #include <petscdraw.h>
533 static PetscErrorCode MatView_MPISELL_ASCIIorDraworSocket(Mat mat, PetscViewer viewer)
534 {
535   Mat_MPISELL      *sell = (Mat_MPISELL *)mat->data;
536   PetscMPIInt       rank = sell->rank, size = sell->size;
537   PetscBool         isdraw, isascii, isbinary;
538   PetscViewer       sviewer;
539   PetscViewerFormat format;
540 
541   PetscFunctionBegin;
542   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
543   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
544   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
545   if (isascii) {
546     PetscCall(PetscViewerGetFormat(viewer, &format));
547     if (format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
548       MatInfo   info;
549       PetscInt *inodes;
550 
551       PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)mat), &rank));
552       PetscCall(MatGetInfo(mat, MAT_LOCAL, &info));
553       PetscCall(MatInodeGetInodeSizes(sell->A, NULL, &inodes, NULL));
554       PetscCall(PetscViewerASCIIPushSynchronized(viewer));
555       if (!inodes) {
556         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", not using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
557                                                      (PetscInt)info.nz_allocated, (PetscInt)info.memory));
558       } else {
559         PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] Local rows %" PetscInt_FMT " nz %" PetscInt_FMT " nz alloced %" PetscInt_FMT " mem %" PetscInt_FMT ", using I-node routines\n", rank, mat->rmap->n, (PetscInt)info.nz_used,
560                                                      (PetscInt)info.nz_allocated, (PetscInt)info.memory));
561       }
562       PetscCall(MatGetInfo(sell->A, MAT_LOCAL, &info));
563       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] on-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
564       PetscCall(MatGetInfo(sell->B, MAT_LOCAL, &info));
565       PetscCall(PetscViewerASCIISynchronizedPrintf(viewer, "[%d] off-diagonal part: nz %" PetscInt_FMT " \n", rank, (PetscInt)info.nz_used));
566       PetscCall(PetscViewerFlush(viewer));
567       PetscCall(PetscViewerASCIIPopSynchronized(viewer));
568       PetscCall(PetscViewerASCIIPrintf(viewer, "Information on VecScatter used in matrix-vector product: \n"));
569       PetscCall(VecScatterView(sell->Mvctx, viewer));
570       PetscFunctionReturn(PETSC_SUCCESS);
571     } else if (format == PETSC_VIEWER_ASCII_INFO) {
572       PetscInt inodecount, inodelimit, *inodes;
573       PetscCall(MatInodeGetInodeSizes(sell->A, &inodecount, &inodes, &inodelimit));
574       if (inodes) {
575         PetscCall(PetscViewerASCIIPrintf(viewer, "using I-node (on process 0) routines: found %" PetscInt_FMT " nodes, limit used is %" PetscInt_FMT "\n", inodecount, inodelimit));
576       } else {
577         PetscCall(PetscViewerASCIIPrintf(viewer, "not using I-node (on process 0) routines\n"));
578       }
579       PetscFunctionReturn(PETSC_SUCCESS);
580     } else if (format == PETSC_VIEWER_ASCII_FACTOR_INFO) {
581       PetscFunctionReturn(PETSC_SUCCESS);
582     }
583   } else if (isbinary) {
584     if (size == 1) {
585       PetscCall(PetscObjectSetName((PetscObject)sell->A, ((PetscObject)mat)->name));
586       PetscCall(MatView(sell->A, viewer));
587     } else {
588       /* PetscCall(MatView_MPISELL_Binary(mat,viewer)); */
589     }
590     PetscFunctionReturn(PETSC_SUCCESS);
591   } else if (isdraw) {
592     PetscDraw draw;
593     PetscBool isnull;
594     PetscCall(PetscViewerDrawGetDraw(viewer, 0, &draw));
595     PetscCall(PetscDrawIsNull(draw, &isnull));
596     if (isnull) PetscFunctionReturn(PETSC_SUCCESS);
597   }
598 
599   {
600     /* assemble the entire matrix onto first processor. */
601     Mat          A;
602     Mat_SeqSELL *Aloc;
603     PetscInt     M = mat->rmap->N, N = mat->cmap->N, *acolidx, row, col, i, j;
604     MatScalar   *aval;
605     PetscBool    isnonzero;
606 
607     PetscCall(MatCreate(PetscObjectComm((PetscObject)mat), &A));
608     if (rank == 0) {
609       PetscCall(MatSetSizes(A, M, N, M, N));
610     } else {
611       PetscCall(MatSetSizes(A, 0, 0, M, N));
612     }
613     /* This is just a temporary matrix, so explicitly using MATMPISELL is probably best */
614     PetscCall(MatSetType(A, MATMPISELL));
615     PetscCall(MatMPISELLSetPreallocation(A, 0, NULL, 0, NULL));
616     PetscCall(MatSetOption(A, MAT_NEW_NONZERO_LOCATION_ERR, PETSC_FALSE));
617 
618     /* copy over the A part */
619     Aloc    = (Mat_SeqSELL *)sell->A->data;
620     acolidx = Aloc->colidx;
621     aval    = Aloc->val;
622     for (i = 0; i < Aloc->totalslices; i++) { /* loop over slices */
623       for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
624         isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / Aloc->sliceheight < Aloc->rlen[i * Aloc->sliceheight + j % Aloc->sliceheight]);
625         if (isnonzero) { /* check the mask bit */
626           row = i * Aloc->sliceheight + j % Aloc->sliceheight + mat->rmap->rstart;
627           col = *acolidx + mat->rmap->rstart;
628           PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
629         }
630         aval++;
631         acolidx++;
632       }
633     }
634 
635     /* copy over the B part */
636     Aloc    = (Mat_SeqSELL *)sell->B->data;
637     acolidx = Aloc->colidx;
638     aval    = Aloc->val;
639     for (i = 0; i < Aloc->totalslices; i++) {
640       for (j = Aloc->sliidx[i]; j < Aloc->sliidx[i + 1]; j++) {
641         isnonzero = (PetscBool)((j - Aloc->sliidx[i]) / Aloc->sliceheight < Aloc->rlen[i * Aloc->sliceheight + j % Aloc->sliceheight]);
642         if (isnonzero) {
643           row = i * Aloc->sliceheight + j % Aloc->sliceheight + mat->rmap->rstart;
644           col = sell->garray[*acolidx];
645           PetscCall(MatSetValues(A, 1, &row, 1, &col, aval, INSERT_VALUES));
646         }
647         aval++;
648         acolidx++;
649       }
650     }
651 
652     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
653     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
654     /*
655        Everyone has to call to draw the matrix since the graphics waits are
656        synchronized across all processors that share the PetscDraw object
657     */
658     PetscCall(PetscViewerGetSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
659     if (rank == 0) {
660       PetscCall(PetscObjectSetName((PetscObject)((Mat_MPISELL *)A->data)->A, ((PetscObject)mat)->name));
661       PetscCall(MatView_SeqSELL(((Mat_MPISELL *)A->data)->A, sviewer));
662     }
663     PetscCall(PetscViewerRestoreSubViewer(viewer, PETSC_COMM_SELF, &sviewer));
664     PetscCall(MatDestroy(&A));
665   }
666   PetscFunctionReturn(PETSC_SUCCESS);
667 }
668 
669 static PetscErrorCode MatView_MPISELL(Mat mat, PetscViewer viewer)
670 {
671   PetscBool isascii, isdraw, issocket, isbinary;
672 
673   PetscFunctionBegin;
674   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii));
675   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERDRAW, &isdraw));
676   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERBINARY, &isbinary));
677   PetscCall(PetscObjectTypeCompare((PetscObject)viewer, PETSCVIEWERSOCKET, &issocket));
678   if (isascii || isdraw || isbinary || issocket) PetscCall(MatView_MPISELL_ASCIIorDraworSocket(mat, viewer));
679   PetscFunctionReturn(PETSC_SUCCESS);
680 }
681 
682 static PetscErrorCode MatGetGhosts_MPISELL(Mat mat, PetscInt *nghosts, const PetscInt *ghosts[])
683 {
684   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
685 
686   PetscFunctionBegin;
687   PetscCall(MatGetSize(sell->B, NULL, nghosts));
688   if (ghosts) *ghosts = sell->garray;
689   PetscFunctionReturn(PETSC_SUCCESS);
690 }
691 
692 static PetscErrorCode MatGetInfo_MPISELL(Mat matin, MatInfoType flag, MatInfo *info)
693 {
694   Mat_MPISELL   *mat = (Mat_MPISELL *)matin->data;
695   Mat            A = mat->A, B = mat->B;
696   PetscLogDouble isend[5], irecv[5];
697 
698   PetscFunctionBegin;
699   info->block_size = 1.0;
700   PetscCall(MatGetInfo(A, MAT_LOCAL, info));
701 
702   isend[0] = info->nz_used;
703   isend[1] = info->nz_allocated;
704   isend[2] = info->nz_unneeded;
705   isend[3] = info->memory;
706   isend[4] = info->mallocs;
707 
708   PetscCall(MatGetInfo(B, MAT_LOCAL, info));
709 
710   isend[0] += info->nz_used;
711   isend[1] += info->nz_allocated;
712   isend[2] += info->nz_unneeded;
713   isend[3] += info->memory;
714   isend[4] += info->mallocs;
715   if (flag == MAT_LOCAL) {
716     info->nz_used      = isend[0];
717     info->nz_allocated = isend[1];
718     info->nz_unneeded  = isend[2];
719     info->memory       = isend[3];
720     info->mallocs      = isend[4];
721   } else if (flag == MAT_GLOBAL_MAX) {
722     PetscCallMPI(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_MAX, PetscObjectComm((PetscObject)matin)));
723 
724     info->nz_used      = irecv[0];
725     info->nz_allocated = irecv[1];
726     info->nz_unneeded  = irecv[2];
727     info->memory       = irecv[3];
728     info->mallocs      = irecv[4];
729   } else if (flag == MAT_GLOBAL_SUM) {
730     PetscCallMPI(MPIU_Allreduce(isend, irecv, 5, MPIU_PETSCLOGDOUBLE, MPI_SUM, PetscObjectComm((PetscObject)matin)));
731 
732     info->nz_used      = irecv[0];
733     info->nz_allocated = irecv[1];
734     info->nz_unneeded  = irecv[2];
735     info->memory       = irecv[3];
736     info->mallocs      = irecv[4];
737   }
738   info->fill_ratio_given  = 0; /* no parallel LU/ILU/Cholesky */
739   info->fill_ratio_needed = 0;
740   info->factor_mallocs    = 0;
741   PetscFunctionReturn(PETSC_SUCCESS);
742 }
743 
744 static PetscErrorCode MatSetOption_MPISELL(Mat A, MatOption op, PetscBool flg)
745 {
746   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
747 
748   PetscFunctionBegin;
749   switch (op) {
750   case MAT_NEW_NONZERO_LOCATIONS:
751   case MAT_NEW_NONZERO_ALLOCATION_ERR:
752   case MAT_UNUSED_NONZERO_LOCATION_ERR:
753   case MAT_KEEP_NONZERO_PATTERN:
754   case MAT_NEW_NONZERO_LOCATION_ERR:
755   case MAT_USE_INODES:
756   case MAT_IGNORE_ZERO_ENTRIES:
757     MatCheckPreallocated(A, 1);
758     PetscCall(MatSetOption(a->A, op, flg));
759     PetscCall(MatSetOption(a->B, op, flg));
760     break;
761   case MAT_ROW_ORIENTED:
762     MatCheckPreallocated(A, 1);
763     a->roworiented = flg;
764 
765     PetscCall(MatSetOption(a->A, op, flg));
766     PetscCall(MatSetOption(a->B, op, flg));
767     break;
768   case MAT_IGNORE_OFF_PROC_ENTRIES:
769     a->donotstash = flg;
770     break;
771   case MAT_SYMMETRIC:
772     MatCheckPreallocated(A, 1);
773     PetscCall(MatSetOption(a->A, op, flg));
774     break;
775   case MAT_STRUCTURALLY_SYMMETRIC:
776     MatCheckPreallocated(A, 1);
777     PetscCall(MatSetOption(a->A, op, flg));
778     break;
779   case MAT_HERMITIAN:
780     MatCheckPreallocated(A, 1);
781     PetscCall(MatSetOption(a->A, op, flg));
782     break;
783   case MAT_SYMMETRY_ETERNAL:
784     MatCheckPreallocated(A, 1);
785     PetscCall(MatSetOption(a->A, op, flg));
786     break;
787   case MAT_STRUCTURAL_SYMMETRY_ETERNAL:
788     MatCheckPreallocated(A, 1);
789     PetscCall(MatSetOption(a->A, op, flg));
790     break;
791   default:
792     break;
793   }
794   PetscFunctionReturn(PETSC_SUCCESS);
795 }
796 
797 static PetscErrorCode MatDiagonalScale_MPISELL(Mat mat, Vec ll, Vec rr)
798 {
799   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
800   Mat          a = sell->A, b = sell->B;
801   PetscInt     s1, s2, s3;
802 
803   PetscFunctionBegin;
804   PetscCall(MatGetLocalSize(mat, &s2, &s3));
805   if (rr) {
806     PetscCall(VecGetLocalSize(rr, &s1));
807     PetscCheck(s1 == s3, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "right vector non-conforming local size");
808     /* Overlap communication with computation. */
809     PetscCall(VecScatterBegin(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
810   }
811   if (ll) {
812     PetscCall(VecGetLocalSize(ll, &s1));
813     PetscCheck(s1 == s2, PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "left vector non-conforming local size");
814     PetscUseTypeMethod(b, diagonalscale, ll, NULL);
815   }
816   /* scale the diagonal block */
817   PetscUseTypeMethod(a, diagonalscale, ll, rr);
818 
819   if (rr) {
820     /* Do a scatter end and then right scale the off-diagonal block */
821     PetscCall(VecScatterEnd(sell->Mvctx, rr, sell->lvec, INSERT_VALUES, SCATTER_FORWARD));
822     PetscUseTypeMethod(b, diagonalscale, NULL, sell->lvec);
823   }
824   PetscFunctionReturn(PETSC_SUCCESS);
825 }
826 
827 static PetscErrorCode MatSetUnfactored_MPISELL(Mat A)
828 {
829   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
830 
831   PetscFunctionBegin;
832   PetscCall(MatSetUnfactored(a->A));
833   PetscFunctionReturn(PETSC_SUCCESS);
834 }
835 
836 static PetscErrorCode MatEqual_MPISELL(Mat A, Mat B, PetscBool *flag)
837 {
838   Mat_MPISELL *matB = (Mat_MPISELL *)B->data, *matA = (Mat_MPISELL *)A->data;
839   Mat          a, b, c, d;
840   PetscBool    flg;
841 
842   PetscFunctionBegin;
843   a = matA->A;
844   b = matA->B;
845   c = matB->A;
846   d = matB->B;
847 
848   PetscCall(MatEqual(a, c, &flg));
849   if (flg) PetscCall(MatEqual(b, d, &flg));
850   PetscCallMPI(MPIU_Allreduce(&flg, flag, 1, MPIU_BOOL, MPI_LAND, PetscObjectComm((PetscObject)A)));
851   PetscFunctionReturn(PETSC_SUCCESS);
852 }
853 
854 static PetscErrorCode MatCopy_MPISELL(Mat A, Mat B, MatStructure str)
855 {
856   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
857   Mat_MPISELL *b = (Mat_MPISELL *)B->data;
858 
859   PetscFunctionBegin;
860   /* If the two matrices don't have the same copy implementation, they aren't compatible for fast copy. */
861   if ((str != SAME_NONZERO_PATTERN) || (A->ops->copy != B->ops->copy)) {
862     /* Because of the column compression in the off-process part of the matrix a->B,
863        the number of columns in a->B and b->B may differ, hence we cannot call
864        MatCopy() directly on the two parts. If need be, a more efficient copy than
865        MatCopy_Basic() could be provided by first uncompressing the a->B matrices and
866        then copying the submatrices. */
867     PetscCall(MatCopy_Basic(A, B, str));
868   } else {
869     PetscCall(MatCopy(a->A, b->A, str));
870     PetscCall(MatCopy(a->B, b->B, str));
871   }
872   PetscFunctionReturn(PETSC_SUCCESS);
873 }
874 
875 static PetscErrorCode MatSetUp_MPISELL(Mat A)
876 {
877   PetscFunctionBegin;
878   PetscCall(MatMPISELLSetPreallocation(A, PETSC_DEFAULT, NULL, PETSC_DEFAULT, NULL));
879   PetscFunctionReturn(PETSC_SUCCESS);
880 }
881 
882 static PetscErrorCode MatConjugate_MPISELL(Mat mat)
883 {
884   PetscFunctionBegin;
885   if (PetscDefined(USE_COMPLEX)) {
886     Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
887 
888     PetscCall(MatConjugate_SeqSELL(sell->A));
889     PetscCall(MatConjugate_SeqSELL(sell->B));
890   }
891   PetscFunctionReturn(PETSC_SUCCESS);
892 }
893 
894 static PetscErrorCode MatRealPart_MPISELL(Mat A)
895 {
896   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
897 
898   PetscFunctionBegin;
899   PetscCall(MatRealPart(a->A));
900   PetscCall(MatRealPart(a->B));
901   PetscFunctionReturn(PETSC_SUCCESS);
902 }
903 
904 static PetscErrorCode MatImaginaryPart_MPISELL(Mat A)
905 {
906   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
907 
908   PetscFunctionBegin;
909   PetscCall(MatImaginaryPart(a->A));
910   PetscCall(MatImaginaryPart(a->B));
911   PetscFunctionReturn(PETSC_SUCCESS);
912 }
913 
914 static PetscErrorCode MatInvertBlockDiagonal_MPISELL(Mat A, const PetscScalar **values)
915 {
916   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
917 
918   PetscFunctionBegin;
919   PetscCall(MatInvertBlockDiagonal(a->A, values));
920   A->factorerrortype = a->A->factorerrortype;
921   PetscFunctionReturn(PETSC_SUCCESS);
922 }
923 
924 static PetscErrorCode MatSetRandom_MPISELL(Mat x, PetscRandom rctx)
925 {
926   Mat_MPISELL *sell = (Mat_MPISELL *)x->data;
927 
928   PetscFunctionBegin;
929   PetscCall(MatSetRandom(sell->A, rctx));
930   PetscCall(MatSetRandom(sell->B, rctx));
931   PetscCall(MatAssemblyBegin(x, MAT_FINAL_ASSEMBLY));
932   PetscCall(MatAssemblyEnd(x, MAT_FINAL_ASSEMBLY));
933   PetscFunctionReturn(PETSC_SUCCESS);
934 }
935 
936 static PetscErrorCode MatSetFromOptions_MPISELL(Mat A, PetscOptionItems PetscOptionsObject)
937 {
938   PetscFunctionBegin;
939   PetscOptionsHeadBegin(PetscOptionsObject, "MPISELL options");
940   PetscOptionsHeadEnd();
941   PetscFunctionReturn(PETSC_SUCCESS);
942 }
943 
944 static PetscErrorCode MatShift_MPISELL(Mat Y, PetscScalar a)
945 {
946   Mat_MPISELL *msell = (Mat_MPISELL *)Y->data;
947   Mat_SeqSELL *sell  = (Mat_SeqSELL *)msell->A->data;
948 
949   PetscFunctionBegin;
950   if (!Y->preallocated) {
951     PetscCall(MatMPISELLSetPreallocation(Y, 1, NULL, 0, NULL));
952   } else if (!sell->nz) {
953     PetscInt nonew = sell->nonew;
954     PetscCall(MatSeqSELLSetPreallocation(msell->A, 1, NULL));
955     sell->nonew = nonew;
956   }
957   PetscCall(MatShift_Basic(Y, a));
958   PetscFunctionReturn(PETSC_SUCCESS);
959 }
960 
961 static PetscErrorCode MatMissingDiagonal_MPISELL(Mat A, PetscBool *missing, PetscInt *d)
962 {
963   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
964 
965   PetscFunctionBegin;
966   PetscCheck(A->rmap->n == A->cmap->n, PETSC_COMM_SELF, PETSC_ERR_SUP, "Only works for square matrices");
967   PetscCall(MatMissingDiagonal(a->A, missing, d));
968   if (d) {
969     PetscInt rstart;
970     PetscCall(MatGetOwnershipRange(A, &rstart, NULL));
971     *d += rstart;
972   }
973   PetscFunctionReturn(PETSC_SUCCESS);
974 }
975 
976 static PetscErrorCode MatGetDiagonalBlock_MPISELL(Mat A, Mat *a)
977 {
978   PetscFunctionBegin;
979   *a = ((Mat_MPISELL *)A->data)->A;
980   PetscFunctionReturn(PETSC_SUCCESS);
981 }
982 
983 static PetscErrorCode MatStoreValues_MPISELL(Mat mat)
984 {
985   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
986 
987   PetscFunctionBegin;
988   PetscCall(MatStoreValues(sell->A));
989   PetscCall(MatStoreValues(sell->B));
990   PetscFunctionReturn(PETSC_SUCCESS);
991 }
992 
993 static PetscErrorCode MatRetrieveValues_MPISELL(Mat mat)
994 {
995   Mat_MPISELL *sell = (Mat_MPISELL *)mat->data;
996 
997   PetscFunctionBegin;
998   PetscCall(MatRetrieveValues(sell->A));
999   PetscCall(MatRetrieveValues(sell->B));
1000   PetscFunctionReturn(PETSC_SUCCESS);
1001 }
1002 
1003 static PetscErrorCode MatMPISELLSetPreallocation_MPISELL(Mat B, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[])
1004 {
1005   Mat_MPISELL *b;
1006 
1007   PetscFunctionBegin;
1008   PetscCall(PetscLayoutSetUp(B->rmap));
1009   PetscCall(PetscLayoutSetUp(B->cmap));
1010   b = (Mat_MPISELL *)B->data;
1011 
1012   if (!B->preallocated) {
1013     /* Explicitly create 2 MATSEQSELL matrices. */
1014     PetscCall(MatCreate(PETSC_COMM_SELF, &b->A));
1015     PetscCall(MatSetSizes(b->A, B->rmap->n, B->cmap->n, B->rmap->n, B->cmap->n));
1016     PetscCall(MatSetBlockSizesFromMats(b->A, B, B));
1017     PetscCall(MatSetType(b->A, MATSEQSELL));
1018     PetscCall(MatCreate(PETSC_COMM_SELF, &b->B));
1019     PetscCall(MatSetSizes(b->B, B->rmap->n, B->cmap->N, B->rmap->n, B->cmap->N));
1020     PetscCall(MatSetBlockSizesFromMats(b->B, B, B));
1021     PetscCall(MatSetType(b->B, MATSEQSELL));
1022   }
1023 
1024   PetscCall(MatSeqSELLSetPreallocation(b->A, d_rlenmax, d_rlen));
1025   PetscCall(MatSeqSELLSetPreallocation(b->B, o_rlenmax, o_rlen));
1026   B->preallocated  = PETSC_TRUE;
1027   B->was_assembled = PETSC_FALSE;
1028   /*
1029     critical for MatAssemblyEnd to work.
1030     MatAssemblyBegin checks it to set up was_assembled
1031     and MatAssemblyEnd checks was_assembled to determine whether to build garray
1032   */
1033   B->assembled = PETSC_FALSE;
1034   PetscFunctionReturn(PETSC_SUCCESS);
1035 }
1036 
1037 static PetscErrorCode MatDuplicate_MPISELL(Mat matin, MatDuplicateOption cpvalues, Mat *newmat)
1038 {
1039   Mat          mat;
1040   Mat_MPISELL *a, *oldmat = (Mat_MPISELL *)matin->data;
1041 
1042   PetscFunctionBegin;
1043   *newmat = NULL;
1044   PetscCall(MatCreate(PetscObjectComm((PetscObject)matin), &mat));
1045   PetscCall(MatSetSizes(mat, matin->rmap->n, matin->cmap->n, matin->rmap->N, matin->cmap->N));
1046   PetscCall(MatSetBlockSizesFromMats(mat, matin, matin));
1047   PetscCall(MatSetType(mat, ((PetscObject)matin)->type_name));
1048   a = (Mat_MPISELL *)mat->data;
1049 
1050   mat->factortype   = matin->factortype;
1051   mat->assembled    = PETSC_TRUE;
1052   mat->insertmode   = NOT_SET_VALUES;
1053   mat->preallocated = PETSC_TRUE;
1054 
1055   a->size         = oldmat->size;
1056   a->rank         = oldmat->rank;
1057   a->donotstash   = oldmat->donotstash;
1058   a->roworiented  = oldmat->roworiented;
1059   a->rowindices   = NULL;
1060   a->rowvalues    = NULL;
1061   a->getrowactive = PETSC_FALSE;
1062 
1063   PetscCall(PetscLayoutReference(matin->rmap, &mat->rmap));
1064   PetscCall(PetscLayoutReference(matin->cmap, &mat->cmap));
1065 
1066   if (oldmat->colmap) {
1067 #if defined(PETSC_USE_CTABLE)
1068     PetscCall(PetscHMapIDuplicate(oldmat->colmap, &a->colmap));
1069 #else
1070     PetscCall(PetscMalloc1(mat->cmap->N, &a->colmap));
1071     PetscCall(PetscArraycpy(a->colmap, oldmat->colmap, mat->cmap->N));
1072 #endif
1073   } else a->colmap = NULL;
1074   if (oldmat->garray) {
1075     PetscInt len;
1076     len = oldmat->B->cmap->n;
1077     PetscCall(PetscMalloc1(len + 1, &a->garray));
1078     if (len) PetscCall(PetscArraycpy(a->garray, oldmat->garray, len));
1079   } else a->garray = NULL;
1080 
1081   PetscCall(VecDuplicate(oldmat->lvec, &a->lvec));
1082   PetscCall(VecScatterCopy(oldmat->Mvctx, &a->Mvctx));
1083   PetscCall(MatDuplicate(oldmat->A, cpvalues, &a->A));
1084   PetscCall(MatDuplicate(oldmat->B, cpvalues, &a->B));
1085   PetscCall(PetscFunctionListDuplicate(((PetscObject)matin)->qlist, &((PetscObject)mat)->qlist));
1086   *newmat = mat;
1087   PetscFunctionReturn(PETSC_SUCCESS);
1088 }
1089 
1090 static const struct _MatOps MatOps_Values = {MatSetValues_MPISELL,
1091                                              NULL,
1092                                              NULL,
1093                                              MatMult_MPISELL,
1094                                              /* 4*/ MatMultAdd_MPISELL,
1095                                              MatMultTranspose_MPISELL,
1096                                              MatMultTransposeAdd_MPISELL,
1097                                              NULL,
1098                                              NULL,
1099                                              NULL,
1100                                              /*10*/ NULL,
1101                                              NULL,
1102                                              NULL,
1103                                              MatSOR_MPISELL,
1104                                              NULL,
1105                                              /*15*/ MatGetInfo_MPISELL,
1106                                              MatEqual_MPISELL,
1107                                              MatGetDiagonal_MPISELL,
1108                                              MatDiagonalScale_MPISELL,
1109                                              NULL,
1110                                              /*20*/ MatAssemblyBegin_MPISELL,
1111                                              MatAssemblyEnd_MPISELL,
1112                                              MatSetOption_MPISELL,
1113                                              MatZeroEntries_MPISELL,
1114                                              /*24*/ NULL,
1115                                              NULL,
1116                                              NULL,
1117                                              NULL,
1118                                              NULL,
1119                                              /*29*/ MatSetUp_MPISELL,
1120                                              NULL,
1121                                              NULL,
1122                                              MatGetDiagonalBlock_MPISELL,
1123                                              NULL,
1124                                              /*34*/ MatDuplicate_MPISELL,
1125                                              NULL,
1126                                              NULL,
1127                                              NULL,
1128                                              NULL,
1129                                              /*39*/ NULL,
1130                                              NULL,
1131                                              NULL,
1132                                              MatGetValues_MPISELL,
1133                                              MatCopy_MPISELL,
1134                                              /*44*/ NULL,
1135                                              MatScale_MPISELL,
1136                                              MatShift_MPISELL,
1137                                              MatDiagonalSet_MPISELL,
1138                                              NULL,
1139                                              /*49*/ MatSetRandom_MPISELL,
1140                                              NULL,
1141                                              NULL,
1142                                              NULL,
1143                                              NULL,
1144                                              /*54*/ MatFDColoringCreate_MPIXAIJ,
1145                                              NULL,
1146                                              MatSetUnfactored_MPISELL,
1147                                              NULL,
1148                                              NULL,
1149                                              /*59*/ NULL,
1150                                              MatDestroy_MPISELL,
1151                                              MatView_MPISELL,
1152                                              NULL,
1153                                              NULL,
1154                                              /*64*/ NULL,
1155                                              NULL,
1156                                              NULL,
1157                                              NULL,
1158                                              NULL,
1159                                              /*69*/ NULL,
1160                                              NULL,
1161                                              NULL,
1162                                              MatFDColoringApply_AIJ, /* reuse AIJ function */
1163                                              MatSetFromOptions_MPISELL,
1164                                              NULL,
1165                                              /*75*/ NULL,
1166                                              NULL,
1167                                              NULL,
1168                                              NULL,
1169                                              NULL,
1170                                              /*80*/ NULL,
1171                                              NULL,
1172                                              NULL,
1173                                              /*83*/ NULL,
1174                                              NULL,
1175                                              NULL,
1176                                              NULL,
1177                                              NULL,
1178                                              NULL,
1179                                              /*89*/ NULL,
1180                                              NULL,
1181                                              NULL,
1182                                              NULL,
1183                                              MatConjugate_MPISELL,
1184                                              /*94*/ NULL,
1185                                              NULL,
1186                                              MatRealPart_MPISELL,
1187                                              MatImaginaryPart_MPISELL,
1188                                              NULL,
1189                                              /*99*/ NULL,
1190                                              NULL,
1191                                              NULL,
1192                                              NULL,
1193                                              NULL,
1194                                              /*104*/ MatMissingDiagonal_MPISELL,
1195                                              NULL,
1196                                              NULL,
1197                                              MatGetGhosts_MPISELL,
1198                                              NULL,
1199                                              /*109*/ NULL,
1200                                              MatMultDiagonalBlock_MPISELL,
1201                                              NULL,
1202                                              NULL,
1203                                              NULL,
1204                                              /*114*/ NULL,
1205                                              NULL,
1206                                              NULL,
1207                                              MatInvertBlockDiagonal_MPISELL,
1208                                              NULL,
1209                                              /*119*/ NULL,
1210                                              NULL,
1211                                              NULL,
1212                                              NULL,
1213                                              NULL,
1214                                              /*124*/ NULL,
1215                                              NULL,
1216                                              NULL,
1217                                              NULL,
1218                                              NULL,
1219                                              /*129*/ MatFDColoringSetUp_MPIXAIJ,
1220                                              NULL,
1221                                              NULL,
1222                                              NULL,
1223                                              NULL,
1224                                              /*134*/ NULL,
1225                                              NULL,
1226                                              NULL,
1227                                              NULL,
1228                                              NULL,
1229                                              /*139*/ NULL,
1230                                              NULL,
1231                                              NULL,
1232                                              NULL,
1233                                              NULL};
1234 
1235 /*@C
1236   MatMPISELLSetPreallocation - Preallocates memory for a `MATMPISELL` sparse parallel matrix in SELL format.
1237   For good matrix assembly performance the user should preallocate the matrix storage by
1238   setting the parameters `d_nz` (or `d_nnz`) and `o_nz` (or `o_nnz`).
1239 
1240   Collective
1241 
1242   Input Parameters:
1243 + B     - the matrix
1244 . d_nz  - number of nonzeros per row in DIAGONAL portion of local submatrix
1245            (same value is used for all local rows)
1246 . d_nnz - array containing the number of nonzeros in the various rows of the
1247            DIAGONAL portion of the local submatrix (possibly different for each row)
1248            or NULL (`PETSC_NULL_INTEGER` in Fortran), if `d_nz` is used to specify the nonzero structure.
1249            The size of this array is equal to the number of local rows, i.e., 'm'.
1250            For matrices that will be factored, you must leave room for (and set)
1251            the diagonal entry even if it is zero.
1252 . o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of local
1253            submatrix (same value is used for all local rows).
1254 - o_nnz - array containing the number of nonzeros in the various rows of the
1255            OFF-DIAGONAL portion of the local submatrix (possibly different for
1256            each row) or NULL (`PETSC_NULL_INTEGER` in Fortran), if `o_nz` is used to specify the nonzero
1257            structure. The size of this array is equal to the number
1258            of local rows, i.e., 'm'.
1259 
1260   Example usage:
1261   Consider the following 8x8 matrix with 34 nonzero values that is
1262   assembled across 3 processors. Let us assume that proc0 owns 3 rows,
1263   proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
1264   as follows
1265 
1266 .vb
1267             1  2  0  |  0  3  0  |  0  4
1268     Proc0   0  5  6  |  7  0  0  |  8  0
1269             9  0 10  | 11  0  0  | 12  0
1270     -------------------------------------
1271            13  0 14  | 15 16 17  |  0  0
1272     Proc1   0 18  0  | 19 20 21  |  0  0
1273             0  0  0  | 22 23  0  | 24  0
1274     -------------------------------------
1275     Proc2  25 26 27  |  0  0 28  | 29  0
1276            30  0  0  | 31 32 33  |  0 34
1277 .ve
1278 
1279   This can be represented as a collection of submatrices as
1280 
1281 .vb
1282       A B C
1283       D E F
1284       G H I
1285 .ve
1286 
1287   The submatrices A,B,C are owned by proc0, D,E,F are owned by
1288   proc1, and G,H,I are owned by proc2.
1289 
1290   The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1291   The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1292   The 'M','N' parameters are 8,8, and have the same values on all procs.
1293 
1294   The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1295   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1296   corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1297   Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1298   part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1299   matrix, and [DF] as another `MATSEQSELL` matrix.
1300 
1301   When the `d_nz`, `o_nz` parameters are specified, `d_nz` storage elements are
1302   allocated for every row of the local DIAGONAL submatrix, and `o_nz`
1303   storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1304   One way to choose `d_nz` and `o_nz` is to use the maximum number of nonzeros over
1305   the local rows of the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
1306   In this case, the values of `d_nz`, `o_nz` are
1307 .vb
1308      proc0  d_nz = 2, o_nz = 2
1309      proc1  d_nz = 3, o_nz = 2
1310      proc2  d_nz = 1, o_nz = 4
1311 .ve
1312   We are allocating m*(d_nz+o_nz) storage locations for every proc. This
1313   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
1314   for proc2, i.e., we are using 12+15+10=37 storage locations to store
1315   34 values.
1316 
1317   When `d_nnz`, `o_nnz` parameters are specified, the storage is specified
1318   for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1319   In the above case the values for d_nnz,o_nnz are
1320 .vb
1321      proc0 d_nnz = [2,2,2] and o_nnz = [2,2,2]
1322      proc1 d_nnz = [3,3,2] and o_nnz = [2,1,1]
1323      proc2 d_nnz = [1,1]   and o_nnz = [4,4]
1324 .ve
1325   Here the space allocated is according to the maximum values in the nnz arrays
1326   for the DIAGONAL and OFF-DIAGONAL submatrices, i.e., 3*(2+2)+3*(3+2)+2*(1+4)=37
1327 
1328   Level: intermediate
1329 
1330   Notes:
1331   If the *_nnz parameter is given then the *_nz parameter is ignored
1332 
1333   The stored row and column indices begin with zero.
1334 
1335   The parallel matrix is partitioned such that the first m0 rows belong to
1336   process 0, the next m1 rows belong to process 1, the next m2 rows belong
1337   to process 2, etc., where m0,m1,m2,... are the input parameter 'm'.
1338 
1339   The DIAGONAL portion of the local submatrix of a processor can be defined
1340   as the submatrix obtained by extracting the part corresponding to
1341   the rows r1-r2 and columns c1-c2 of the global matrix, where r1 is the
1342   first row that belongs to the processor, r2 is the last row belonging to
1343   this processor, and c1-c2 is the range of indices of the local part of a
1344   vector suitable for applying the matrix to.  This is an m x n matrix.  In the
1345   common case of a square matrix, the row and column ranges are the same and
1346   the DIAGONAL part is also square. The remaining portion of the local
1347   submatrix, of size m x (N-n), constitutes the OFF-DIAGONAL portion.
1348 
1349   If `o_nnz`, `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.
1350 
1351   You can call `MatGetInfo()` to get information on how effective the preallocation was,
1352   for example the fields mallocs, nz_allocated, nz_used, and nz_unneeded.
1353   You can also run with the option -info and look for messages with the string
1354   malloc in them to see if additional memory allocation was needed.
1355 
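  A minimal call sequence has the following shape; the communicator `comm`, the sizes, and
  the `d_nnz`/`o_nnz` arrays below are illustrative placeholders only (they mirror proc0 of
  the example above, with one entry per local row).
.vb
   Mat      B;
   PetscInt d_nnz[] = {2, 2, 2}, o_nnz[] = {2, 2, 2};

   MatCreate(comm, &B);
   MatSetSizes(B, 3, 3, PETSC_DETERMINE, PETSC_DETERMINE);
   MatSetType(B, MATMPISELL);
   MatMPISELLSetPreallocation(B, 0, d_nnz, 0, o_nnz);
.ve
  followed by the usual `MatSetValues()` and `MatAssemblyBegin()`/`MatAssemblyEnd()` calls.
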
1356 .seealso: `Mat`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatCreateSELL()`,
1357           `MATMPISELL`, `MatGetInfo()`, `PetscSplitOwnership()`, `MATSELL`
1358 @*/
1359 PetscErrorCode MatMPISELLSetPreallocation(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
1360 {
1361   PetscFunctionBegin;
1362   PetscValidHeaderSpecific(B, MAT_CLASSID, 1);
1363   PetscValidType(B, 1);
1364   PetscTryMethod(B, "MatMPISELLSetPreallocation_C", (Mat, PetscInt, const PetscInt[], PetscInt, const PetscInt[]), (B, d_nz, d_nnz, o_nz, o_nnz));
1365   PetscFunctionReturn(PETSC_SUCCESS);
1366 }
1367 
1368 /*MC
1369    MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for MPI sparse matrices,
1370    based on the sliced Ellpack format
1371 
1372    Options Database Key:
1373 . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`
1374 
1375    Level: beginner
1376 
1377 .seealso: `Mat`, `MatCreateSELL()`, `MATSEQSELL`, `MATSELL`, `MATSEQAIJ`, `MATAIJ`, `MATMPIAIJ`
1378 M*/
1379 
1380 /*@C
1381   MatCreateSELL - Creates a sparse parallel matrix in `MATSELL` format.
1382 
1383   Collective
1384 
1385   Input Parameters:
1386 + comm      - MPI communicator
1387 . m         - number of local rows (or `PETSC_DECIDE` to have calculated if M is given)
1388               This value should be the same as the local size used in creating the
1389               y vector for the matrix-vector product y = Ax.
1390 . n         - This value should be the same as the local size used in creating the
1391               x vector for the matrix-vector product y = Ax. (or `PETSC_DECIDE` to have
1392               calculated if `N` is given) For square matrices n is almost always `m`.
1393 . M         - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
1394 . N         - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
1395 . d_rlenmax - max number of nonzeros per row in DIAGONAL portion of local submatrix
1396              (same value is used for all local rows)
1397 . d_rlen    - array containing the number of nonzeros in the various rows of the
1398               DIAGONAL portion of the local submatrix (possibly different for each row)
1399               or `NULL`, if d_rlenmax is used to specify the nonzero structure.
1400               The size of this array is equal to the number of local rows, i.e `m`.
1401 . o_rlenmax - max number of nonzeros per row in the OFF-DIAGONAL portion of local
1402               submatrix (same value is used for all local rows).
1403 - o_rlen    - array containing the number of nonzeros in the various rows of the
1404               OFF-DIAGONAL portion of the local submatrix (possibly different for
1405               each row) or `NULL`, if `o_rlenmax` is used to specify the nonzero
1406               structure. The size of this array is equal to the number
1407               of local rows, i.e `m`.
1408 
1409   Output Parameter:
1410 . A - the matrix
1411 
1412   Options Database Key:
1413 . -mat_sell_oneindex - Internally use indexing starting at 1
1414         rather than 0.  When calling `MatSetValues()`,
1415         the user still MUST index entries starting at 0!
1416 
1417   Example:
1418   Consider the following 8x8 matrix with 34 nonzero values that is
1419   assembled across 3 processors. Let us assume that proc0 owns 3 rows,
1420   proc1 owns 3 rows, and proc2 owns 2 rows. This division can be shown
1421   as follows
1422 
1423 .vb
1424             1  2  0  |  0  3  0  |  0  4
1425     Proc0   0  5  6  |  7  0  0  |  8  0
1426             9  0 10  | 11  0  0  | 12  0
1427     -------------------------------------
1428            13  0 14  | 15 16 17  |  0  0
1429     Proc1   0 18  0  | 19 20 21  |  0  0
1430             0  0  0  | 22 23  0  | 24  0
1431     -------------------------------------
1432     Proc2  25 26 27  |  0  0 28  | 29  0
1433            30  0  0  | 31 32 33  |  0 34
1434 .ve
1435 
1436   This can be represented as a collection of submatrices as
1437 .vb
1438       A B C
1439       D E F
1440       G H I
1441 .ve
1442 
1443   The submatrices A,B,C are owned by proc0, D,E,F are owned by
1444   proc1, and G,H,I are owned by proc2.
1445 
1446   The 'm' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1447   The 'n' parameters for proc0,proc1,proc2 are 3,3,2 respectively.
1448   The 'M','N' parameters are 8,8, and have the same values on all procs.
1449 
1450   The DIAGONAL submatrices corresponding to proc0,proc1,proc2 are
1451   submatrices [A], [E], [I] respectively. The OFF-DIAGONAL submatrices
1452   corresponding to proc0,proc1,proc2 are [BC], [DF], [GH] respectively.
1453   Internally, each processor stores the DIAGONAL part, and the OFF-DIAGONAL
1454   part as `MATSEQSELL` matrices. For example, proc1 will store [E] as a `MATSEQSELL`
1455   matrix, and [DF] as another `MATSEQSELL` matrix.
1456 
1457   When the `d_rlenmax`, `o_rlenmax` parameters are specified, `d_rlenmax` storage elements are
1458   allocated for every row of the local DIAGONAL submatrix, and `o_rlenmax`
1459   storage locations are allocated for every row of the OFF-DIAGONAL submatrix.
1460   One way to choose `d_rlenmax` and `o_rlenmax` is to use the maximum number of nonzeros over
1461   the local rows of the local DIAGONAL and OFF-DIAGONAL submatrices, respectively.
1462   In this case, the values of `d_rlenmax`, `o_rlenmax` are
1463 .vb
1464      proc0 - d_rlenmax = 2, o_rlenmax = 2
1465      proc1 - d_rlenmax = 3, o_rlenmax = 2
1466      proc2 - d_rlenmax = 1, o_rlenmax = 4
1467 .ve
1468   We are allocating m*(d_rlenmax+o_rlenmax) storage locations for every proc. This
1469   translates to 3*(2+2)=12 for proc0, 3*(3+2)=15 for proc1, and 2*(1+4)=10
1470   for proc2, i.e., we are using 12+15+10=37 storage locations to store
1471   34 values.
1472 
1473   When `d_rlen`, `o_rlen` parameters are specified, the storage is specified
1474   for every row, corresponding to both DIAGONAL and OFF-DIAGONAL submatrices.
1475   In the above case the values for `d_rlen`, `o_rlen` are
1476 .vb
1477      proc0 - d_rlen = [2,2,2] and o_rlen = [2,2,2]
1478      proc1 - d_rlen = [3,3,2] and o_rlen = [2,1,1]
1479      proc2 - d_rlen = [1,1]   and o_rlen = [4,4]
1480 .ve
1481   Here the space allocated is still 37 though there are 34 nonzeros because
1482   the allocation is always done according to rlenmax.
1483 
1484   Level: intermediate
1485 
1486   Notes:
1487   It is recommended that one use the `MatCreate()`, `MatSetType()` and/or `MatSetFromOptions()`,
1488   MatXXXXSetPreallocation() paradigm instead of this routine directly.
1489   [MatXXXXSetPreallocation() is, for example, `MatSeqSELLSetPreallocation()`]
1490 
1491   If the *_rlen parameter is given then the *_rlenmax parameter is ignored
1492 
1493   `m`, `n`, `M`, `N` parameters specify the size of the matrix, and its partitioning across
1494   processors, while `d_rlenmax`, `d_rlen`, `o_rlenmax`, `o_rlen` parameters specify the approximate
1495   storage requirements for this matrix.
1496 
1497   If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
1498   processor then it must be used on all processors that share the object for
1499   that argument.
1500 
1501   The user MUST specify either the local or global matrix dimensions
1502   (possibly both).
1503 
1504   The parallel matrix is partitioned across processors such that the
1505   first m0 rows belong to process 0, the next m1 rows belong to
1506   process 1, the next m2 rows belong to process 2, etc., where
1507   m0,m1,m2,... are the input parameter `m`, i.e., each processor stores
1508   values corresponding to an [`m` x `N`] submatrix.
1509 
1510   The columns are logically partitioned with the n0 columns belonging
1511   to the 0th partition, the next n1 columns belonging to the next
1512   partition, etc., where n0,n1,n2,... are the input parameter `n`.
1513 
1514   The DIAGONAL portion of the local submatrix on any given processor
1515   is the submatrix corresponding to the rows and columns `m`, `n`
1516   owned by the given processor, i.e., the diagonal matrix on
1517   process 0 is [m0 x n0], the diagonal matrix on process 1 is [m1 x n1],
1518   etc. The remaining portion of the local submatrix [m x (N-n)]
1519   constitutes the OFF-DIAGONAL portion. The example above
1520   illustrates this concept.
1521 
1522   For a square global matrix we define each processor's diagonal portion
1523   to be its local rows and the corresponding columns (a square submatrix);
1524   each processor's off-diagonal portion encompasses the remainder of the
1525   local matrix (a rectangular submatrix).
1526 
1527   If `o_rlen`, `d_rlen` are specified, then `o_rlenmax`, and `d_rlenmax` are ignored.
1528 
1529   When calling this routine with a single process communicator, a matrix of
1530   type `MATSEQSELL` is returned.  If a matrix of type `MATMPISELL` is desired for this
1531   type of communicator, use the construction mechanism
1532 .vb
1533    MatCreate(...,&A);
1534    MatSetType(A,MATMPISELL);
1535    MatSetSizes(A, m,n,M,N);
1536    MatMPISELLSetPreallocation(A,...);
1537 .ve
1538 
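  For completeness, the direct call made on proc0 of the example above would look as
  follows (other ranks pass their own local sizes and arrays; the names here are
  illustrative only).
.vb
   Mat      A;
   PetscInt d_rlen[] = {2, 2, 2}, o_rlen[] = {2, 2, 2};

   MatCreateSELL(PETSC_COMM_WORLD, 3, 3, 8, 8, 0, d_rlen, 0, o_rlen, &A);
.ve
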
1539 .seealso: `Mat`, `MATSELL`, `MatCreate()`, `MatCreateSeqSELL()`, `MatSetValues()`, `MatMPISELLSetPreallocation()`, `MATMPISELL`
1540 @*/
1541 PetscErrorCode MatCreateSELL(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_rlenmax, const PetscInt d_rlen[], PetscInt o_rlenmax, const PetscInt o_rlen[], Mat *A)
1542 {
1543   PetscMPIInt size;
1544 
1545   PetscFunctionBegin;
1546   PetscCall(MatCreate(comm, A));
1547   PetscCall(MatSetSizes(*A, m, n, M, N));
1548   PetscCallMPI(MPI_Comm_size(comm, &size));
1549   if (size > 1) {
1550     PetscCall(MatSetType(*A, MATMPISELL));
1551     PetscCall(MatMPISELLSetPreallocation(*A, d_rlenmax, d_rlen, o_rlenmax, o_rlen));
1552   } else {
1553     PetscCall(MatSetType(*A, MATSEQSELL));
1554     PetscCall(MatSeqSELLSetPreallocation(*A, d_rlenmax, d_rlen));
1555   }
1556   PetscFunctionReturn(PETSC_SUCCESS);
1557 }
1558 
1559 /*@C
1560   MatMPISELLGetSeqSELL - Returns the local pieces of this distributed matrix
1561 
1562   Not Collective
1563 
1564   Input Parameter:
1565 . A - the `MATMPISELL` matrix
1566 
1567   Output Parameters:
1568 + Ad     - The diagonal portion of `A`
1569 . Ao     - The off-diagonal portion of `A`
1570 - colmap - An array mapping local column numbers of `Ao` to global column numbers of the parallel matrix
1571 
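  Typical usage (the variable names are illustrative) is
.vb
   Mat             Ad, Ao;
   const PetscInt *garray;

   MatMPISELLGetSeqSELL(A, &Ad, &Ao, &garray);
.ve
  The returned `Ad`, `Ao`, and `garray` are owned by `A` and must not be destroyed or freed
  by the caller.
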
1572   Level: advanced
1573 
1574 .seealso: `Mat`, `MATSEQSELL`, `MATMPISELL`
1575 @*/
1576 PetscErrorCode MatMPISELLGetSeqSELL(Mat A, Mat *Ad, Mat *Ao, const PetscInt *colmap[])
1577 {
1578   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1579   PetscBool    flg;
1580 
1581   PetscFunctionBegin;
1582   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &flg));
1583   PetscCheck(flg, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "This function requires a MATMPISELL matrix as input");
1584   if (Ad) *Ad = a->A;
1585   if (Ao) *Ao = a->B;
1586   if (colmap) *colmap = a->garray;
1587   PetscFunctionReturn(PETSC_SUCCESS);
1588 }
1589 
1590 /*@C
1591   MatMPISELLGetLocalMatCondensed - Creates a `MATSEQSELL` matrix from a `MATMPISELL` matrix by
1592   taking all its local rows and NON-ZERO columns
1593 
1594   Not Collective
1595 
1596   Input Parameters:
1597 + A     - the matrix
1598 . scall - either `MAT_INITIAL_MATRIX` or `MAT_REUSE_MATRIX`
1599 . row   - index set of rows to extract (or `NULL`)
1600 - col   - index set of columns to extract (or `NULL`)
1601 
1602   Output Parameter:
1603 . A_loc - the local sequential matrix generated
1604 
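  Typical usage (illustrative; passing `NULL` for `row` and `col` selects all local rows
  and the nonzero columns) is
.vb
   Mat A_loc;

   MatMPISELLGetLocalMatCondensed(A, MAT_INITIAL_MATRIX, NULL, NULL, &A_loc);
.ve
  and, for `MAT_INITIAL_MATRIX`, destroying `A_loc` with `MatDestroy()` when it is no longer needed.
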
1605   Level: advanced
1606 
1607 .seealso: `Mat`, `MATSEQSELL`, `MATMPISELL`, `MatGetOwnershipRange()`, `MatMPISELLGetLocalMat()`
1608 @*/
1609 PetscErrorCode MatMPISELLGetLocalMatCondensed(Mat A, MatReuse scall, IS *row, IS *col, Mat *A_loc)
1610 {
1611   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1612   PetscInt     i, start, end, ncols, nzA, nzB, *cmap, imark, *idx;
1613   IS           isrowa, iscola;
1614   Mat         *aloc;
1615   PetscBool    match;
1616 
1617   PetscFunctionBegin;
1618   PetscCall(PetscObjectTypeCompare((PetscObject)A, MATMPISELL, &match));
1619   PetscCheck(match, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Requires MATMPISELL matrix as input");
1620   PetscCall(PetscLogEventBegin(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1621   if (!row) {
1622     start = A->rmap->rstart;
1623     end   = A->rmap->rend;
1624     PetscCall(ISCreateStride(PETSC_COMM_SELF, end - start, start, 1, &isrowa));
1625   } else {
1626     isrowa = *row;
1627   }
1628   if (!col) {
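    /* no column index set was given: build one with the off-diagonal (garray) columns that
       precede the owned column range, then the owned columns, then the remaining garray
       columns, so the resulting index set stays sorted */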
1629     start = A->cmap->rstart;
1630     cmap  = a->garray;
1631     nzA   = a->A->cmap->n;
1632     nzB   = a->B->cmap->n;
1633     PetscCall(PetscMalloc1(nzA + nzB, &idx));
1634     ncols = 0;
1635     for (i = 0; i < nzB; i++) {
1636       if (cmap[i] < start) idx[ncols++] = cmap[i];
1637       else break;
1638     }
1639     imark = i;
1640     for (i = 0; i < nzA; i++) idx[ncols++] = start + i;
1641     for (i = imark; i < nzB; i++) idx[ncols++] = cmap[i];
1642     PetscCall(ISCreateGeneral(PETSC_COMM_SELF, ncols, idx, PETSC_OWN_POINTER, &iscola));
1643   } else {
1644     iscola = *col;
1645   }
1646   if (scall != MAT_INITIAL_MATRIX) {
1647     PetscCall(PetscMalloc1(1, &aloc));
1648     aloc[0] = *A_loc;
1649   }
1650   PetscCall(MatCreateSubMatrices(A, 1, &isrowa, &iscola, scall, &aloc));
1651   *A_loc = aloc[0];
1652   PetscCall(PetscFree(aloc));
1653   if (!row) PetscCall(ISDestroy(&isrowa));
1654   if (!col) PetscCall(ISDestroy(&iscola));
1655   PetscCall(PetscLogEventEnd(MAT_Getlocalmatcondensed, A, 0, 0, 0));
1656   PetscFunctionReturn(PETSC_SUCCESS);
1657 }
1658 
1659 #include <../src/mat/impls/aij/mpi/mpiaij.h>
1660 
1661 PetscErrorCode MatConvert_MPISELL_MPIAIJ(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1662 {
1663   Mat_MPISELL *a = (Mat_MPISELL *)A->data;
1664   Mat          B;
1665   Mat_MPIAIJ  *b;
1666 
1667   PetscFunctionBegin;
1668   PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");
1669 
1670   if (reuse == MAT_REUSE_MATRIX) {
1671     B = *newmat;
1672   } else {
1673     PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1674     PetscCall(MatSetType(B, MATMPIAIJ));
1675     PetscCall(MatSetSizes(B, A->rmap->n, A->cmap->n, A->rmap->N, A->cmap->N));
1676     PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1677     PetscCall(MatSeqAIJSetPreallocation(B, 0, NULL));
1678     PetscCall(MatMPIAIJSetPreallocation(B, 0, NULL, 0, NULL));
1679   }
1680   b = (Mat_MPIAIJ *)B->data;
1681 
1682   if (reuse == MAT_REUSE_MATRIX) {
1683     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_REUSE_MATRIX, &b->A));
1684     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_REUSE_MATRIX, &b->B));
1685   } else {
1686     PetscCall(MatDestroy(&b->A));
1687     PetscCall(MatDestroy(&b->B));
1688     PetscCall(MatDisAssemble_MPISELL(A));
1689     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->A, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->A));
1690     PetscCall(MatConvert_SeqSELL_SeqAIJ(a->B, MATSEQAIJ, MAT_INITIAL_MATRIX, &b->B));
1691     PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1692     PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1693     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1694     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1695   }
1696 
1697   if (reuse == MAT_INPLACE_MATRIX) {
1698     PetscCall(MatHeaderReplace(A, &B));
1699   } else {
1700     *newmat = B;
1701   }
1702   PetscFunctionReturn(PETSC_SUCCESS);
1703 }
1704 
1705 PetscErrorCode MatConvert_MPIAIJ_MPISELL(Mat A, MatType newtype, MatReuse reuse, Mat *newmat)
1706 {
1707   Mat_MPIAIJ  *a = (Mat_MPIAIJ *)A->data;
1708   Mat          B;
1709   Mat_MPISELL *b;
1710 
1711   PetscFunctionBegin;
1712   PetscCheck(A->assembled, PetscObjectComm((PetscObject)A), PETSC_ERR_SUP, "Matrix must be assembled");
1713 
1714   if (reuse == MAT_REUSE_MATRIX) {
1715     B = *newmat;
1716   } else {
1717     Mat_SeqAIJ *Aa = (Mat_SeqAIJ *)a->A->data, *Ba = (Mat_SeqAIJ *)a->B->data;
1718     PetscInt    i, d_nz = 0, o_nz = 0, m = A->rmap->N, n = A->cmap->N, lm = A->rmap->n, ln = A->cmap->n;
1719     PetscInt   *d_nnz, *o_nnz;
1720     PetscCall(PetscMalloc2(lm, &d_nnz, lm, &o_nnz));
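    /* compute per-row lengths of the local diagonal (a->A) and off-diagonal (a->B) CSR blocks
       and track their maxima, which serve as the d_nz/o_nz arguments to the SELL preallocation */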
1721     for (i = 0; i < lm; i++) {
1722       d_nnz[i] = Aa->i[i + 1] - Aa->i[i];
1723       o_nnz[i] = Ba->i[i + 1] - Ba->i[i];
1724       if (d_nnz[i] > d_nz) d_nz = d_nnz[i];
1725       if (o_nnz[i] > o_nz) o_nz = o_nnz[i];
1726     }
1727     PetscCall(MatCreate(PetscObjectComm((PetscObject)A), &B));
1728     PetscCall(MatSetType(B, MATMPISELL));
1729     PetscCall(MatSetSizes(B, lm, ln, m, n));
1730     PetscCall(MatSetBlockSizes(B, A->rmap->bs, A->cmap->bs));
1731     PetscCall(MatSeqSELLSetPreallocation(B, d_nz, d_nnz));
1732     PetscCall(MatMPISELLSetPreallocation(B, d_nz, d_nnz, o_nz, o_nnz));
1733     PetscCall(PetscFree2(d_nnz, o_nnz));
1734   }
1735   b = (Mat_MPISELL *)B->data;
1736 
1737   if (reuse == MAT_REUSE_MATRIX) {
1738     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_REUSE_MATRIX, &b->A));
1739     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_REUSE_MATRIX, &b->B));
1740   } else {
1741     PetscCall(MatDestroy(&b->A));
1742     PetscCall(MatDestroy(&b->B));
1743     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->A, MATSEQSELL, MAT_INITIAL_MATRIX, &b->A));
1744     PetscCall(MatConvert_SeqAIJ_SeqSELL(a->B, MATSEQSELL, MAT_INITIAL_MATRIX, &b->B));
1745     PetscCall(MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY));
1746     PetscCall(MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY));
1747     PetscCall(MatAssemblyBegin(B, MAT_FINAL_ASSEMBLY));
1748     PetscCall(MatAssemblyEnd(B, MAT_FINAL_ASSEMBLY));
1749   }
1750 
1751   if (reuse == MAT_INPLACE_MATRIX) {
1752     PetscCall(MatHeaderReplace(A, &B));
1753   } else {
1754     *newmat = B;
1755   }
1756   PetscFunctionReturn(PETSC_SUCCESS);
1757 }
1758 
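/*
  Both converters above are normally reached through MatConvert(); an illustrative call,
  assuming A is an assembled MATMPIAIJ matrix, is

     Mat B;
     PetscCall(MatConvert(A, MATMPISELL, MAT_INITIAL_MATRIX, &B));

  With MAT_INPLACE_MATRIX the converted matrix replaces A through MatHeaderReplace(), as
  done at the end of each routine above.
*/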
1759 PetscErrorCode MatSOR_MPISELL(Mat matin, Vec bb, PetscReal omega, MatSORType flag, PetscReal fshift, PetscInt its, PetscInt lits, Vec xx)
1760 {
1761   Mat_MPISELL *mat = (Mat_MPISELL *)matin->data;
1762   Vec          bb1 = NULL;
1763 
1764   PetscFunctionBegin;
1765   if (flag == SOR_APPLY_UPPER) {
1766     PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1767     PetscFunctionReturn(PETSC_SUCCESS);
1768   }
1769 
1770   if (its > 1 || ~flag & SOR_ZERO_INITIAL_GUESS || flag & SOR_EISENSTAT) PetscCall(VecDuplicate(bb, &bb1));
1771 
1772   if ((flag & SOR_LOCAL_SYMMETRIC_SWEEP) == SOR_LOCAL_SYMMETRIC_SWEEP) {
1773     if (flag & SOR_ZERO_INITIAL_GUESS) {
1774       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1775       its--;
1776     }
1777 
1778     while (its--) {
1779       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1780       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1781 
1782       /* update rhs: bb1 = bb - B*x */
1783       PetscCall(VecScale(mat->lvec, -1.0));
1784       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));
1785 
1786       /* local sweep */
1787       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_SYMMETRIC_SWEEP, fshift, lits, 1, xx));
1788     }
1789   } else if (flag & SOR_LOCAL_FORWARD_SWEEP) {
1790     if (flag & SOR_ZERO_INITIAL_GUESS) {
1791       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1792       its--;
1793     }
1794     while (its--) {
1795       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1796       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1797 
1798       /* update rhs: bb1 = bb - B*x */
1799       PetscCall(VecScale(mat->lvec, -1.0));
1800       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));
1801 
1802       /* local sweep */
1803       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_FORWARD_SWEEP, fshift, lits, 1, xx));
1804     }
1805   } else if (flag & SOR_LOCAL_BACKWARD_SWEEP) {
1806     if (flag & SOR_ZERO_INITIAL_GUESS) {
1807       PetscCall((*mat->A->ops->sor)(mat->A, bb, omega, flag, fshift, lits, 1, xx));
1808       its--;
1809     }
1810     while (its--) {
1811       PetscCall(VecScatterBegin(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1812       PetscCall(VecScatterEnd(mat->Mvctx, xx, mat->lvec, INSERT_VALUES, SCATTER_FORWARD));
1813 
1814       /* update rhs: bb1 = bb - B*x */
1815       PetscCall(VecScale(mat->lvec, -1.0));
1816       PetscCall((*mat->B->ops->multadd)(mat->B, mat->lvec, bb, bb1));
1817 
1818       /* local sweep */
1819       PetscCall((*mat->A->ops->sor)(mat->A, bb1, omega, SOR_BACKWARD_SWEEP, fshift, lits, 1, xx));
1820     }
1821   } else SETERRQ(PetscObjectComm((PetscObject)matin), PETSC_ERR_SUP, "Parallel SOR not supported");
1822 
1823   PetscCall(VecDestroy(&bb1));
1824 
1825   matin->factorerrortype = mat->A->factorerrortype;
1826   PetscFunctionReturn(PETSC_SUCCESS);
1827 }
1828 
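/*
  MatSOR_MPISELL() is typically invoked through the SOR preconditioner rather than called
  directly; an illustrative command line is

     -ksp_type richardson -pc_type sor -pc_sor_local_symmetric

  Only SOR_APPLY_UPPER and the SOR_LOCAL_* sweeps are handled above; other parallel sweeps
  raise PETSC_ERR_SUP.
*/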
1829 #if defined(PETSC_HAVE_CUDA)
1830 PETSC_INTERN PetscErrorCode MatConvert_MPISELL_MPISELLCUDA(Mat, MatType, MatReuse, Mat *);
1831 #endif
1832 
1833 /*MC
1834    MATMPISELL - MATMPISELL = "mpisell" - A matrix type to be used for parallel sparse matrices.
1835 
1836    Options Database Keys:
1837 . -mat_type mpisell - sets the matrix type to `MATMPISELL` during a call to `MatSetFromOptions()`
1838 
1839   Level: beginner
1840 
1841 .seealso: `Mat`, `MATSELL`, `MATSEQSELL`, `MatCreateSELL()`
1842 M*/
1843 PETSC_EXTERN PetscErrorCode MatCreate_MPISELL(Mat B)
1844 {
1845   Mat_MPISELL *b;
1846   PetscMPIInt  size;
1847 
1848   PetscFunctionBegin;
1849   PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)B), &size));
1850   PetscCall(PetscNew(&b));
1851   B->data       = (void *)b;
1852   B->ops[0]     = MatOps_Values;
1853   B->assembled  = PETSC_FALSE;
1854   B->insertmode = NOT_SET_VALUES;
1855   b->size       = size;
1856   PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)B), &b->rank));
1857   /* build cache for off array entries formed */
1858   PetscCall(MatStashCreate_Private(PetscObjectComm((PetscObject)B), 1, &B->stash));
1859 
1860   b->donotstash  = PETSC_FALSE;
1861   b->colmap      = NULL;
1862   b->garray      = NULL;
1863   b->roworiented = PETSC_TRUE;
1864 
1865   /* stuff used for matrix vector multiply */
1866   b->lvec  = NULL;
1867   b->Mvctx = NULL;
1868 
1869   /* stuff for MatGetRow() */
1870   b->rowindices   = NULL;
1871   b->rowvalues    = NULL;
1872   b->getrowactive = PETSC_FALSE;
1873 
1874   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatStoreValues_C", MatStoreValues_MPISELL));
1875   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatRetrieveValues_C", MatRetrieveValues_MPISELL));
1876   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatIsTranspose_C", MatIsTranspose_MPISELL));
1877   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPISELLSetPreallocation_C", MatMPISELLSetPreallocation_MPISELL));
1878   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpisell_mpiaij_C", MatConvert_MPISELL_MPIAIJ));
1879 #if defined(PETSC_HAVE_CUDA)
1880   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatConvert_mpisell_mpisellcuda_C", MatConvert_MPISELL_MPISELLCUDA));
1881 #endif
1882   PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatDiagonalScaleLocal_C", MatDiagonalScaleLocal_MPISELL));
1883   PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPISELL));
1884   PetscFunctionReturn(PETSC_SUCCESS);
1885 }
1886