xref: /petsc/src/mat/impls/aij/mpi/mpimatmatmult.c (revision 77a2fc528284dc99e38fdeba26184ea2721169c7)
1 
2 /*
3   Defines matrix-matrix product routines for pairs of MPIAIJ matrices
4           C = A * B
5 */
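/*
  Illustrative usage sketch (not part of this file): these kernels are normally
  reached through the public MatProduct interface.  Assuming A and B are
  assembled MATMPIAIJ matrices, one may write

    Mat C;
    PetscCall(MatProductCreate(A,B,NULL,&C));
    PetscCall(MatProductSetType(C,MATPRODUCT_AB));
    PetscCall(MatProductSetAlgorithm(C,"scalable"));
    PetscCall(MatProductSetFill(C,PETSC_DEFAULT));
    PetscCall(MatProductSetFromOptions(C));
    PetscCall(MatProductSymbolic(C));
    PetscCall(MatProductNumeric(C));
    PetscCall(MatDestroy(&C));

  where the algorithm may also be "nonscalable", "seqmpi", "backend", or (when
  PETSc is configured with hypre) "hypre"; MatProductSymbolic() then dispatches
  through MatProductSymbolic_AB_MPIAIJ_MPIAIJ() below.  The one-shot wrapper
  MatMatMult(A,B,MAT_INITIAL_MATRIX,fill,&C) is equivalent.
*/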
6 #include <../src/mat/impls/aij/seq/aij.h> /*I "petscmat.h" I*/
7 #include <../src/mat/utils/freespace.h>
8 #include <../src/mat/impls/aij/mpi/mpiaij.h>
9 #include <petscbt.h>
10 #include <../src/mat/impls/dense/mpi/mpidense.h>
11 #include <petsc/private/vecimpl.h>
12 #include <petsc/private/sfimpl.h>
13 
14 #if defined(PETSC_HAVE_HYPRE)
15 PETSC_INTERN PetscErrorCode MatMatMultSymbolic_AIJ_AIJ_wHYPRE(Mat,Mat,PetscReal,Mat);
16 #endif
17 
18 PETSC_INTERN PetscErrorCode MatProductSymbolic_AB_MPIAIJ_MPIAIJ(Mat C)
19 {
20   Mat_Product         *product = C->product;
21   Mat                 A=product->A,B=product->B;
22   MatProductAlgorithm alg=product->alg;
23   PetscReal           fill=product->fill;
24   PetscBool           flg;
25 
26   PetscFunctionBegin;
27   /* scalable */
28   PetscCall(PetscStrcmp(alg,"scalable",&flg));
29   if (flg) {
30     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C));
31     PetscFunctionReturn(0);
32   }
33 
34   /* nonscalable */
35   PetscCall(PetscStrcmp(alg,"nonscalable",&flg));
36   if (flg) {
37     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C));
38     PetscFunctionReturn(0);
39   }
40 
41   /* seqmpi */
42   PetscCall(PetscStrcmp(alg,"seqmpi",&flg));
43   if (flg) {
44     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(A,B,fill,C));
45     PetscFunctionReturn(0);
46   }
47 
48   /* backend general code */
49   PetscCall(PetscStrcmp(alg,"backend",&flg));
50   if (flg) {
51     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
52     PetscFunctionReturn(0);
53   }
54 
55 #if defined(PETSC_HAVE_HYPRE)
56   PetscCall(PetscStrcmp(alg,"hypre",&flg));
57   if (flg) {
58     PetscCall(MatMatMultSymbolic_AIJ_AIJ_wHYPRE(A,B,fill,C));
59     PetscFunctionReturn(0);
60   }
61 #endif
62   SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_SUP,"MatProductAlgorithm %s is not supported",alg);
63 }
64 
65 PetscErrorCode MatDestroy_MPIAIJ_MatMatMult(void *data)
66 {
67   Mat_APMPI      *ptap = (Mat_APMPI*)data;
68 
69   PetscFunctionBegin;
70   PetscCall(PetscFree2(ptap->startsj_s,ptap->startsj_r));
71   PetscCall(PetscFree(ptap->bufa));
72   PetscCall(MatDestroy(&ptap->P_loc));
73   PetscCall(MatDestroy(&ptap->P_oth));
74   PetscCall(MatDestroy(&ptap->Pt));
75   PetscCall(PetscFree(ptap->api));
76   PetscCall(PetscFree(ptap->apj));
77   PetscCall(PetscFree(ptap->apa));
78   PetscCall(PetscFree(ptap));
79   PetscFunctionReturn(0);
80 }
81 
82 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,Mat C)
83 {
84   Mat_MPIAIJ        *a  =(Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
85   Mat_SeqAIJ        *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
86   Mat_SeqAIJ        *cd =(Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
87   PetscScalar       *cda=cd->a,*coa=co->a;
88   Mat_SeqAIJ        *p_loc,*p_oth;
89   PetscScalar       *apa,*ca;
90   PetscInt          cm =C->rmap->n;
91   Mat_APMPI         *ptap;
92   PetscInt          *api,*apj,*apJ,i,k;
93   PetscInt          cstart=C->cmap->rstart;
94   PetscInt          cdnz,conz,k0,k1;
95   const PetscScalar *dummy;
96   MPI_Comm          comm;
97   PetscMPIInt       size;
98 
99   PetscFunctionBegin;
100   MatCheckProduct(C,3);
101   ptap = (Mat_APMPI*)C->product->data;
102   PetscCheck(ptap,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be computed. Missing data");
103   PetscCall(PetscObjectGetComm((PetscObject)A,&comm));
104   PetscCallMPI(MPI_Comm_size(comm,&size));
105   PetscCheck(ptap->P_oth || size <= 1,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
106 
107   /* flag CPU mask for C */
108 #if defined(PETSC_HAVE_DEVICE)
109   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
110   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
111   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
112 #endif
113 
114   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
115   /*-----------------------------------------------------*/
116   /* update numerical values of P_oth and P_loc */
117   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth));
118   PetscCall(MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc));
119 
120   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
121   /*----------------------------------------------------------*/
122   /* get data from symbolic products */
123   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
124   p_oth = NULL;
125   if (size >1) {
126     p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
127   }
128 
129   /* get apa for storing dense row A[i,:]*P */
130   apa = ptap->apa;
131 
132   api = ptap->api;
133   apj = ptap->apj;
134   /* trigger copy to CPU */
135   PetscCall(MatSeqAIJGetArrayRead(a->A,&dummy));
136   PetscCall(MatSeqAIJRestoreArrayRead(a->A,&dummy));
137   PetscCall(MatSeqAIJGetArrayRead(a->B,&dummy));
138   PetscCall(MatSeqAIJRestoreArrayRead(a->B,&dummy));
139   for (i=0; i<cm; i++) {
140     /* compute apa = A[i,:]*P */
141     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
142 
143     /* set values in C */
144     apJ  = apj + api[i];
145     cdnz = cd->i[i+1] - cd->i[i];
146     conz = co->i[i+1] - co->i[i];
147 
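    /* Scatter the dense accumulator apa into the three pieces of row i of C:
       apJ holds the sorted global column indices of row i of AP, so columns
       < cstart fill the first part of the off-diagonal block, columns in the
       diagonal range fill the diagonal block, and the remaining columns fill
       the rest of the off-diagonal block.  Each apa entry is zeroed after it
       is copied so the accumulator is clean for the next row. */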
148     /* 1st off-diagonal part of C */
149     ca = coa + co->i[i];
150     k  = 0;
151     for (k0=0; k0<conz; k0++) {
152       if (apJ[k] >= cstart) break;
153       ca[k0]      = apa[apJ[k]];
154       apa[apJ[k++]] = 0.0;
155     }
156 
157     /* diagonal part of C */
158     ca = cda + cd->i[i];
159     for (k1=0; k1<cdnz; k1++) {
160       ca[k1]      = apa[apJ[k]];
161       apa[apJ[k++]] = 0.0;
162     }
163 
164     /* 2nd off-diagonal part of C */
165     ca = coa + co->i[i];
166     for (; k0<conz; k0++) {
167       ca[k0]      = apa[apJ[k]];
168       apa[apJ[k++]] = 0.0;
169     }
170   }
171   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
172   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
173   PetscFunctionReturn(0);
174 }
175 
176 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat A,Mat P,PetscReal fill,Mat C)
177 {
178   MPI_Comm           comm;
179   PetscMPIInt        size;
180   Mat_APMPI          *ptap;
181   PetscFreeSpaceList free_space=NULL,current_space=NULL;
182   Mat_MPIAIJ         *a=(Mat_MPIAIJ*)A->data;
183   Mat_SeqAIJ         *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
184   PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
185   PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
186   PetscInt           *lnk,i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi;
187   PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n;
188   PetscBT            lnkbt;
189   PetscReal          afill;
190   MatType            mtype;
191 
192   PetscFunctionBegin;
193   MatCheckProduct(C,4);
194   PetscCheck(!C->product->data,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
195   PetscCall(PetscObjectGetComm((PetscObject)A,&comm));
196   PetscCallMPI(MPI_Comm_size(comm,&size));
197 
198   /* create struct Mat_APMPI and attach it to C later */
199   PetscCall(PetscNew(&ptap));
200 
201   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
202   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth));
203 
204   /* get P_loc by taking all local rows of P */
205   PetscCall(MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc));
206 
207   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
208   pi_loc = p_loc->i; pj_loc = p_loc->j;
209   if (size > 1) {
210     p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
211     pi_oth = p_oth->i; pj_oth = p_oth->j;
212   } else {
213     p_oth = NULL;
214     pi_oth = NULL; pj_oth = NULL;
215   }
216 
217   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
218   /*-------------------------------------------------------------------*/
219   PetscCall(PetscMalloc1(am+2,&api));
220   ptap->api = api;
221   api[0]    = 0;
222 
223   /* create and initialize a linked list */
224   PetscCall(PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt));
225 
226   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
227   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space));
228   current_space = free_space;
229 
230   MatPreallocateBegin(comm,am,pn,dnz,onz);
231   for (i=0; i<am; i++) {
232     /* diagonal portion of A */
233     nzi = adi[i+1] - adi[i];
234     for (j=0; j<nzi; j++) {
235       row  = *adj++;
236       pnz  = pi_loc[row+1] - pi_loc[row];
237       Jptr = pj_loc + pi_loc[row];
238       /* add non-zero cols of P into the sorted linked list lnk */
239       PetscCall(PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt));
240     }
241     /* off-diagonal portion of A */
242     nzi = aoi[i+1] - aoi[i];
243     for (j=0; j<nzi; j++) {
244       row  = *aoj++;
245       pnz  = pi_oth[row+1] - pi_oth[row];
246       Jptr = pj_oth + pi_oth[row];
247       PetscCall(PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt));
248     }
249     /* add possible missing diagonal entry */
250     if (C->force_diagonals) {
251       j = i + rstart; /* column index */
252       PetscCall(PetscLLCondensedAddSorted(1,&j,lnk,lnkbt));
253     }
254 
255     apnz     = lnk[0];
256     api[i+1] = api[i] + apnz;
257 
258     /* if free space is not available, double the total space in the list */
259     if (current_space->local_remaining<apnz) {
260       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space));
261       nspacedouble++;
262     }
263 
264     /* Copy data into free space, then initialize lnk */
265     PetscCall(PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt));
266     PetscCall(MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz));
267 
268     current_space->array           += apnz;
269     current_space->local_used      += apnz;
270     current_space->local_remaining -= apnz;
271   }
272 
273   /* Allocate space for apj, initialize apj, and */
274   /* destroy list of free space and other temporary array(s) */
275   PetscCall(PetscMalloc1(api[am]+1,&ptap->apj));
276   apj  = ptap->apj;
277   PetscCall(PetscFreeSpaceContiguous(&free_space,ptap->apj));
278   PetscCall(PetscLLDestroy(lnk,lnkbt));
279 
280   /* malloc apa to store dense row A[i,:]*P */
281   PetscCall(PetscCalloc1(pN,&ptap->apa));
282 
283   /* set and assemble symbolic parallel matrix C */
284   /*---------------------------------------------*/
285   PetscCall(MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE));
286   PetscCall(MatSetBlockSizesFromMats(C,A,P));
287 
288   PetscCall(MatGetType(A,&mtype));
289   PetscCall(MatSetType(C,mtype));
290   PetscCall(MatMPIAIJSetPreallocation(C,0,dnz,0,onz));
291   MatPreallocateEnd(dnz,onz);
292 
293   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
294   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
295   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
296   PetscCall(MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
297 
298   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
299   C->ops->productnumeric = MatProductNumeric_AB;
300 
301   /* attach the supporting struct to C for reuse */
302   C->product->data    = ptap;
303   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
304 
305   /* set MatInfo */
306   afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
307   if (afill < 1.0) afill = 1.0;
308   C->info.mallocs           = nspacedouble;
309   C->info.fill_ratio_given  = fill;
310   C->info.fill_ratio_needed = afill;
311 
312 #if defined(PETSC_USE_INFO)
313   if (api[am]) {
314     PetscCall(PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill));
315     PetscCall(PetscInfo(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill));
316   } else {
317     PetscCall(PetscInfo(C,"Empty matrix product\n"));
318   }
319 #endif
320   PetscFunctionReturn(0);
321 }
322 
323 /* ------------------------------------------------------- */
324 static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat,Mat,PetscReal,Mat);
325 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat,Mat,Mat);
326 
327 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AB(Mat C)
328 {
329   Mat_Product *product = C->product;
330   Mat         A = product->A,B=product->B;
331 
332   PetscFunctionBegin;
333   if (A->cmap->rstart != B->rmap->rstart || A->cmap->rend != B->rmap->rend)
334     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->cmap->rstart,A->cmap->rend,B->rmap->rstart,B->rmap->rend);
335 
336   C->ops->matmultsymbolic = MatMatMultSymbolic_MPIAIJ_MPIDense;
337   C->ops->productsymbolic = MatProductSymbolic_AB;
338   PetscFunctionReturn(0);
339 }
340 /* -------------------------------------------------------------------- */
341 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(Mat C)
342 {
343   Mat_Product *product = C->product;
344   Mat         A = product->A,B=product->B;
345 
346   PetscFunctionBegin;
347   if (A->rmap->rstart != B->rmap->rstart || A->rmap->rend != B->rmap->rend)
348     SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, (%" PetscInt_FMT ", %" PetscInt_FMT ") != (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
349 
350   C->ops->transposematmultsymbolic = MatTransposeMatMultSymbolic_MPIAIJ_MPIDense;
351   C->ops->productsymbolic          = MatProductSymbolic_AtB;
352   PetscFunctionReturn(0);
353 }
354 
355 /* --------------------------------------------------------------------- */
356 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ_MPIDense(Mat C)
357 {
358   Mat_Product    *product = C->product;
359 
360   PetscFunctionBegin;
361   switch (product->type) {
362   case MATPRODUCT_AB:
363     PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AB(C));
364     break;
365   case MATPRODUCT_AtB:
366     PetscCall(MatProductSetFromOptions_MPIAIJ_MPIDense_AtB(C));
367     break;
368   default:
369     break;
370   }
371   PetscFunctionReturn(0);
372 }
373 /* ------------------------------------------------------- */
374 
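/* Communication context for MPIAIJ * MPIDense products; it is attached to
   C->product->data by MatMatMultSymbolic_MPIAIJ_MPIDense() and freed by
   MatMPIAIJ_MPIDenseDestroy().  workB (and workB1, for a possibly smaller
   final column block) stores the off-process rows of B needed for the local
   product; stype/rtype are the MPI derived datatypes used to communicate
   those rows without extra packing buffers. */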
375 typedef struct {
376   Mat          workB,workB1;
377   MPI_Request  *rwaits,*swaits;
378   PetscInt     nsends,nrecvs;
379   MPI_Datatype *stype,*rtype;
380   PetscInt     blda;
381 } MPIAIJ_MPIDense;
382 
383 PetscErrorCode MatMPIAIJ_MPIDenseDestroy(void *ctx)
384 {
385   MPIAIJ_MPIDense *contents = (MPIAIJ_MPIDense*)ctx;
386   PetscInt        i;
387 
388   PetscFunctionBegin;
389   PetscCall(MatDestroy(&contents->workB));
390   PetscCall(MatDestroy(&contents->workB1));
391   for (i=0; i<contents->nsends; i++) {
392     PetscCallMPI(MPI_Type_free(&contents->stype[i]));
393   }
394   for (i=0; i<contents->nrecvs; i++) {
395     PetscCallMPI(MPI_Type_free(&contents->rtype[i]));
396   }
397   PetscCall(PetscFree4(contents->stype,contents->rtype,contents->rwaits,contents->swaits));
398   PetscCall(PetscFree(contents));
399   PetscFunctionReturn(0);
400 }
401 
402 static PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIDense(Mat A,Mat B,PetscReal fill,Mat C)
403 {
404   Mat_MPIAIJ      *aij=(Mat_MPIAIJ*)A->data;
405   PetscInt        nz=aij->B->cmap->n,nsends,nrecvs,i,nrows_to,j,blda,m,M,n,N;
406   MPIAIJ_MPIDense *contents;
407   VecScatter      ctx=aij->Mvctx;
408   PetscInt        Am=A->rmap->n,Bm=B->rmap->n,BN=B->cmap->N,Bbn,Bbn1,bs,nrows_from,numBb;
409   MPI_Comm        comm;
410   MPI_Datatype    type1,*stype,*rtype;
411   const PetscInt  *sindices,*sstarts,*rstarts;
412   PetscMPIInt     *disp;
413   PetscBool       cisdense;
414 
415   PetscFunctionBegin;
416   MatCheckProduct(C,4);
417   PetscCheck(!C->product->data,PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data not empty");
418   PetscCall(PetscObjectGetComm((PetscObject)A,&comm));
419   PetscCall(PetscObjectBaseTypeCompare((PetscObject)C,MATMPIDENSE,&cisdense));
420   if (!cisdense) {
421     PetscCall(MatSetType(C,((PetscObject)B)->type_name));
422   }
423   PetscCall(MatGetLocalSize(C,&m,&n));
424   PetscCall(MatGetSize(C,&M,&N));
425   if (m == PETSC_DECIDE || n == PETSC_DECIDE || M == PETSC_DECIDE || N == PETSC_DECIDE) {
426     PetscCall(MatSetSizes(C,Am,B->cmap->n,A->rmap->N,BN));
427   }
428   PetscCall(MatSetBlockSizesFromMats(C,A,B));
429   PetscCall(MatSetUp(C));
430   PetscCall(MatDenseGetLDA(B,&blda));
431   PetscCall(PetscNew(&contents));
432 
433   PetscCall(VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL));
434   PetscCall(VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL));
435 
436   /* Create column block of B and C for memory scalability when BN is too large */
437   /* Estimate Bbn, column size of Bb */
438   if (nz) {
439     Bbn1 = 2*Am*BN/nz;
440     if (!Bbn1) Bbn1 = 1;
441   } else Bbn1 = BN;
442 
443   bs = PetscAbs(B->cmap->bs);
444   Bbn1 = Bbn1/bs*bs; /* round Bbn1 down to a multiple of bs */
445   if (Bbn1 > BN) Bbn1 = BN;
446   PetscCallMPI(MPI_Allreduce(&Bbn1,&Bbn,1,MPIU_INT,MPI_MAX,comm));
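  /* The estimate above picks Bbn1 so that the nz x Bbn1 work buffer holds roughly
     2*Am*BN entries, i.e. about twice the local part of C; taking the MPI_MAX
     ensures every process uses the same column block size. */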
447 
448   /* Enable runtime option for Bbn */
449   PetscOptionsBegin(comm,((PetscObject)C)->prefix,"MatMatMult","Mat");
450   PetscCall(PetscOptionsInt("-matmatmult_Bbn","Number of columns in Bb","MatMatMult",Bbn,&Bbn,NULL));
451   PetscOptionsEnd();
452   Bbn  = PetscMin(Bbn,BN);
453 
454   if (Bbn > 0 && Bbn < BN) {
455     numBb = BN/Bbn;
456     Bbn1 = BN - numBb*Bbn;
457   } else numBb = 0;
458 
459   if (numBb) {
460     PetscCall(PetscInfo(C,"use Bb, BN=%" PetscInt_FMT ", Bbn=%" PetscInt_FMT "; numBb=%" PetscInt_FMT "\n",BN,Bbn,numBb));
461     if (Bbn1) { /* Create workB1 for the remaining columns */
462       PetscCall(PetscInfo(C,"use Bb1, BN=%" PetscInt_FMT ", Bbn1=%" PetscInt_FMT "\n",BN,Bbn1));
463       /* Create work matrix used to store off processor rows of B needed for local product */
464       PetscCall(MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn1,NULL,&contents->workB1));
465     } else contents->workB1 = NULL;
466   }
467 
468   /* Create work matrix used to store off processor rows of B needed for local product */
469   PetscCall(MatCreateSeqDense(PETSC_COMM_SELF,nz,Bbn,NULL,&contents->workB));
470 
471   /* Use MPI derived data type to reduce memory required by the send/recv buffers */
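  /* Each send type selects the required rows out of one column of B and is resized
     to an extent of blda scalars, so one MPI_Isend with count equal to the number of
     columns of B walks across all columns in place.  Each receive type is a contiguous
     block of nrows_from scalars resized to an extent of nz scalars, so the incoming
     blocks land directly in the corresponding columns of workB. */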
472   PetscCall(PetscMalloc4(nsends,&stype,nrecvs,&rtype,nrecvs,&contents->rwaits,nsends,&contents->swaits));
473   contents->stype  = stype;
474   contents->nsends = nsends;
475 
476   contents->rtype  = rtype;
477   contents->nrecvs = nrecvs;
478   contents->blda   = blda;
479 
480   PetscCall(PetscMalloc1(Bm+1,&disp));
481   for (i=0; i<nsends; i++) {
482     nrows_to = sstarts[i+1]-sstarts[i];
483     for (j=0; j<nrows_to; j++) disp[j] = sindices[sstarts[i]+j]; /* rowB to be sent */
484     PetscCallMPI(MPI_Type_create_indexed_block(nrows_to,1,disp,MPIU_SCALAR,&type1));
485     PetscCallMPI(MPI_Type_create_resized(type1,0,blda*sizeof(PetscScalar),&stype[i]));
486     PetscCallMPI(MPI_Type_commit(&stype[i]));
487     PetscCallMPI(MPI_Type_free(&type1));
488   }
489 
490   for (i=0; i<nrecvs; i++) {
491     /* received values from a process form a (nrows_from x Bbn) row block in workB (column-wise) */
492     nrows_from = rstarts[i+1]-rstarts[i];
493     disp[0] = 0;
494     PetscCallMPI(MPI_Type_create_indexed_block(1,nrows_from,disp,MPIU_SCALAR,&type1));
495     PetscCallMPI(MPI_Type_create_resized(type1,0,nz*sizeof(PetscScalar),&rtype[i]));
496     PetscCallMPI(MPI_Type_commit(&rtype[i]));
497     PetscCallMPI(MPI_Type_free(&type1));
498   }
499 
500   PetscCall(PetscFree(disp));
501   PetscCall(VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,NULL,NULL));
502   PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,NULL,NULL));
503   PetscCall(MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE));
504   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
505   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
506   PetscCall(MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
507 
508   C->product->data = contents;
509   C->product->destroy = MatMPIAIJ_MPIDenseDestroy;
510   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIDense;
511   PetscFunctionReturn(0);
512 }
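/* The column block size used above can also be chosen at run time through the
   -matmatmult_Bbn option registered in the symbolic phase (possibly prefixed by
   the product's options prefix), e.g. as a sketch:
       ./app -matmatmult_Bbn 128
   which bounds the width of workB and makes MatMatMultNumeric_MPIAIJ_MPIDense()
   process B and C one column block at a time. */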
513 
514 PETSC_INTERN PetscErrorCode MatMatMultNumericAdd_SeqAIJ_SeqDense(Mat,Mat,Mat,const PetscBool);
515 /*
516     Performs an efficient scatter on the rows of B needed by this process; this is
517     a modification of the VecScatterBegin_() routines.
518 
519     Input: Bbidx = 0: B = Bb
520                  = 1: B = Bb1, see MatMatMultSymbolic_MPIAIJ_MPIDense()
521 */
522 PetscErrorCode MatMPIDenseScatter(Mat A,Mat B,PetscInt Bbidx,Mat C,Mat *outworkB)
523 {
524   Mat_MPIAIJ        *aij = (Mat_MPIAIJ*)A->data;
525   const PetscScalar *b;
526   PetscScalar       *rvalues;
527   VecScatter        ctx = aij->Mvctx;
528   const PetscInt    *sindices,*sstarts,*rstarts;
529   const PetscMPIInt *sprocs,*rprocs;
530   PetscInt          i,nsends,nrecvs;
531   MPI_Request       *swaits,*rwaits;
532   MPI_Comm          comm;
533   PetscMPIInt       tag=((PetscObject)ctx)->tag,ncols=B->cmap->N,nrows=aij->B->cmap->n,nsends_mpi,nrecvs_mpi;
534   MPIAIJ_MPIDense   *contents;
535   Mat               workB;
536   MPI_Datatype      *stype,*rtype;
537   PetscInt          blda;
538 
539   PetscFunctionBegin;
540   MatCheckProduct(C,4);
541   PetscCheck(C->product->data,PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
542   contents = (MPIAIJ_MPIDense*)C->product->data;
543   PetscCall(VecScatterGetRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL/*bs*/));
544   PetscCall(VecScatterGetRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL/*bs*/));
545   PetscCall(PetscMPIIntCast(nsends,&nsends_mpi));
546   PetscCall(PetscMPIIntCast(nrecvs,&nrecvs_mpi));
547   if (Bbidx == 0) workB = *outworkB = contents->workB;
548   else workB = *outworkB = contents->workB1;
549   PetscCheck(nrows == workB->rmap->n,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Number of rows of workB %" PetscInt_FMT " not equal to columns of aij->B %d",workB->rmap->n,nrows);
550   swaits = contents->swaits;
551   rwaits = contents->rwaits;
552 
553   PetscCall(MatDenseGetArrayRead(B,&b));
554   PetscCall(MatDenseGetLDA(B,&blda));
555   PetscCheck(blda == contents->blda,PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot reuse an input matrix with lda %" PetscInt_FMT " != %" PetscInt_FMT,blda,contents->blda);
556   PetscCall(MatDenseGetArray(workB,&rvalues));
557 
558   /* Post recv, use MPI derived data type to save memory */
559   PetscCall(PetscObjectGetComm((PetscObject)C,&comm));
560   rtype = contents->rtype;
561   for (i=0; i<nrecvs; i++) {
562     PetscCallMPI(MPI_Irecv(rvalues+(rstarts[i]-rstarts[0]),ncols,rtype[i],rprocs[i],tag,comm,rwaits+i));
563   }
564 
565   stype = contents->stype;
566   for (i=0; i<nsends; i++) {
567     PetscCallMPI(MPI_Isend(b,ncols,stype[i],sprocs[i],tag,comm,swaits+i));
568   }
569 
570   if (nrecvs) PetscCallMPI(MPI_Waitall(nrecvs_mpi,rwaits,MPI_STATUSES_IGNORE));
571   if (nsends) PetscCallMPI(MPI_Waitall(nsends_mpi,swaits,MPI_STATUSES_IGNORE));
572 
573   PetscCall(VecScatterRestoreRemote_Private(ctx,PETSC_TRUE/*send*/,&nsends,&sstarts,&sindices,&sprocs,NULL));
574   PetscCall(VecScatterRestoreRemoteOrdered_Private(ctx,PETSC_FALSE/*recv*/,&nrecvs,&rstarts,NULL,&rprocs,NULL));
575   PetscCall(MatDenseRestoreArrayRead(B,&b));
576   PetscCall(MatDenseRestoreArray(workB,&rvalues));
577   PetscFunctionReturn(0);
578 }
579 
580 static PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIDense(Mat A,Mat B,Mat C)
581 {
582   Mat_MPIAIJ      *aij    = (Mat_MPIAIJ*)A->data;
583   Mat_MPIDense    *bdense = (Mat_MPIDense*)B->data;
584   Mat_MPIDense    *cdense = (Mat_MPIDense*)C->data;
585   Mat             workB;
586   MPIAIJ_MPIDense *contents;
587 
588   PetscFunctionBegin;
589   MatCheckProduct(C,3);
590   PetscCheck(C->product->data,PetscObjectComm((PetscObject)C),PETSC_ERR_PLIB,"Product data empty");
591   contents = (MPIAIJ_MPIDense*)C->product->data;
592   /* diagonal block of A times all local rows of B */
593   /* TODO: this calls a symbolic multiplication every time, which could be avoided */
594   PetscCall(MatMatMult(aij->A,bdense->A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&cdense->A));
595   if (contents->workB->cmap->n == B->cmap->N) {
596     /* get off processor parts of B needed to complete C=A*B */
597     PetscCall(MatMPIDenseScatter(A,B,0,C,&workB));
598 
599     /* off-diagonal block of A times nonlocal rows of B */
600     PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE));
601   } else {
602     Mat       Bb,Cb;
603     PetscInt  BN=B->cmap->N,n=contents->workB->cmap->n,i;
604     PetscBool ccpu;
605 
606     PetscCheck(n > 0,PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Column block size %" PetscInt_FMT " must be positive",n);
607     /* Prevent from unneeded copies back and forth from the GPU
608        when getting and restoring the submatrix
609        We need a proper GPU code for AIJ * dense in parallel */
610     PetscCall(MatBoundToCPU(C,&ccpu));
611     PetscCall(MatBindToCPU(C,PETSC_TRUE));
612     for (i=0; i<BN; i+=n) {
613       PetscCall(MatDenseGetSubMatrix(B,i,PetscMin(i+n,BN),&Bb));
614       PetscCall(MatDenseGetSubMatrix(C,i,PetscMin(i+n,BN),&Cb));
615 
616       /* get off processor parts of B needed to complete C=A*B */
617       PetscCall(MatMPIDenseScatter(A,Bb,(i+n)>BN,C,&workB));
618 
619       /* off-diagonal block of A times nonlocal rows of B */
620       cdense = (Mat_MPIDense*)Cb->data;
621       PetscCall(MatMatMultNumericAdd_SeqAIJ_SeqDense(aij->B,workB,cdense->A,PETSC_TRUE));
622       PetscCall(MatDenseRestoreSubMatrix(B,&Bb));
623       PetscCall(MatDenseRestoreSubMatrix(C,&Cb));
624     }
625     PetscCall(MatBindToCPU(C,ccpu));
626   }
627   PetscFunctionReturn(0);
628 }
629 
630 PetscErrorCode MatMatMultNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
631 {
632   Mat_MPIAIJ        *a   = (Mat_MPIAIJ*)A->data,*c=(Mat_MPIAIJ*)C->data;
633   Mat_SeqAIJ        *ad  = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
634   Mat_SeqAIJ        *cd  = (Mat_SeqAIJ*)(c->A)->data,*co=(Mat_SeqAIJ*)(c->B)->data;
635   PetscInt          *adi = ad->i,*adj,*aoi=ao->i,*aoj;
636   PetscScalar       *ada,*aoa,*cda=cd->a,*coa=co->a;
637   Mat_SeqAIJ        *p_loc,*p_oth;
638   PetscInt          *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pj;
639   PetscScalar       *pa_loc,*pa_oth,*pa,valtmp,*ca;
640   PetscInt          cm    = C->rmap->n,anz,pnz;
641   Mat_APMPI         *ptap;
642   PetscScalar       *apa_sparse;
643   const PetscScalar *dummy;
644   PetscInt          *api,*apj,*apJ,i,j,k,row;
645   PetscInt          cstart = C->cmap->rstart;
646   PetscInt          cdnz,conz,k0,k1,nextp;
647   MPI_Comm          comm;
648   PetscMPIInt       size;
649 
650   PetscFunctionBegin;
651   MatCheckProduct(C,3);
652   ptap = (Mat_APMPI*)C->product->data;
653   PetscCheck(ptap,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be computed. Missing data");
654   PetscCall(PetscObjectGetComm((PetscObject)C,&comm));
655   PetscCallMPI(MPI_Comm_size(comm,&size));
656   PetscCheck(ptap->P_oth || size <= 1,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"AP cannot be reused. Do not call MatProductClear()");
657 
658   /* flag CPU mask for C */
659 #if defined(PETSC_HAVE_DEVICE)
660   if (C->offloadmask != PETSC_OFFLOAD_UNALLOCATED) C->offloadmask = PETSC_OFFLOAD_CPU;
661   if (c->A->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->A->offloadmask = PETSC_OFFLOAD_CPU;
662   if (c->B->offloadmask != PETSC_OFFLOAD_UNALLOCATED) c->B->offloadmask = PETSC_OFFLOAD_CPU;
663 #endif
664   apa_sparse = ptap->apa;
665 
666   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
667   /*-----------------------------------------------------*/
668   /* update numerical values of P_oth and P_loc */
669   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth));
670   PetscCall(MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc));
671 
672   /* 2) compute numeric C_loc = A_loc*P = Ad*P_loc + Ao*P_oth */
673   /*----------------------------------------------------------*/
674   /* get data from symbolic products */
675   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
676   pi_loc = p_loc->i; pj_loc = p_loc->j; pa_loc = p_loc->a;
677   if (size >1) {
678     p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
679     pi_oth = p_oth->i; pj_oth = p_oth->j; pa_oth = p_oth->a;
680   } else {
681     p_oth = NULL; pi_oth = NULL; pj_oth = NULL; pa_oth = NULL;
682   }
683 
684   /* trigger copy to CPU */
685   PetscCall(MatSeqAIJGetArrayRead(a->A,&dummy));
686   PetscCall(MatSeqAIJRestoreArrayRead(a->A,&dummy));
687   PetscCall(MatSeqAIJGetArrayRead(a->B,&dummy));
688   PetscCall(MatSeqAIJRestoreArrayRead(a->B,&dummy));
689   api = ptap->api;
690   apj = ptap->apj;
691   for (i=0; i<cm; i++) {
692     apJ = apj + api[i];
693 
694     /* diagonal portion of A */
695     anz = adi[i+1] - adi[i];
696     adj = ad->j + adi[i];
697     ada = ad->a + adi[i];
698     for (j=0; j<anz; j++) {
699       row = adj[j];
700       pnz = pi_loc[row+1] - pi_loc[row];
701       pj  = pj_loc + pi_loc[row];
702       pa  = pa_loc + pi_loc[row];
703       /* perform sparse axpy */
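      /* Both apJ (the symbolic pattern of row i of AP) and pj (the current row of P)
         are sorted, and the P row pattern is a subset of the AP row pattern, so one
         forward sweep over apJ matches every entry of pj and accumulates the product
         into the compressed accumulator apa_sparse. */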
704       valtmp = ada[j];
705       nextp  = 0;
706       for (k=0; nextp<pnz; k++) {
707         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
708           apa_sparse[k] += valtmp*pa[nextp++];
709         }
710       }
711       PetscCall(PetscLogFlops(2.0*pnz));
712     }
713 
714     /* off-diagonal portion of A */
715     anz = aoi[i+1] - aoi[i];
716     aoj = ao->j + aoi[i];
717     aoa = ao->a + aoi[i];
718     for (j=0; j<anz; j++) {
719       row = aoj[j];
720       pnz = pi_oth[row+1] - pi_oth[row];
721       pj  = pj_oth + pi_oth[row];
722       pa  = pa_oth + pi_oth[row];
723       /* perform sparse axpy */
724       valtmp = aoa[j];
725       nextp  = 0;
726       for (k=0; nextp<pnz; k++) {
727         if (apJ[k] == pj[nextp]) { /* column of AP == column of P */
728           apa_sparse[k] += valtmp*pa[nextp++];
729         }
730       }
731       PetscCall(PetscLogFlops(2.0*pnz));
732     }
733 
734     /* set values in C */
735     cdnz = cd->i[i+1] - cd->i[i];
736     conz = co->i[i+1] - co->i[i];
737 
738     /* 1st off-diagonal part of C */
739     ca = coa + co->i[i];
740     k  = 0;
741     for (k0=0; k0<conz; k0++) {
742       if (apJ[k] >= cstart) break;
743       ca[k0]        = apa_sparse[k];
744       apa_sparse[k] = 0.0;
745       k++;
746     }
747 
748     /* diagonal part of C */
749     ca = cda + cd->i[i];
750     for (k1=0; k1<cdnz; k1++) {
751       ca[k1]        = apa_sparse[k];
752       apa_sparse[k] = 0.0;
753       k++;
754     }
755 
756     /* 2nd off-diagonal part of C */
757     ca = coa + co->i[i];
758     for (; k0<conz; k0++) {
759       ca[k0]        = apa_sparse[k];
760       apa_sparse[k] = 0.0;
761       k++;
762     }
763   }
764   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
765   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
766   PetscFunctionReturn(0);
767 }
768 
769 /* same as MatMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(), except using the scalable LLCondensed variant to avoid the O(pN) memory requirement */
770 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat C)
771 {
772   MPI_Comm           comm;
773   PetscMPIInt        size;
774   Mat_APMPI          *ptap;
775   PetscFreeSpaceList free_space = NULL,current_space=NULL;
776   Mat_MPIAIJ         *a  = (Mat_MPIAIJ*)A->data;
777   Mat_SeqAIJ         *ad = (Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc,*p_oth;
778   PetscInt           *pi_loc,*pj_loc,*pi_oth,*pj_oth,*dnz,*onz;
779   PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,rstart=A->rmap->rstart;
780   PetscInt           i,pnz,row,*api,*apj,*Jptr,apnz,nspacedouble=0,j,nzi,*lnk,apnz_max=1;
781   PetscInt           am=A->rmap->n,pn=P->cmap->n,pm=P->rmap->n,lsize=pn+20;
782   PetscReal          afill;
783   MatType            mtype;
784 
785   PetscFunctionBegin;
786   MatCheckProduct(C,4);
787   PetscCheck(!C->product->data,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
788   PetscCall(PetscObjectGetComm((PetscObject)A,&comm));
789   PetscCallMPI(MPI_Comm_size(comm,&size));
790 
791   /* create struct Mat_APMPI and attach it to C later */
792   PetscCall(PetscNew(&ptap));
793 
794   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
795   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth));
796 
797   /* get P_loc by taking all local rows of P */
798   PetscCall(MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc));
799 
800   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
801   pi_loc = p_loc->i; pj_loc = p_loc->j;
802   if (size > 1) {
803     p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
804     pi_oth = p_oth->i; pj_oth = p_oth->j;
805   } else {
806     p_oth  = NULL;
807     pi_oth = NULL; pj_oth = NULL;
808   }
809 
810   /* first, compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth */
811   /*-------------------------------------------------------------------*/
812   PetscCall(PetscMalloc1(am+2,&api));
813   ptap->api = api;
814   api[0]    = 0;
815 
816   PetscCall(PetscLLCondensedCreate_Scalable(lsize,&lnk));
817 
818   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
819   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space));
820   current_space = free_space;
821   MatPreallocateBegin(comm,am,pn,dnz,onz);
822   for (i=0; i<am; i++) {
823     /* diagonal portion of A */
824     nzi = adi[i+1] - adi[i];
825     for (j=0; j<nzi; j++) {
826       row  = *adj++;
827       pnz  = pi_loc[row+1] - pi_loc[row];
828       Jptr = pj_loc + pi_loc[row];
829       /* Expand list if it is not long enough */
830       if (pnz+apnz_max > lsize) {
831         lsize = pnz+apnz_max;
832         PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
833       }
834       /* add non-zero cols of P into the sorted linked list lnk */
835       PetscCall(PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk));
836       apnz     = *lnk; /* The first element in the list is the number of items in the list */
837       api[i+1] = api[i] + apnz;
838       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
839     }
840     /* off-diagonal portion of A */
841     nzi = aoi[i+1] - aoi[i];
842     for (j=0; j<nzi; j++) {
843       row  = *aoj++;
844       pnz  = pi_oth[row+1] - pi_oth[row];
845       Jptr = pj_oth + pi_oth[row];
846       /* Expand list if it is not long enough */
847       if (pnz+apnz_max > lsize) {
848         lsize = pnz + apnz_max;
849         PetscCall(PetscLLCondensedExpand_Scalable(lsize, &lnk));
850       }
851       /* add non-zero cols of P into the sorted linked list lnk */
852       PetscCall(PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk));
853       apnz     = *lnk;  /* The first element in the list is the number of items in the list */
854       api[i+1] = api[i] + apnz;
855       if (apnz > apnz_max) apnz_max = apnz + 1; /* '1' for diagonal entry */
856     }
857 
858     /* add missing diagonal entry */
859     if (C->force_diagonals) {
860       j = i + rstart; /* column index */
861       PetscCall(PetscLLCondensedAddSorted_Scalable(1,&j,lnk));
862     }
863 
864     apnz     = *lnk;
865     api[i+1] = api[i] + apnz;
866     if (apnz > apnz_max) apnz_max = apnz;
867 
868     /* if free space is not available, double the total space in the list */
869     if (current_space->local_remaining<apnz) {
870       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space));
871       nspacedouble++;
872     }
873 
874     /* Copy data into free space, then initialize lnk */
875     PetscCall(PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk));
876     PetscCall(MatPreallocateSet(i+rstart,apnz,current_space->array,dnz,onz));
877 
878     current_space->array           += apnz;
879     current_space->local_used      += apnz;
880     current_space->local_remaining -= apnz;
881   }
882 
883   /* Allocate space for apj, initialize apj, and */
884   /* destroy list of free space and other temporary array(s) */
885   PetscCall(PetscMalloc1(api[am]+1,&ptap->apj));
886   apj  = ptap->apj;
887   PetscCall(PetscFreeSpaceContiguous(&free_space,ptap->apj));
888   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
889 
890   /* create and assemble symbolic parallel matrix C */
891   /*----------------------------------------------------*/
892   PetscCall(MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE));
893   PetscCall(MatSetBlockSizesFromMats(C,A,P));
894   PetscCall(MatGetType(A,&mtype));
895   PetscCall(MatSetType(C,mtype));
896   PetscCall(MatMPIAIJSetPreallocation(C,0,dnz,0,onz));
897   MatPreallocateEnd(dnz,onz);
898 
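  /* Unlike the nonscalable version, which allocates a dense accumulator of length
     P->cmap->N, the longest AP row (apnz_max) suffices here because the numeric
     kernel accumulates on the compressed row pattern. */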
899   /* malloc apa for assembly C */
900   PetscCall(PetscCalloc1(apnz_max,&ptap->apa));
901 
902   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
903   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
904   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
905   PetscCall(MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
906 
907   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ;
908   C->ops->productnumeric = MatProductNumeric_AB;
909 
910   /* attach the supporting struct to C for reuse */
911   C->product->data    = ptap;
912   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
913 
914   /* set MatInfo */
915   afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
916   if (afill < 1.0) afill = 1.0;
917   C->info.mallocs           = nspacedouble;
918   C->info.fill_ratio_given  = fill;
919   C->info.fill_ratio_needed = afill;
920 
921 #if defined(PETSC_USE_INFO)
922   if (api[am]) {
923     PetscCall(PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill));
924     PetscCall(PetscInfo(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill));
925   } else {
926     PetscCall(PetscInfo(C,"Empty matrix product\n"));
927   }
928 #endif
929   PetscFunctionReturn(0);
930 }
931 
932 /* This function is needed for the seqMPI matrix-matrix multiplication.      */
933 /* Three sorted input arrays are merged into one sorted output array, whose  */
934 /* length is returned in size4. Duplicate entries only show up once.         */
935 static void Merge3SortedArrays(PetscInt  size1, PetscInt *in1,
936                                PetscInt  size2, PetscInt *in2,
937                                PetscInt  size3, PetscInt *in3,
938                                PetscInt *size4, PetscInt *out)
939 {
940   PetscInt i = 0, j = 0, k = 0, l = 0;
941 
942   /* Traverse all three arrays */
943   while (i<size1 && j<size2 && k<size3) {
944     if (in1[i] < in2[j] && in1[i] < in3[k]) {
945       out[l++] = in1[i++];
946     }
947     else if (in2[j] < in1[i] && in2[j] < in3[k]) {
948       out[l++] = in2[j++];
949     }
950     else if (in3[k] < in1[i] && in3[k] < in2[j]) {
951       out[l++] = in3[k++];
952     }
953     else if (in1[i] == in2[j] && in1[i] < in3[k]) {
954       out[l++] = in1[i];
955       i++, j++;
956     }
957     else if (in1[i] == in3[k] && in1[i] < in2[j]) {
958       out[l++] = in1[i];
959       i++, k++;
960     }
961     else if (in3[k] == in2[j] && in2[j] < in1[i])  {
962       out[l++] = in2[j];
963       k++, j++;
964     }
965     else if (in1[i] == in2[j] && in1[i] == in3[k]) {
966       out[l++] = in1[i];
967       i++, j++, k++;
968     }
969   }
970 
971   /* Traverse two remaining arrays */
972   while (i<size1 && j<size2) {
973     if (in1[i] < in2[j]) {
974       out[l++] = in1[i++];
975     }
976     else if (in1[i] > in2[j]) {
977       out[l++] = in2[j++];
978     }
979     else {
980       out[l++] = in1[i];
981       i++, j++;
982     }
983   }
984 
985   while (i<size1 && k<size3) {
986     if (in1[i] < in3[k]) {
987       out[l++] = in1[i++];
988     }
989     else if (in1[i] > in3[k]) {
990       out[l++] = in3[k++];
991     }
992     else {
993       out[l++] = in1[i];
994       i++, k++;
995     }
996   }
997 
998   while (k<size3 && j<size2)  {
999     if (in3[k] < in2[j]) {
1000       out[l++] = in3[k++];
1001     }
1002     else if (in3[k] > in2[j]) {
1003       out[l++] = in2[j++];
1004     }
1005     else {
1006       out[l++] = in3[k];
1007       k++, j++;
1008     }
1009   }
1010 
1011   /* Traverse one remaining array */
1012   while (i<size1) out[l++] = in1[i++];
1013   while (j<size2) out[l++] = in2[j++];
1014   while (k<size3) out[l++] = in3[k++];
1015 
1016   *size4 = l;
1017 }
1018 
1019 /* This matrix-matrix multiplication algorithm divides the multiplication into three multiplications and  */
1020 /* adds up the products. Two of these three multiplications are performed with existing (sequential)      */
1021 /* matrix-matrix multiplications.  */
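/* With P split into diagonal and off-diagonal blocks, the product is              */
/* AP = A_diag*P_loc_diag + A_diag*P_loc_off + A_off*P_oth.  The first and last    */
/* terms reuse the existing sequential symbolic products (adpd and aopoth below);  */
/* only A_diag*P_loc_off is built here with a condensed linked list, and the three */
/* sorted row patterns are merged by Merge3SortedArrays().                          */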
1022 PetscErrorCode MatMatMultSymbolic_MPIAIJ_MPIAIJ_seqMPI(Mat A, Mat P, PetscReal fill, Mat C)
1023 {
1024   MPI_Comm           comm;
1025   PetscMPIInt        size;
1026   Mat_APMPI          *ptap;
1027   PetscFreeSpaceList free_space_diag=NULL, current_space=NULL;
1028   Mat_MPIAIJ         *a  =(Mat_MPIAIJ*)A->data;
1029   Mat_SeqAIJ         *ad =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*p_loc;
1030   Mat_MPIAIJ         *p  =(Mat_MPIAIJ*)P->data;
1031   Mat_SeqAIJ         *adpd_seq, *p_off, *aopoth_seq;
1032   PetscInt           adponz, adpdnz;
1033   PetscInt           *pi_loc,*dnz,*onz;
1034   PetscInt           *adi=ad->i,*adj=ad->j,*aoi=ao->i,rstart=A->rmap->rstart;
1035   PetscInt           *lnk,i, i1=0,pnz,row,*adpoi,*adpoj, *api, *adpoJ, *aopJ, *apJ,*Jptr, aopnz, nspacedouble=0,j,nzi,
1036                      *apj,apnz, *adpdi, *adpdj, *adpdJ, *poff_i, *poff_j, *j_temp, *aopothi, *aopothj;
1037   PetscInt           am=A->rmap->n,pN=P->cmap->N,pn=P->cmap->n,pm=P->rmap->n, p_colstart, p_colend;
1038   PetscBT            lnkbt;
1039   PetscReal          afill;
1040   PetscMPIInt        rank;
1041   Mat                adpd, aopoth;
1042   MatType            mtype;
1043   const char         *prefix;
1044 
1045   PetscFunctionBegin;
1046   MatCheckProduct(C,4);
1047   PetscCheck(!C->product->data,PETSC_COMM_SELF,PETSC_ERR_PLIB,"Extra product struct not empty");
1048   PetscCall(PetscObjectGetComm((PetscObject)A,&comm));
1049   PetscCallMPI(MPI_Comm_size(comm,&size));
1050   PetscCallMPI(MPI_Comm_rank(comm, &rank));
1051   PetscCall(MatGetOwnershipRangeColumn(P, &p_colstart, &p_colend));
1052 
1053   /* create struct Mat_APMPI and attach it to C later */
1054   PetscCall(PetscNew(&ptap));
1055 
1056   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
1057   PetscCall(MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth));
1058 
1059   /* get P_loc by taking all local rows of P */
1060   PetscCall(MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc));
1061 
1062   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1063   pi_loc = p_loc->i;
1064 
1065   /* Allocate memory for the i arrays of the matrices A*P, A_diag*P_off and A_offd * P */
1066   PetscCall(PetscMalloc1(am+2,&api));
1067   PetscCall(PetscMalloc1(am+2,&adpoi));
1068 
1069   adpoi[0]    = 0;
1070   ptap->api = api;
1071   api[0] = 0;
1072 
1073   /* create and initialize a linked list; it will be used for both A_diag * P_loc_off and A_off * P_oth */
1074   PetscCall(PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt));
1075   MatPreallocateBegin(comm,am,pn,dnz,onz);
1076 
1077   /* Symbolic calc of A_loc_diag * P_loc_diag */
1078   PetscCall(MatGetOptionsPrefix(A,&prefix));
1079   PetscCall(MatProductCreate(a->A,p->A,NULL,&adpd));
1081   PetscCall(MatSetOptionsPrefix(adpd,prefix));
1082   PetscCall(MatAppendOptionsPrefix(adpd,"inner_diag_"));
1083 
1084   PetscCall(MatProductSetType(adpd,MATPRODUCT_AB));
1085   PetscCall(MatProductSetAlgorithm(adpd,"sorted"));
1086   PetscCall(MatProductSetFill(adpd,fill));
1087   PetscCall(MatProductSetFromOptions(adpd));
1088 
1089   adpd->force_diagonals = C->force_diagonals;
1090   PetscCall(MatProductSymbolic(adpd));
1091 
1092   adpd_seq = (Mat_SeqAIJ*)((adpd)->data);
1093   adpdi = adpd_seq->i; adpdj = adpd_seq->j;
1094   p_off = (Mat_SeqAIJ*)((p->B)->data);
1095   poff_i = p_off->i; poff_j = p_off->j;
1096 
1097   /* j_temp stores indices of a result row before they are added to the linked list */
1098   PetscCall(PetscMalloc1(pN+2,&j_temp));
1099 
1100   /* Symbolic calc of the A_diag * p_loc_off */
1101   /* Initial FreeSpace size is fill*(nnz(A)+nnz(P)) */
1102   PetscCall(PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(adi[am],PetscIntSumTruncate(aoi[am],pi_loc[pm]))),&free_space_diag));
1103   current_space = free_space_diag;
1104 
1105   for (i=0; i<am; i++) {
1106     /* A_diag * P_loc_off */
1107     nzi = adi[i+1] - adi[i];
1108     for (j=0; j<nzi; j++) {
1109       row  = *adj++;
1110       pnz  = poff_i[row+1] - poff_i[row];
1111       Jptr = poff_j + poff_i[row];
1112       for (i1 = 0; i1 < pnz; i1++) {
1113         j_temp[i1] = p->garray[Jptr[i1]];
1114       }
1115       /* add non-zero cols of P into the sorted linked list lnk */
1116       PetscCall(PetscLLCondensedAddSorted(pnz,j_temp,lnk,lnkbt));
1117     }
1118 
1119     adponz     = lnk[0];
1120     adpoi[i+1] = adpoi[i] + adponz;
1121 
1122     /* if free space is not available, double the total space in the list */
1123     if (current_space->local_remaining<adponz) {
1124       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(adponz,current_space->total_array_size),&current_space));
1125       nspacedouble++;
1126     }
1127 
1128     /* Copy data into free space, then initialize lnk */
1129     PetscCall(PetscLLCondensedClean(pN,adponz,current_space->array,lnk,lnkbt));
1130 
1131     current_space->array           += adponz;
1132     current_space->local_used      += adponz;
1133     current_space->local_remaining -= adponz;
1134   }
1135 
1136   /* Symbolic calc of A_off * P_oth */
1137   PetscCall(MatSetOptionsPrefix(a->B,prefix));
1138   PetscCall(MatAppendOptionsPrefix(a->B,"inner_offdiag_"));
1139   PetscCall(MatCreate(PETSC_COMM_SELF,&aopoth));
1140   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(a->B, ptap->P_oth, fill, aopoth));
1141   aopoth_seq = (Mat_SeqAIJ*)((aopoth)->data);
1142   aopothi = aopoth_seq->i; aopothj = aopoth_seq->j;
1143 
1144   /* Allocate space for apj, adpj, aopj, ... */
1145   /* destroy lists of free space and other temporary array(s) */
1146 
1147   PetscCall(PetscMalloc1(aopothi[am] + adpoi[am] + adpdi[am]+2, &ptap->apj));
1148   PetscCall(PetscMalloc1(adpoi[am]+2, &adpoj));
1149 
1150   /* Copy from linked list to j-array */
1151   PetscCall(PetscFreeSpaceContiguous(&free_space_diag,adpoj));
1152   PetscCall(PetscLLDestroy(lnk,lnkbt));
1153 
1154   adpoJ = adpoj;
1155   adpdJ = adpdj;
1156   aopJ = aopothj;
1157   apj  = ptap->apj;
1158   apJ = apj; /* still empty */
1159 
1160   /* Merge j-arrays of A_off * P, A_diag * P_loc_off, and */
1161   /* A_diag * P_loc_diag to get A*P */
1162   for (i = 0; i < am; i++) {
1163     aopnz  =  aopothi[i+1] -  aopothi[i];
1164     adponz = adpoi[i+1] - adpoi[i];
1165     adpdnz = adpdi[i+1] - adpdi[i];
1166 
1167     /* Correct indices from A_diag*P_diag */
1168     for (i1 = 0; i1 < adpdnz; i1++) {
1169       adpdJ[i1] += p_colstart;
1170     }
1171     /* Merge j-arrays of A_diag * P_loc_off and A_diag * P_loc_diag and A_off * P_oth */
1172     Merge3SortedArrays(adponz, adpoJ, adpdnz, adpdJ, aopnz, aopJ, &apnz, apJ);
1173     PetscCall(MatPreallocateSet(i+rstart, apnz, apJ, dnz, onz));
1174 
1175     aopJ += aopnz;
1176     adpoJ += adponz;
1177     adpdJ += adpdnz;
1178     apJ += apnz;
1179     api[i+1] = api[i] + apnz;
1180   }
1181 
1182   /* malloc apa to store dense row A[i,:]*P */
1183   PetscCall(PetscCalloc1(pN+2,&ptap->apa));
1184 
1185   /* create and assemble symbolic parallel matrix C */
1186   PetscCall(MatSetSizes(C,am,pn,PETSC_DETERMINE,PETSC_DETERMINE));
1187   PetscCall(MatSetBlockSizesFromMats(C,A,P));
1188   PetscCall(MatGetType(A,&mtype));
1189   PetscCall(MatSetType(C,mtype));
1190   PetscCall(MatMPIAIJSetPreallocation(C,0,dnz,0,onz));
1191   MatPreallocateEnd(dnz,onz);
1192 
1193   PetscCall(MatSetValues_MPIAIJ_CopyFromCSRFormat_Symbolic(C, apj, api));
1194   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
1195   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
1196   PetscCall(MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
1197 
1198   C->ops->matmultnumeric = MatMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1199   C->ops->productnumeric = MatProductNumeric_AB;
1200 
1201   /* attach the supporting struct to C for reuse */
1202   C->product->data    = ptap;
1203   C->product->destroy = MatDestroy_MPIAIJ_MatMatMult;
1204 
1205   /* set MatInfo */
1206   afill = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1) + 1.e-5;
1207   if (afill < 1.0) afill = 1.0;
1208   C->info.mallocs           = nspacedouble;
1209   C->info.fill_ratio_given  = fill;
1210   C->info.fill_ratio_needed = afill;
1211 
1212 #if defined(PETSC_USE_INFO)
1213   if (api[am]) {
1214     PetscCall(PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill));
1215     PetscCall(PetscInfo(C,"Use MatMatMult(A,B,MatReuse,%g,&C) for best performance.;\n",(double)afill));
1216   } else {
1217     PetscCall(PetscInfo(C,"Empty matrix product\n"));
1218   }
1219 #endif
1220 
1221   PetscCall(MatDestroy(&aopoth));
1222   PetscCall(MatDestroy(&adpd));
1223   PetscCall(PetscFree(j_temp));
1224   PetscCall(PetscFree(adpoj));
1225   PetscCall(PetscFree(adpoi));
1226   PetscFunctionReturn(0);
1227 }
1228 
1229 /*-------------------------------------------------------------------------*/
1230 /* This routine only works when scall=MAT_REUSE_MATRIX! */
1231 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult(Mat P,Mat A,Mat C)
1232 {
1233   Mat_APMPI      *ptap;
1234   Mat            Pt;
1235 
1236   PetscFunctionBegin;
1237   MatCheckProduct(C,3);
1238   ptap = (Mat_APMPI*)C->product->data;
1239   PetscCheck(ptap,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be computed. Missing data");
1240   PetscCheck(ptap->Pt,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1241 
1242   Pt   = ptap->Pt;
1243   PetscCall(MatTranspose(P,MAT_REUSE_MATRIX,&Pt));
1244   PetscCall(MatMatMultNumeric_MPIAIJ_MPIAIJ(Pt,A,C));
1245   PetscFunctionReturn(0);
1246 }
1247 
1248 /* This routine is modified from MatPtAPSymbolic_MPIAIJ_MPIAIJ() */
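/* Outline: with R = P^T split as Rd = Pd^T and Ro = Po^T, the product C = P^T*A is
   formed from C_loc = Rd*A_loc (rows of C owned locally) and C_oth = Ro*A_loc (rows
   of C owned by other processes); the i/j structure of C_oth is then sent to the
   owning processes and merged into C, following the numbered steps (0)-(5) below. */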
1249 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,PetscReal fill,Mat C)
1250 {
1251   Mat_APMPI           *ptap;
1252   Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data;
1253   MPI_Comm            comm;
1254   PetscMPIInt         size,rank;
1255   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
1256   PetscInt            pn=P->cmap->n,aN=A->cmap->N,an=A->cmap->n;
1257   PetscInt            *lnk,i,k,nsend,rstart;
1258   PetscBT             lnkbt;
1259   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,nrecv;
1260   PETSC_UNUSED PetscMPIInt icompleted=0;
1261   PetscInt            **buf_rj,**buf_ri,**buf_ri_k,row,ncols,*cols;
1262   PetscInt            len,proc,*dnz,*onz,*owners,nzi;
1263   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1264   MPI_Request         *swaits,*rwaits;
1265   MPI_Status          *sstatus,rstatus;
1266   PetscLayout         rowmap;
1267   PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of C_oth = (p->B)^T*A - used in the communication */
1268   PetscMPIInt         *len_r,*id_r;    /* lengths of, and sender ranks for, the messages to be received */
1269   PetscInt            *Jptr,*prmap=p->garray,con,j,Crmax;
1270   Mat_SeqAIJ          *a_loc,*c_loc,*c_oth;
1271   PetscTable          ta;
1272   MatType             mtype;
1273   const char          *prefix;
1274 
1275   PetscFunctionBegin;
1276   PetscCall(PetscObjectGetComm((PetscObject)A,&comm));
1277   PetscCallMPI(MPI_Comm_size(comm,&size));
1278   PetscCallMPI(MPI_Comm_rank(comm,&rank));
1279 
1280   /* create symbolic parallel matrix C */
1281   PetscCall(MatGetType(A,&mtype));
1282   PetscCall(MatSetType(C,mtype));
1283 
1284   C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable;
1285 
1286   /* create struct Mat_APMPI and attach it to C later */
1287   PetscCall(PetscNew(&ptap));
1288   ptap->reuse = MAT_INITIAL_MATRIX;
1289 
1290   /* (0) compute Rd = Pd^T, Ro = Po^T  */
1291   /* --------------------------------- */
1292   PetscCall(MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd));
1293   PetscCall(MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro));
1294 
1295   /* (1) compute symbolic A_loc */
1296   /* ---------------------------*/
1297   PetscCall(MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&ptap->A_loc));
1298 
1299   /* (2-1) compute symbolic C_oth = Ro*A_loc  */
1300   /* ------------------------------------ */
1301   PetscCall(MatGetOptionsPrefix(A,&prefix));
1302   PetscCall(MatSetOptionsPrefix(ptap->Ro,prefix));
1303   PetscCall(MatAppendOptionsPrefix(ptap->Ro,"inner_offdiag_"));
1304   PetscCall(MatCreate(PETSC_COMM_SELF,&ptap->C_oth));
1305   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->A_loc,fill,ptap->C_oth));
1306 
1307   /* (3) send coj of C_oth to other processors  */
1308   /* ------------------------------------------ */
1309   /* determine row ownership */
1310   PetscCall(PetscLayoutCreate(comm,&rowmap));
1311   rowmap->n  = pn;
1312   rowmap->bs = 1;
1313   PetscCall(PetscLayoutSetUp(rowmap));
1314   owners = rowmap->range;
1315 
1316   /* determine the number of messages to send, their lengths */
1317   PetscCall(PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co));
1318   PetscCall(PetscArrayzero(len_s,size));
1319   PetscCall(PetscArrayzero(len_si,size));
1320 
1321   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
1322   coi   = c_oth->i; coj = c_oth->j;
1323   con   = ptap->C_oth->rmap->n;
1324   proc  = 0;
1325   for (i=0; i<con; i++) {
1326     while (prmap[i] >= owners[proc+1]) proc++;
1327     len_si[proc]++;               /* num of rows in Co(=Pt*A) to be sent to [proc] */
1328     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
1329   }
1330 
1331   len          = 0; /* max length of buf_si[], see (4) */
1332   owners_co[0] = 0;
1333   nsend        = 0;
1334   for (proc=0; proc<size; proc++) {
1335     owners_co[proc+1] = owners_co[proc] + len_si[proc];
1336     if (len_s[proc]) {
1337       nsend++;
1338       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
1339       len         += len_si[proc];
1340     }
1341   }
1342 
1343   /* determine the number and length of messages to receive for coi and coj  */
1344   PetscCall(PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv));
1345   PetscCall(PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri));
1346 
1347   /* post the Irecv and Isend of coj */
1348   PetscCall(PetscCommGetNewTag(comm,&tagj));
1349   PetscCall(PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits));
1350   PetscCall(PetscMalloc1(nsend+1,&swaits));
1351   for (proc=0, k=0; proc<size; proc++) {
1352     if (!len_s[proc]) continue;
1353     i    = owners_co[proc];
1354     PetscCallMPI(MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k));
1355     k++;
1356   }
1357 
1358   /* (2-2) compute symbolic C_loc = Rd*A_loc */
1359   /* ---------------------------------------- */
1360   PetscCall(MatSetOptionsPrefix(ptap->Rd,prefix));
1361   PetscCall(MatAppendOptionsPrefix(ptap->Rd,"inner_diag_"));
1362   PetscCall(MatCreate(PETSC_COMM_SELF,&ptap->C_loc));
1363   PetscCall(MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->A_loc,fill,ptap->C_loc));
1364   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
1365 
1366   /* receives of coj are complete */
1367   for (i=0; i<nrecv; i++) {
1368     PetscCallMPI(MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus));
1369   }
1370   PetscCall(PetscFree(rwaits));
1371   if (nsend) PetscCallMPI(MPI_Waitall(nsend,swaits,sstatus));
1372 
1373   /* add received column indices into ta to update Crmax */
1374   a_loc = (Mat_SeqAIJ*)(ptap->A_loc)->data;
1375 
1376   /* create and initialize a table for computing Crmax */
1377   PetscCall(PetscTableCreate(an,aN,&ta));
1378   MatRowMergeMax_SeqAIJ(a_loc,ptap->A_loc->rmap->N,ta);
1379 
1380   for (k=0; k<nrecv; k++) {/* k-th received message */
1381     Jptr = buf_rj[k];
1382     for (j=0; j<len_r[k]; j++) {
1383       PetscCall(PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES));
1384     }
1385   }
1386   PetscCall(PetscTableGetCount(ta,&Crmax));
1387   PetscCall(PetscTableDestroy(&ta));
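  /* Crmax is now the number of distinct column indices occurring in A_loc plus those
     received from other ranks' C_oth rows; it is an upper bound on the number of nonzeros
     in any single row of C, and is used below to size the condensed linked list and the
     initial free space */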
1388 
1389   /* (4) send and recv coi */
1390   /*-----------------------*/
1391   PetscCall(PetscCommGetNewTag(comm,&tagi));
1392   PetscCall(PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits));
1393   PetscCall(PetscMalloc1(len+1,&buf_s));
1394   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
1395   for (proc=0,k=0; proc<size; proc++) {
1396     if (!len_s[proc]) continue;
1397     /* form outgoing message for i-structure:
1398          buf_si[0]:                 nrows to be sent
1399                [1:nrows]:           row index (global)
1400                [nrows+1:2*nrows+1]: i-structure index
1401     */
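    /* A hypothetical instance of the layout above: sending nrows = 2 rows with local
       indices r0,r1 and 3 and 5 nonzeros gives buf_si = {2, r0, r1, 0, 3, 8}, whose
       length is 2*(2+1) = 6 = len_si[proc] */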
1402     /*-------------------------------------------*/
1403     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
1404     buf_si_i    = buf_si + nrows+1;
1405     buf_si[0]   = nrows;
1406     buf_si_i[0] = 0;
1407     nrows       = 0;
1408     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1409       nzi = coi[i+1] - coi[i];
1410       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
1411       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
1412       nrows++;
1413     }
1414     PetscCallMPI(MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k));
1415     k++;
1416     buf_si += len_si[proc];
1417   }
1418   for (i=0; i<nrecv; i++) {
1419     PetscCallMPI(MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus));
1420   }
1421   PetscCall(PetscFree(rwaits));
1422   if (nsend) PetscCallMPI(MPI_Waitall(nsend,swaits,sstatus));
1423 
1424   PetscCall(PetscFree4(len_s,len_si,sstatus,owners_co));
1425   PetscCall(PetscFree(len_ri));
1426   PetscCall(PetscFree(swaits));
1427   PetscCall(PetscFree(buf_s));
1428 
1429   /* (5) compute the local portion of C      */
1430   /* ------------------------------------------ */
1431   /* set initial free space to be Crmax, sufficient to hold the nonzeros of any single row of C */
1432   PetscCall(PetscFreeSpaceGet(Crmax,&free_space));
1433   current_space = free_space;
1434 
1435   PetscCall(PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci));
1436   for (k=0; k<nrecv; k++) {
1437     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1438     nrows       = *buf_ri_k[k];
1439     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
1440     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1441   }
1442 
1443   MatPreallocateBegin(comm,pn,an,dnz,onz);
1444   PetscCall(PetscLLCondensedCreate(Crmax,aN,&lnk,&lnkbt));
1445   for (i=0; i<pn; i++) { /* for each local row of C */
1446     /* add C_loc into C */
1447     nzi  = c_loc->i[i+1] - c_loc->i[i];
1448     Jptr = c_loc->j + c_loc->i[i];
1449     PetscCall(PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt));
1450 
1451     /* add received col data into lnk */
1452     for (k=0; k<nrecv; k++) { /* k-th received message */
1453       if (i == *nextrow[k]) { /* i-th row */
1454         nzi  = *(nextci[k]+1) - *nextci[k];
1455         Jptr = buf_rj[k] + *nextci[k];
1456         PetscCall(PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt));
1457         nextrow[k]++; nextci[k]++;
1458       }
1459     }
1460 
1461     /* add missing diagonal entry */
1462     if (C->force_diagonals) {
1463       k = i + owners[rank]; /* column index */
1464       PetscCall(PetscLLCondensedAddSorted(1,&k,lnk,lnkbt));
1465     }
1466 
1467     nzi = lnk[0];
1468 
1469     /* copy data into free space, then initialize lnk */
1470     PetscCall(PetscLLCondensedClean(aN,nzi,current_space->array,lnk,lnkbt));
1471     PetscCall(MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz));
1472   }
1473   PetscCall(PetscFree3(buf_ri_k,nextrow,nextci));
1474   PetscCall(PetscLLDestroy(lnk,lnkbt));
1475   PetscCall(PetscFreeSpaceDestroy(free_space));
1476 
1477   /* local sizes and preallocation */
1478   PetscCall(MatSetSizes(C,pn,an,PETSC_DETERMINE,PETSC_DETERMINE));
1479   if (P->cmap->bs > 0) PetscCall(PetscLayoutSetBlockSize(C->rmap,P->cmap->bs));
1480   if (A->cmap->bs > 0) PetscCall(PetscLayoutSetBlockSize(C->cmap,A->cmap->bs));
1481   PetscCall(MatMPIAIJSetPreallocation(C,0,dnz,0,onz));
1482   MatPreallocateEnd(dnz,onz);
1483 
1484   /* add C_loc and C_oth to C */
1485   PetscCall(MatGetOwnershipRange(C,&rstart,NULL));
1486   for (i=0; i<pn; i++) {
1487     ncols = c_loc->i[i+1] - c_loc->i[i];
1488     cols  = c_loc->j + c_loc->i[i];
1489     row   = rstart + i;
1490     PetscCall(MatSetValues(C,1,(const PetscInt*)&row,ncols,(const PetscInt*)cols,NULL,INSERT_VALUES));
1491 
1492     if (C->force_diagonals) {
1493       PetscCall(MatSetValues(C,1,(const PetscInt*)&row,1,(const PetscInt*)&row,NULL,INSERT_VALUES));
1494     }
1495   }
1496   for (i=0; i<con; i++) {
1497     ncols = c_oth->i[i+1] - c_oth->i[i];
1498     cols  = c_oth->j + c_oth->i[i];
1499     row   = prmap[i];
1500     PetscCall(MatSetValues(C,1,(const PetscInt*)&row,ncols,(const PetscInt*)cols,NULL,INSERT_VALUES));
1501   }
1502   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
1503   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
1504   PetscCall(MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
1505 
1506   /* members in merge */
1507   PetscCall(PetscFree(id_r));
1508   PetscCall(PetscFree(len_r));
1509   PetscCall(PetscFree(buf_ri[0]));
1510   PetscCall(PetscFree(buf_ri));
1511   PetscCall(PetscFree(buf_rj[0]));
1512   PetscCall(PetscFree(buf_rj));
1513   PetscCall(PetscLayoutDestroy(&rowmap));
1514 
1515   /* attach the supporting struct to C for reuse */
1516   C->product->data    = ptap;
1517   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
1518   PetscFunctionReturn(0);
1519 }
1520 
1521 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_nonscalable(Mat P,Mat A,Mat C)
1522 {
1523   Mat_MPIAIJ        *p=(Mat_MPIAIJ*)P->data;
1524   Mat_SeqAIJ        *c_seq;
1525   Mat_APMPI         *ptap;
1526   Mat               A_loc,C_loc,C_oth;
1527   PetscInt          i,rstart,rend,cm,ncols,row;
1528   const PetscInt    *cols;
1529   const PetscScalar *vals;
1530 
1531   PetscFunctionBegin;
1532   MatCheckProduct(C,3);
1533   ptap = (Mat_APMPI*)C->product->data;
1534   PetscCheck(ptap,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1535   PetscCheck(ptap->A_loc,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1536   PetscCall(MatZeroEntries(C));
1537 
1538   if (ptap->reuse == MAT_REUSE_MATRIX) {
1539     /* These matrices are obtained in MatTransposeMatMultSymbolic() */
1540     /* 1) get R = Pd^T, Ro = Po^T */
1541     /*----------------------------*/
1542     PetscCall(MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd));
1543     PetscCall(MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro));
1544 
1545     /* 2) compute numeric A_loc */
1546     /*--------------------------*/
1547     PetscCall(MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&ptap->A_loc));
1548   }
1549 
1550   /* 3) C_loc = Rd*A_loc, C_oth = Ro*A_loc */
1551   A_loc = ptap->A_loc;
1552   PetscCall(((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,A_loc,ptap->C_loc));
1553   PetscCall(((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,A_loc,ptap->C_oth));
1554   C_loc = ptap->C_loc;
1555   C_oth = ptap->C_oth;
1556 
1557   /* add C_loc and C_oth to C */
1558   PetscCall(MatGetOwnershipRange(C,&rstart,&rend));
1559 
1560   /* C_loc -> C */
1561   cm    = C_loc->rmap->N;
1562   c_seq = (Mat_SeqAIJ*)C_loc->data;
1563   cols = c_seq->j;
1564   vals = c_seq->a;
1565   for (i=0; i<cm; i++) {
1566     ncols = c_seq->i[i+1] - c_seq->i[i];
1567     row = rstart + i;
1568     PetscCall(MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES));
1569     cols += ncols; vals += ncols;
1570   }
1571 
1572   /* Co -> C, off-processor part */
1573   cm    = C_oth->rmap->N;
1574   c_seq = (Mat_SeqAIJ*)C_oth->data;
1575   cols  = c_seq->j;
1576   vals  = c_seq->a;
1577   for (i=0; i<cm; i++) {
1578     ncols = c_seq->i[i+1] - c_seq->i[i];
1579     row = p->garray[i];
1580     PetscCall(MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES));
1581     cols += ncols; vals += ncols;
1582   }
1583   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
1584   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
1585   PetscCall(MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
1586 
1587   ptap->reuse = MAT_REUSE_MATRIX;
1588   PetscFunctionReturn(0);
1589 }
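/*
  A minimal usage sketch for the pair of routines above (the "nonscalable" algorithm),
  assuming the standard convenience interface, which computes C = P^T * A:

    Mat C;
    PetscCall(MatTransposeMatMult(P,A,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C));
    // ... change numerical entries of A and/or P, keeping their nonzero patterns ...
    PetscCall(MatTransposeMatMult(P,A,MAT_REUSE_MATRIX,PETSC_DEFAULT,&C));

  The first call runs the symbolic routine, which attaches the Mat_APMPI data to
  C->product; the second call reuses that data and repeats only the numeric phase.
*/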
1590 
1591 PetscErrorCode MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ(Mat P,Mat A,Mat C)
1592 {
1593   Mat_Merge_SeqsToMPI *merge;
1594   Mat_MPIAIJ          *p =(Mat_MPIAIJ*)P->data;
1595   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1596   Mat_APMPI           *ptap;
1597   PetscInt            *adj;
1598   PetscInt            i,j,k,anz,pnz,row,*cj,nexta;
1599   MatScalar           *ada,*ca,valtmp;
1600   PetscInt            am=A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1601   MPI_Comm            comm;
1602   PetscMPIInt         size,rank,taga,*len_s;
1603   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1604   PetscInt            **buf_ri,**buf_rj;
1605   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
1606   MPI_Request         *s_waits,*r_waits;
1607   MPI_Status          *status;
1608   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
1609   const PetscScalar   *dummy;
1610   PetscInt            *ai,*aj,*coi,*coj,*poJ,*pdJ;
1611   Mat                 A_loc;
1612   Mat_SeqAIJ          *a_loc;
1613 
1614   PetscFunctionBegin;
1615   MatCheckProduct(C,3);
1616   ptap = (Mat_APMPI*)C->product->data;
1617   PetscCheck(ptap,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be computed. Missing data");
1618   PetscCheck(ptap->A_loc,PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_WRONGSTATE,"PtA cannot be reused. Do not call MatProductClear()");
1619   PetscCall(PetscObjectGetComm((PetscObject)C,&comm));
1620   PetscCallMPI(MPI_Comm_size(comm,&size));
1621   PetscCallMPI(MPI_Comm_rank(comm,&rank));
1622 
1623   merge = ptap->merge;
1624 
1625   /* 2) compute numeric C_seq = P_loc^T*A_loc */
1626   /*------------------------------------------*/
1627   /* get data from symbolic products */
1628   coi    = merge->coi; coj = merge->coj;
1629   PetscCall(PetscCalloc1(coi[pon]+1,&coa));
1630   bi     = merge->bi; bj = merge->bj;
1631   owners = merge->rowmap->range;
1632   PetscCall(PetscCalloc1(bi[cm]+1,&ba));
1633 
1634   /* get A_loc by taking all local rows of A */
1635   A_loc = ptap->A_loc;
1636   PetscCall(MatMPIAIJGetLocalMat(A,MAT_REUSE_MATRIX,&A_loc));
1637   a_loc = (Mat_SeqAIJ*)(A_loc)->data;
1638   ai    = a_loc->i;
1639   aj    = a_loc->j;
1640 
1641   /* trigger copy to CPU */
1642   PetscCall(MatSeqAIJGetArrayRead(p->A,&dummy));
1643   PetscCall(MatSeqAIJRestoreArrayRead(p->A,&dummy));
1644   PetscCall(MatSeqAIJGetArrayRead(p->B,&dummy));
1645   PetscCall(MatSeqAIJRestoreArrayRead(p->B,&dummy));
1646   for (i=0; i<am; i++) {
1647     anz = ai[i+1] - ai[i];
1648     adj = aj + ai[i];
1649     ada = a_loc->a + ai[i];
1650 
1651     /* 2-b) Compute Cseq = P_loc[i,:]^T*A[i,:] using outer product */
1652     /*-------------------------------------------------------------*/
1653     /* put the value into Co=(p->B)^T*A (off-diagonal part, send to others) */
1654     pnz = po->i[i+1] - po->i[i];
1655     poJ = po->j + po->i[i];
1656     pA  = po->a + po->i[i];
1657     for (j=0; j<pnz; j++) {
1658       row = poJ[j];
1659       cj  = coj + coi[row];
1660       ca  = coa + coi[row];
1661       /* perform sparse axpy */
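      /* cj holds the sorted column indices of row [row] of Co as built in the symbolic
         phase; adj is a sorted subset of it, so one forward scan of cj matches every one
         of the anz entries of adj */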
1662       nexta  = 0;
1663       valtmp = pA[j];
1664       for (k=0; nexta<anz; k++) {
1665         if (cj[k] == adj[nexta]) {
1666           ca[k] += valtmp*ada[nexta];
1667           nexta++;
1668         }
1669       }
1670       PetscCall(PetscLogFlops(2.0*anz));
1671     }
1672 
1673     /* put the value into Cd (diagonal part) */
1674     pnz = pd->i[i+1] - pd->i[i];
1675     pdJ = pd->j + pd->i[i];
1676     pA  = pd->a + pd->i[i];
1677     for (j=0; j<pnz; j++) {
1678       row = pdJ[j];
1679       cj  = bj + bi[row];
1680       ca  = ba + bi[row];
1681       /* perform sparse axpy */
1682       nexta  = 0;
1683       valtmp = pA[j];
1684       for (k=0; nexta<anz; k++) {
1685         if (cj[k] == adj[nexta]) {
1686           ca[k] += valtmp*ada[nexta];
1687           nexta++;
1688         }
1689       }
1690       PetscCall(PetscLogFlops(2.0*anz));
1691     }
1692   }
1693 
1694   /* 3) send and recv matrix values coa */
1695   /*------------------------------------*/
1696   buf_ri = merge->buf_ri;
1697   buf_rj = merge->buf_rj;
1698   len_s  = merge->len_s;
1699   PetscCall(PetscCommGetNewTag(comm,&taga));
1700   PetscCall(PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits));
1701 
1702   PetscCall(PetscMalloc2(merge->nsend+1,&s_waits,size,&status));
1703   for (proc=0,k=0; proc<size; proc++) {
1704     if (!len_s[proc]) continue;
1705     i    = merge->owners_co[proc];
1706     PetscCallMPI(MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k));
1707     k++;
1708   }
1709   if (merge->nrecv) PetscCallMPI(MPI_Waitall(merge->nrecv,r_waits,status));
1710   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend,s_waits,status));
1711 
1712   PetscCall(PetscFree2(s_waits,status));
1713   PetscCall(PetscFree(r_waits));
1714   PetscCall(PetscFree(coa));
1715 
1716   /* 4) insert local Cseq and received values into Cmpi */
1717   /*----------------------------------------------------*/
1718   PetscCall(PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci));
1719   for (k=0; k<merge->nrecv; k++) {
1720     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1721     nrows       = *(buf_ri_k[k]);
1722     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
1723     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1724   }
1725 
1726   for (i=0; i<cm; i++) {
1727     row  = owners[rank] + i; /* global row index of C_seq */
1728     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
1729     ba_i = ba + bi[i];
1730     bnz  = bi[i+1] - bi[i];
1731     /* add received vals into ba */
1732     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1733       /* i-th row */
1734       if (i == *nextrow[k]) {
1735         cnz    = *(nextci[k]+1) - *nextci[k];
1736         cj     = buf_rj[k] + *(nextci[k]);
1737         ca     = abuf_r[k] + *(nextci[k]);
1738         nextcj = 0;
1739         for (j=0; nextcj<cnz; j++) {
1740           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1741             ba_i[j] += ca[nextcj++];
1742           }
1743         }
1744         nextrow[k]++; nextci[k]++;
1745         PetscCall(PetscLogFlops(2.0*cnz));
1746       }
1747     }
1748     PetscCall(MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES));
1749   }
1750   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
1751   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
1752 
1753   PetscCall(PetscFree(ba));
1754   PetscCall(PetscFree(abuf_r[0]));
1755   PetscCall(PetscFree(abuf_r));
1756   PetscCall(PetscFree3(buf_ri_k,nextrow,nextci));
1757   PetscFunctionReturn(0);
1758 }
1759 
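/*
  Symbolic phase of C = P^T * A for the "scalable" algorithm. A rough outline of the
  steps below (reading aid only):
    - gather all local rows of A into the sequential matrix A_loc;
    - build the structure of Co = Po^T * A_loc; its rows are owned by other ranks, so
      the row/column structure (coi,coj) is shipped to the owning ranks;
    - build the structure of the local part Pd^T * A_loc and merge it, row by row,
      with the received Co structure through a condensed linked list, which yields the
      preallocation (dnz,onz) and the (bi,bj) pattern of the parallel C;
    - stash the communication data in a Mat_Merge_SeqsToMPI so that
      MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ() above can reuse it.
*/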
1760 PetscErrorCode MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(Mat P,Mat A,PetscReal fill,Mat C)
1761 {
1762   Mat                 A_loc;
1763   Mat_APMPI           *ptap;
1764   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
1765   Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*a=(Mat_MPIAIJ*)A->data;
1766   PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ;
1767   PetscInt            nnz;
1768   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
1769   PetscInt            am  =A->rmap->n,pn=P->cmap->n;
1770   MPI_Comm            comm;
1771   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
1772   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
1773   PetscInt            len,proc,*dnz,*onz,*owners;
1774   PetscInt            nzi,*bi,*bj;
1775   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
1776   MPI_Request         *swaits,*rwaits;
1777   MPI_Status          *sstatus,rstatus;
1778   Mat_Merge_SeqsToMPI *merge;
1779   PetscInt            *ai,*aj,*Jptr,anz,*prmap=p->garray,pon,nspacedouble=0,j;
1780   PetscReal           afill  =1.0,afill_tmp;
1781   PetscInt            rstart = P->cmap->rstart,rmax,aN=A->cmap->N,Armax;
1782   Mat_SeqAIJ          *a_loc;
1783   PetscTable          ta;
1784   MatType             mtype;
1785 
1786   PetscFunctionBegin;
1787   PetscCall(PetscObjectGetComm((PetscObject)A,&comm));
1788   /* check if matrix local sizes are compatible */
1789   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend,comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != P (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
1790 
1791   PetscCallMPI(MPI_Comm_size(comm,&size));
1792   PetscCallMPI(MPI_Comm_rank(comm,&rank));
1793 
1794   /* create struct Mat_APMPI and attach it to C later */
1795   PetscCall(PetscNew(&ptap));
1796 
1797   /* get A_loc by taking all local rows of A */
1798   PetscCall(MatMPIAIJGetLocalMat(A,MAT_INITIAL_MATRIX,&A_loc));
1799 
1800   ptap->A_loc = A_loc;
1801   a_loc       = (Mat_SeqAIJ*)(A_loc)->data;
1802   ai          = a_loc->i;
1803   aj          = a_loc->j;
1804 
1805   /* determine symbolic Co=(p->B)^T*A - send to others */
1806   /*----------------------------------------------------*/
1807   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj));
1808   PetscCall(MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj));
1809   pon = (p->B)->cmap->n; /* total num of rows to be sent to other processors
1810                          >= (num of nonzero rows of C_seq) - pn */
1811   PetscCall(PetscMalloc1(pon+1,&coi));
1812   coi[0] = 0;
1813 
1814   /* set initial free space to be fill*(nnz(p->B) + nnz(A)) */
1815   nnz           = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(poti[pon],ai[am]));
1816   PetscCall(PetscFreeSpaceGet(nnz,&free_space));
1817   current_space = free_space;
1818 
1819   /* create and initialize a table for computing Armax */
1820   PetscCall(PetscTableCreate(A->cmap->n + a->B->cmap->N,aN,&ta));
1821   MatRowMergeMax_SeqAIJ(a_loc,am,ta);
1822   PetscCall(PetscTableGetCount(ta,&Armax));
1823 
1824   PetscCall(PetscLLCondensedCreate_Scalable(Armax,&lnk));
1825 
1826   for (i=0; i<pon; i++) {
1827     pnz = poti[i+1] - poti[i];
1828     ptJ = potj + poti[i];
1829     for (j=0; j<pnz; j++) {
1830       row  = ptJ[j]; /* row of A_loc == col of Pot */
1831       anz  = ai[row+1] - ai[row];
1832       Jptr = aj + ai[row];
1833       /* add nonzero cols of A_loc[row,:] into the sorted linked list lnk */
1834       PetscCall(PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk));
1835     }
1836     nnz = lnk[0];
1837 
1838     /* If free space is not available, double the total space in the list */
1839     if (current_space->local_remaining<nnz) {
1840       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space));
1841       nspacedouble++;
1842     }
1843 
1844     /* copy data into free space, then initialize lnk */
1845     PetscCall(PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk));
1846 
1847     current_space->array           += nnz;
1848     current_space->local_used      += nnz;
1849     current_space->local_remaining -= nnz;
1850 
1851     coi[i+1] = coi[i] + nnz;
1852   }
1853 
1854   PetscCall(PetscMalloc1(coi[pon]+1,&coj));
1855   PetscCall(PetscFreeSpaceContiguous(&free_space,coj));
1856   PetscCall(PetscLLCondensedDestroy_Scalable(lnk)); /* must destroy to get a new one for C */
1857 
1858   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + ai[am]+1);
1859   if (afill_tmp > afill) afill = afill_tmp;
1860 
1861   /* send j-array (coj) of Co to other processors */
1862   /*----------------------------------------------*/
1863   /* determine row ownership */
1864   PetscCall(PetscNew(&merge));
1865   PetscCall(PetscLayoutCreate(comm,&merge->rowmap));
1866 
1867   merge->rowmap->n  = pn;
1868   merge->rowmap->bs = 1;
1869 
1870   PetscCall(PetscLayoutSetUp(merge->rowmap));
1871   owners = merge->rowmap->range;
1872 
1873   /* determine the number of messages to send, their lengths */
1874   PetscCall(PetscCalloc1(size,&len_si));
1875   PetscCall(PetscCalloc1(size,&merge->len_s));
1876 
1877   len_s        = merge->len_s;
1878   merge->nsend = 0;
1879 
1880   PetscCall(PetscMalloc1(size+2,&owners_co));
1881 
1882   proc = 0;
1883   for (i=0; i<pon; i++) {
1884     while (prmap[i] >= owners[proc+1]) proc++;
1885     len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
1886     len_s[proc] += coi[i+1] - coi[i];
1887   }
1888 
1889   len          = 0; /* total length of buf_s[] */
1890   owners_co[0] = 0;
1891   for (proc=0; proc<size; proc++) {
1892     owners_co[proc+1] = owners_co[proc] + len_si[proc];
1893     if (len_si[proc]) {
1894       merge->nsend++;
1895       len_si[proc] = 2*(len_si[proc] + 1);
1896       len         += len_si[proc];
1897     }
1898   }
1899 
1900   /* determine the number and length of messages to receive for coi and coj  */
1901   PetscCall(PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv));
1902   PetscCall(PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri));
1903 
1904   /* post the Irecv and Isend of coj */
1905   PetscCall(PetscCommGetNewTag(comm,&tagj));
1906   PetscCall(PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits));
1907   PetscCall(PetscMalloc1(merge->nsend+1,&swaits));
1908   for (proc=0, k=0; proc<size; proc++) {
1909     if (!len_s[proc]) continue;
1910     i    = owners_co[proc];
1911     PetscCallMPI(MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k));
1912     k++;
1913   }
1914 
1915   /* receives and sends of coj are complete */
1916   PetscCall(PetscMalloc1(size,&sstatus));
1917   for (i=0; i<merge->nrecv; i++) {
1918     PETSC_UNUSED PetscMPIInt icompleted;
1919     PetscCallMPI(MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus));
1920   }
1921   PetscCall(PetscFree(rwaits));
1922   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend,swaits,sstatus));
1923 
1924   /* add received column indices into table to update Armax */
1925   /* Armax can be as large as aN if a P[row,:] is dense, see src/ksp/ksp/tutorials/ex56.c! */
1926   for (k=0; k<merge->nrecv; k++) {/* k-th received message */
1927     Jptr = buf_rj[k];
1928     for (j=0; j<merge->len_r[k]; j++) {
1929       PetscCall(PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES));
1930     }
1931   }
1932   PetscCall(PetscTableGetCount(ta,&Armax));
1933 
1934   /* send and recv coi */
1935   /*-------------------*/
1936   PetscCall(PetscCommGetNewTag(comm,&tagi));
1937   PetscCall(PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits));
1938   PetscCall(PetscMalloc1(len+1,&buf_s));
1939   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
1940   for (proc=0,k=0; proc<size; proc++) {
1941     if (!len_s[proc]) continue;
1942     /* form outgoing message for i-structure:
1943          buf_si[0]:                 nrows to be sent
1944                [1:nrows]:           row index (global)
1945                [nrows+1:2*nrows+1]: i-structure index
1946     */
1947     /*-------------------------------------------*/
1948     nrows       = len_si[proc]/2 - 1;
1949     buf_si_i    = buf_si + nrows+1;
1950     buf_si[0]   = nrows;
1951     buf_si_i[0] = 0;
1952     nrows       = 0;
1953     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
1954       nzi               = coi[i+1] - coi[i];
1955       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
1956       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
1957       nrows++;
1958     }
1959     PetscCallMPI(MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k));
1960     k++;
1961     buf_si += len_si[proc];
1962   }
1963   i = merge->nrecv;
1964   while (i--) {
1965     PETSC_UNUSED PetscMPIInt icompleted;
1966     PetscCallMPI(MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus));
1967   }
1968   PetscCall(PetscFree(rwaits));
1969   if (merge->nsend) PetscCallMPI(MPI_Waitall(merge->nsend,swaits,sstatus));
1970   PetscCall(PetscFree(len_si));
1971   PetscCall(PetscFree(len_ri));
1972   PetscCall(PetscFree(swaits));
1973   PetscCall(PetscFree(sstatus));
1974   PetscCall(PetscFree(buf_s));
1975 
1976   /* compute the local portion of C (mpi mat) */
1977   /*------------------------------------------*/
1978   /* allocate bi array and free space for accumulating nonzero column info */
1979   PetscCall(PetscMalloc1(pn+1,&bi));
1980   bi[0] = 0;
1981 
1982   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
1983   nnz           = PetscRealIntMultTruncate(fill,PetscIntSumTruncate(pdti[pn],PetscIntSumTruncate(poti[pon],ai[am])));
1984   PetscCall(PetscFreeSpaceGet(nnz,&free_space));
1985   current_space = free_space;
1986 
1987   PetscCall(PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci));
1988   for (k=0; k<merge->nrecv; k++) {
1989     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1990     nrows       = *buf_ri_k[k];
1991     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
1992     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure  */
1993   }
1994 
1995   PetscCall(PetscLLCondensedCreate_Scalable(Armax,&lnk));
1996   MatPreallocateBegin(comm,pn,A->cmap->n,dnz,onz);
1997   rmax = 0;
1998   for (i=0; i<pn; i++) {
1999     /* add the structure of pdt[i,:]*A into lnk */
2000     pnz = pdti[i+1] - pdti[i];
2001     ptJ = pdtj + pdti[i];
2002     for (j=0; j<pnz; j++) {
2003       row  = ptJ[j];  /* row of A_loc == col of Pt */
2004       anz  = ai[row+1] - ai[row];
2005       Jptr = aj + ai[row];
2006       /* add nonzero cols of A_loc[row,:] into the sorted linked list lnk */
2007       PetscCall(PetscLLCondensedAddSorted_Scalable(anz,Jptr,lnk));
2008     }
2009 
2010     /* add received col data into lnk */
2011     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
2012       if (i == *nextrow[k]) { /* i-th row */
2013         nzi  = *(nextci[k]+1) - *nextci[k];
2014         Jptr = buf_rj[k] + *nextci[k];
2015         PetscCall(PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk));
2016         nextrow[k]++; nextci[k]++;
2017       }
2018     }
2019 
2020     /* add missing diagonal entry */
2021     if (C->force_diagonals) {
2022       k = i + owners[rank]; /* column index */
2023       PetscCall(PetscLLCondensedAddSorted_Scalable(1,&k,lnk));
2024     }
2025 
2026     nnz = lnk[0];
2027 
2028     /* if free space is not available, make more free space */
2029     if (current_space->local_remaining<nnz) {
2030       PetscCall(PetscFreeSpaceGet(PetscIntSumTruncate(nnz,current_space->total_array_size),&current_space));
2031       nspacedouble++;
2032     }
2033     /* copy data into free space, then initialize lnk */
2034     PetscCall(PetscLLCondensedClean_Scalable(nnz,current_space->array,lnk));
2035     PetscCall(MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz));
2036 
2037     current_space->array           += nnz;
2038     current_space->local_used      += nnz;
2039     current_space->local_remaining -= nnz;
2040 
2041     bi[i+1] = bi[i] + nnz;
2042     if (nnz > rmax) rmax = nnz;
2043   }
2044   PetscCall(PetscFree3(buf_ri_k,nextrow,nextci));
2045 
2046   PetscCall(PetscMalloc1(bi[pn]+1,&bj));
2047   PetscCall(PetscFreeSpaceContiguous(&free_space,bj));
2048   afill_tmp = (PetscReal)bi[pn]/(pdti[pn] + poti[pon] + ai[am]+1);
2049   if (afill_tmp > afill) afill = afill_tmp;
2050   PetscCall(PetscLLCondensedDestroy_Scalable(lnk));
2051   PetscCall(PetscTableDestroy(&ta));
2052   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj));
2053   PetscCall(MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj));
2054 
2055   /* create symbolic parallel matrix C - why can it not be assembled in the numeric part? */
2056   /*-------------------------------------------------------------------------------*/
2057   PetscCall(MatSetSizes(C,pn,A->cmap->n,PETSC_DETERMINE,PETSC_DETERMINE));
2058   PetscCall(MatSetBlockSizes(C,PetscAbs(P->cmap->bs),PetscAbs(A->cmap->bs)));
2059   PetscCall(MatGetType(A,&mtype));
2060   PetscCall(MatSetType(C,mtype));
2061   PetscCall(MatMPIAIJSetPreallocation(C,0,dnz,0,onz));
2062   MatPreallocateEnd(dnz,onz);
2063   PetscCall(MatSetBlockSize(C,1));
2064   PetscCall(MatSetOption(C,MAT_NO_OFF_PROC_ENTRIES,PETSC_TRUE));
2065   for (i=0; i<pn; i++) {
2066     row  = i + rstart;
2067     nnz  = bi[i+1] - bi[i];
2068     Jptr = bj + bi[i];
2069     PetscCall(MatSetValues(C,1,&row,nnz,Jptr,NULL,INSERT_VALUES));
2070   }
2071   PetscCall(MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY));
2072   PetscCall(MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY));
2073   PetscCall(MatSetOption(C,MAT_NEW_NONZERO_LOCATION_ERR,PETSC_TRUE));
2074   merge->bi        = bi;
2075   merge->bj        = bj;
2076   merge->coi       = coi;
2077   merge->coj       = coj;
2078   merge->buf_ri    = buf_ri;
2079   merge->buf_rj    = buf_rj;
2080   merge->owners_co = owners_co;
2081 
2082   /* attach the supporting struct to C for reuse */
2083   C->product->data    = ptap;
2084   C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2085   ptap->merge         = merge;
2086 
2087   C->ops->mattransposemultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ;
2088 
2089 #if defined(PETSC_USE_INFO)
2090   if (bi[pn] != 0) {
2091     PetscCall(PetscInfo(C,"Reallocs %" PetscInt_FMT "; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill));
2092     PetscCall(PetscInfo(C,"Use MatTransposeMatMult(A,B,MatReuse,%g,&C) for best performance.\n",(double)afill));
2093   } else {
2094     PetscCall(PetscInfo(C,"Empty matrix product\n"));
2095   }
2096 #endif
2097   PetscFunctionReturn(0);
2098 }
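/*
  The PetscInfo() statistics above (number of free-space reallocations and the fill
  ratio actually needed) are printed when the program is run with -info and can be used
  to pass a better fill estimate on subsequent symbolic calls.
*/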
2099 
2100 /* ---------------------------------------------------------------- */
2101 static PetscErrorCode MatProductSymbolic_AtB_MPIAIJ_MPIAIJ(Mat C)
2102 {
2103   Mat_Product    *product = C->product;
2104   Mat            A=product->A,B=product->B;
2105   PetscReal      fill=product->fill;
2106   PetscBool      flg;
2107 
2108   PetscFunctionBegin;
2109   /* scalable */
2110   PetscCall(PetscStrcmp(product->alg,"scalable",&flg));
2111   if (flg) {
2112     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ(A,B,fill,C));
2113     goto next;
2114   }
2115 
2116   /* nonscalable */
2117   PetscCall(PetscStrcmp(product->alg,"nonscalable",&flg));
2118   if (flg) {
2119     PetscCall(MatTransposeMatMultSymbolic_MPIAIJ_MPIAIJ_nonscalable(A,B,fill,C));
2120     goto next;
2121   }
2122 
2123   /* matmatmult */
2124   PetscCall(PetscStrcmp(product->alg,"at*b",&flg));
2125   if (flg) {
2126     Mat       At;
2127     Mat_APMPI *ptap;
2128 
2129     PetscCall(MatTranspose(A,MAT_INITIAL_MATRIX,&At));
2130     PetscCall(MatMatMultSymbolic_MPIAIJ_MPIAIJ(At,B,fill,C));
2131     ptap = (Mat_APMPI*)C->product->data;
2132     if (ptap) {
2133       ptap->Pt = At;
2134       C->product->destroy = MatDestroy_MPIAIJ_PtAP;
2135     }
2136     C->ops->transposematmultnumeric = MatTransposeMatMultNumeric_MPIAIJ_MPIAIJ_matmatmult;
2137     goto next;
2138   }
2139 
2140   /* backend general code */
2141   PetscCall(PetscStrcmp(product->alg,"backend",&flg));
2142   if (flg) {
2143     PetscCall(MatProductSymbolic_MPIAIJBACKEND(C));
2144     PetscFunctionReturn(0);
2145   }
2146 
2147   SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"MatProduct algorithm is not supported");
2148 
2149 next:
2150   C->ops->productnumeric = MatProductNumeric_AtB;
2151   PetscFunctionReturn(0);
2152 }
2153 
2154 /* ---------------------------------------------------------------- */
2155 /* Set options for MatMatMultxxx_MPIAIJ_MPIAIJ */
2156 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AB(Mat C)
2157 {
2158   Mat_Product    *product = C->product;
2159   Mat            A=product->A,B=product->B;
2160 #if defined(PETSC_HAVE_HYPRE)
2161   const char     *algTypes[5] = {"scalable","nonscalable","seqmpi","backend","hypre"};
2162   PetscInt       nalg = 5;
2163 #else
2164   const char     *algTypes[4] = {"scalable","nonscalable","seqmpi","backend"};
2165   PetscInt       nalg = 4;
2166 #endif
2167   PetscInt       alg = 1; /* set nonscalable algorithm as default */
2168   PetscBool      flg;
2169   MPI_Comm       comm;
2170 
2171   PetscFunctionBegin;
2172   PetscCall(PetscObjectGetComm((PetscObject)C,&comm));
2173 
2174   /* Set "nonscalable" as default algorithm */
2175   PetscCall(PetscStrcmp(C->product->alg,"default",&flg));
2176   if (flg) {
2177     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2178 
2179     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2180     if (B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2181       MatInfo     Ainfo,Binfo;
2182       PetscInt    nz_local;
2183       PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;
2184 
2185       PetscCall(MatGetInfo(A,MAT_LOCAL,&Ainfo));
2186       PetscCall(MatGetInfo(B,MAT_LOCAL,&Binfo));
2187       nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2188 
2189       if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2190       PetscCall(MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm));
2191 
2192       if (alg_scalable) {
2193         alg  = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2194         PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2195         PetscCall(PetscInfo(B,"Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n",B->cmap->N,(double)(product->fill*nz_local)));
2196       }
2197     }
2198   }
2199 
2200   /* Get runtime option */
2201   if (product->api_user) {
2202     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMult","Mat");
2203     PetscCall(PetscOptionsEList("-matmatmult_via","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg));
2204     PetscOptionsEnd();
2205   } else {
2206     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AB","Mat");
2207     PetscCall(PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg));
2208     PetscOptionsEnd();
2209   }
2210   if (flg) {
2211     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2212   }
2213 
2214   C->ops->productsymbolic = MatProductSymbolic_AB_MPIAIJ_MPIAIJ;
2215   PetscFunctionReturn(0);
2216 }
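/*
  Sketch of how the algorithm chosen above is overridden at runtime, using the option
  names registered in this function ("app" stands for any PETSc program):

    ./app -matmatmult_via scalable          # product created through MatMatMult()
    ./app -mat_product_algorithm scalable   # product created through the MatProduct API

  for example together with

    Mat C;
    PetscCall(MatMatMult(A,B,MAT_INITIAL_MATRIX,PETSC_DEFAULT,&C));
*/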
2217 
2218 /* Set options for MatTransposeMatMultXXX_MPIAIJ_MPIAIJ */
2219 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_AtB(Mat C)
2220 {
2221   Mat_Product    *product = C->product;
2222   Mat            A=product->A,B=product->B;
2223   const char     *algTypes[4] = {"scalable","nonscalable","at*b","backend"};
2224   PetscInt       nalg = 4;
2225   PetscInt       alg = 1; /* set default algorithm  */
2226   PetscBool      flg;
2227   MPI_Comm       comm;
2228 
2229   PetscFunctionBegin;
2230   /* Check matrix local sizes */
2231   PetscCall(PetscObjectGetComm((PetscObject)C,&comm));
2232   PetscCheck(A->rmap->rstart == B->rmap->rstart && A->rmap->rend == B->rmap->rend,PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A (%" PetscInt_FMT ", %" PetscInt_FMT ") != B (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->rmap->rstart,A->rmap->rend,B->rmap->rstart,B->rmap->rend);
2233 
2234   /* Set default algorithm */
2235   PetscCall(PetscStrcmp(C->product->alg,"default",&flg));
2236   if (flg) {
2237     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2238   }
2239 
2240   /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2241   if (alg && B->cmap->N > 100000) { /* may switch to scalable algorithm as default */
2242     MatInfo     Ainfo,Binfo;
2243     PetscInt    nz_local;
2244     PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;
2245 
2246     PetscCall(MatGetInfo(A,MAT_LOCAL,&Ainfo));
2247     PetscCall(MatGetInfo(B,MAT_LOCAL,&Binfo));
2248     nz_local = (PetscInt)(Ainfo.nz_allocated + Binfo.nz_allocated);
2249 
2250     if (B->cmap->N > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2251     PetscCall(MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm));
2252 
2253     if (alg_scalable) {
2254       alg  = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2255       PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2256       PetscCall(PetscInfo(B,"Use scalable algorithm, BN %" PetscInt_FMT ", fill*nz_allocated %g\n",B->cmap->N,(double)(product->fill*nz_local)));
2257     }
2258   }
2259 
2260   /* Get runtime option */
2261   if (product->api_user) {
2262     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatTransposeMatMult","Mat");
2263     PetscCall(PetscOptionsEList("-mattransposematmult_via","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg));
2264     PetscOptionsEnd();
2265   } else {
2266     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_AtB","Mat");
2267     PetscCall(PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatTransposeMatMult",algTypes,nalg,algTypes[alg],&alg,&flg));
2268     PetscOptionsEnd();
2269   }
2270   if (flg) {
2271     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2272   }
2273 
2274   C->ops->productsymbolic = MatProductSymbolic_AtB_MPIAIJ_MPIAIJ;
2275   PetscFunctionReturn(0);
2276 }
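/*
  The analogous runtime switches for C = A^T * B products are -mattransposematmult_via
  {scalable | nonscalable | at*b | backend} for MatTransposeMatMult() callers, and
  -mat_product_algorithm for callers that go through MatProductCreate() followed by
  MatProductSetType(C,MATPRODUCT_AtB), as handled in the function above.
*/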
2277 
2278 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_PtAP(Mat C)
2279 {
2280   Mat_Product    *product = C->product;
2281   Mat            A=product->A,P=product->B;
2282   MPI_Comm       comm;
2283   PetscBool      flg;
2284   PetscInt       alg=1; /* set default algorithm */
2285 #if !defined(PETSC_HAVE_HYPRE)
2286   const char     *algTypes[5] = {"scalable","nonscalable","allatonce","allatonce_merged","backend"};
2287   PetscInt       nalg=5;
2288 #else
2289   const char     *algTypes[6] = {"scalable","nonscalable","allatonce","allatonce_merged","backend","hypre"};
2290   PetscInt       nalg=6;
2291 #endif
2292   PetscInt       pN=P->cmap->N;
2293 
2294   PetscFunctionBegin;
2295   /* Check matrix local sizes */
2296   PetscCall(PetscObjectGetComm((PetscObject)C,&comm));
2297   PetscCheck(A->rmap->rstart == P->rmap->rstart && A->rmap->rend == P->rmap->rend,PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
2298   PetscCheck(A->cmap->rstart == P->rmap->rstart && A->cmap->rend == P->rmap->rend,PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%" PetscInt_FMT ", %" PetscInt_FMT ") != Prow (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
2299 
2300   /* Set "nonscalable" as default algorithm */
2301   PetscCall(PetscStrcmp(C->product->alg,"default",&flg));
2302   if (flg) {
2303     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2304 
2305     /* Set "scalable" as default if BN and local nonzeros of A and B are large */
2306     if (pN > 100000) {
2307       MatInfo     Ainfo,Pinfo;
2308       PetscInt    nz_local;
2309       PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;
2310 
2311       PetscCall(MatGetInfo(A,MAT_LOCAL,&Ainfo));
2312       PetscCall(MatGetInfo(P,MAT_LOCAL,&Pinfo));
2313       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
2314 
2315       if (pN > product->fill*nz_local) alg_scalable_loc = PETSC_TRUE;
2316       PetscCall(MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm));
2317 
2318       if (alg_scalable) {
2319         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
2320         PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2321       }
2322     }
2323   }
2324 
2325   /* Get runtime option */
2326   if (product->api_user) {
2327     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatPtAP","Mat");
2328     PetscCall(PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg));
2329     PetscOptionsEnd();
2330   } else {
2331     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_PtAP","Mat");
2332     PetscCall(PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg));
2333     PetscOptionsEnd();
2334   }
2335   if (flg) {
2336     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2337   }
2338 
2339   C->ops->productsymbolic = MatProductSymbolic_PtAP_MPIAIJ_MPIAIJ;
2340   PetscFunctionReturn(0);
2341 }
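/*
  A PtAP usage sketch, assuming the convenience interface, which computes C = P^T * A * P:

    Mat C;
    PetscCall(MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C));   // fill estimate of 2.0

  The algorithm is selectable through -matptap_via or -mat_product_algorithm with the
  names listed above ("hypre" only when PETSc is configured with hypre).
*/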
2342 
2343 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_RARt(Mat C)
2344 {
2345   Mat_Product *product = C->product;
2346   Mat         A = product->A,R=product->B;
2347 
2348   PetscFunctionBegin;
2349   /* Check matrix local sizes */
2350   PetscCheck(A->cmap->n == R->cmap->n && A->rmap->n == R->cmap->n,PETSC_COMM_SELF,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, A local (%" PetscInt_FMT ", %" PetscInt_FMT "), R local (%" PetscInt_FMT ",%" PetscInt_FMT ")",A->rmap->n,A->cmap->n,R->rmap->n,R->cmap->n);
2351 
2352   C->ops->productsymbolic = MatProductSymbolic_RARt_MPIAIJ_MPIAIJ;
2353   PetscFunctionReturn(0);
2354 }
2355 
2356 /*
2357  Set options for ABC = A*B*C = A*(B*C); the algorithm for ABC must be chosen from AB's algorithms
2358 */
2359 static PetscErrorCode MatProductSetFromOptions_MPIAIJ_ABC(Mat C)
2360 {
2361   Mat_Product    *product = C->product;
2362   PetscBool      flg = PETSC_FALSE;
2363   PetscInt       alg = 1; /* default algorithm */
2364   const char     *algTypes[3] = {"scalable","nonscalable","seqmpi"};
2365   PetscInt       nalg = 3;
2366 
2367   PetscFunctionBegin;
2368   /* Set default algorithm */
2369   PetscCall(PetscStrcmp(C->product->alg,"default",&flg));
2370   if (flg) {
2371     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2372   }
2373 
2374   /* Get runtime option */
2375   if (product->api_user) {
2376     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatMatMatMult","Mat");
2377     PetscCall(PetscOptionsEList("-matmatmatmult_via","Algorithmic approach","MatMatMatMult",algTypes,nalg,algTypes[alg],&alg,&flg));
2378     PetscOptionsEnd();
2379   } else {
2380     PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatProduct_ABC","Mat");
2381     PetscCall(PetscOptionsEList("-mat_product_algorithm","Algorithmic approach","MatProduct_ABC",algTypes,nalg,algTypes[alg],&alg,&flg));
2382     PetscOptionsEnd();
2383   }
2384   if (flg) {
2385     PetscCall(MatProductSetAlgorithm(C,(MatProductAlgorithm)algTypes[alg]));
2386   }
2387 
2388   C->ops->matmatmultsymbolic = MatMatMatMultSymbolic_MPIAIJ_MPIAIJ_MPIAIJ;
2389   C->ops->productsymbolic    = MatProductSymbolic_ABC;
2390   PetscFunctionReturn(0);
2391 }
2392 
2393 PETSC_INTERN PetscErrorCode MatProductSetFromOptions_MPIAIJ(Mat C)
2394 {
2395   Mat_Product    *product = C->product;
2396 
2397   PetscFunctionBegin;
2398   switch (product->type) {
2399   case MATPRODUCT_AB:
2400     PetscCall(MatProductSetFromOptions_MPIAIJ_AB(C));
2401     break;
2402   case MATPRODUCT_AtB:
2403     PetscCall(MatProductSetFromOptions_MPIAIJ_AtB(C));
2404     break;
2405   case MATPRODUCT_PtAP:
2406     PetscCall(MatProductSetFromOptions_MPIAIJ_PtAP(C));
2407     break;
2408   case MATPRODUCT_RARt:
2409     PetscCall(MatProductSetFromOptions_MPIAIJ_RARt(C));
2410     break;
2411   case MATPRODUCT_ABC:
2412     PetscCall(MatProductSetFromOptions_MPIAIJ_ABC(C));
2413     break;
2414   default:
2415     break;
2416   }
2417   PetscFunctionReturn(0);
2418 }
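/*
  The dispatcher above is reached through the generic MatProduct workflow; a minimal
  sketch, assuming two assembled MPIAIJ matrices A and B:

    Mat C;
    PetscCall(MatProductCreate(A,B,NULL,&C));
    PetscCall(MatProductSetType(C,MATPRODUCT_AB));   // or MATPRODUCT_AtB, MATPRODUCT_PtAP, ...
    PetscCall(MatProductSetFromOptions(C));          // ends up in MatProductSetFromOptions_MPIAIJ()
    PetscCall(MatProductSymbolic(C));
    PetscCall(MatProductNumeric(C));
    PetscCall(MatDestroy(&C));
*/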
2419