xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision 60552ceab132e88826086a7d3e9f9db7a7ec622a)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
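/*
   Typical calling sequence (an illustrative sketch, not code from this file; A and P are
   assumed to be assembled MATMPIAIJ matrices with compatible layouts):

       Mat C;
       ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   symbolic + numeric phase
       ...                                                            entries of A (and/or P) change
       ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     numeric phase only, reuses the symbolic data
       ierr = MatDestroy(&C);CHKERRQ(ierr);

   The fill argument (2.0 above) is only an estimate of nnz(C)/(nnz(A)+nnz(P)) used to
   preallocate work space in the symbolic phase.
*/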
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 #include <petsctime.h>
12 
13 /* #define PTAP_PROFILE */
14 
15 PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
16 {
17   PetscErrorCode    ierr;
18   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
19   Mat_APMPI         *ptap=a->ap;
20   PetscBool         iascii;
21   PetscViewerFormat format;
22 
23   PetscFunctionBegin;
24   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
25   if (iascii) {
26     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
27     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
28       if (ptap->algType == 0) {
29         ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
30       } else if (ptap->algType == 1) {
31         ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
32       }
33     }
34   }
35   ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
36   PetscFunctionReturn(0);
37 }
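/*
   For illustration (assumed standard PETSc options handling, not specific to this file): the
   message above appears when the product matrix is viewed with the info format, e.g.

       mpiexec -n 4 ./app -mat_view ::ascii_info

   which reports whether the scalable or the nonscalable MatPtAP() implementation built C.
*/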
38 
39 PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
40 {
41   PetscErrorCode      ierr;
42   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data;
43   Mat_APMPI           *ptap=a->ap;
44   Mat_Merge_SeqsToMPI *merge;
45 
46   PetscFunctionBegin;
47   if (!ptap) PetscFunctionReturn(0);
48 
49   ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
50   ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
51   ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
52   ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
53   ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
54   ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
55   ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
56   if (ptap->AP_loc) { /* used by alg_rap */
57     Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
58     ierr = PetscFree(ap->i);CHKERRQ(ierr);
59     ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
60     ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
61   } else { /* used by alg_ptap */
62     ierr = PetscFree(ptap->api);CHKERRQ(ierr);
63     ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
64   }
65   ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
66   ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
67   if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
68 
69   ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);
70 
71   merge=ptap->merge;
72   if (merge) { /* used by alg_ptap */
73     ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
74     ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
75     ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
76     ierr = PetscFree(merge->bi);CHKERRQ(ierr);
77     ierr = PetscFree(merge->bj);CHKERRQ(ierr);
78     ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
79     ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
80     ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
81     ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
82     ierr = PetscFree(merge->coi);CHKERRQ(ierr);
83     ierr = PetscFree(merge->coj);CHKERRQ(ierr);
84     ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
85     ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
86     ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
87   }
88 
89   A->ops->destroy = ptap->destroy;
90   ierr = PetscFree(a->ap);CHKERRQ(ierr);
91   PetscFunctionReturn(0);
92 }
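/*
   Note (illustrative; follows from the !ptap checks in the numeric routines below): once these
   intermediate structures are freed, C can no longer be updated in place, so a subsequent

       ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);

   fails with PETSC_ERR_ARG_WRONGSTATE. Free them (or pass -mat_freeintermediatedatastructures)
   only when C will not be recomputed from updated A and P.
*/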
93 
94 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
95 {
96   PetscErrorCode ierr;
97 
98   PetscFunctionBegin;
99   ierr = (*A->ops->freeintermediatedatastructures)(A);CHKERRQ(ierr);
100   ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
101   PetscFunctionReturn(0);
102 }
103 
104 PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
105 {
106   PetscErrorCode ierr;
107   PetscBool      flg;
108   MPI_Comm       comm;
109 #if !defined(PETSC_HAVE_HYPRE)
110   const char          *algTypes[2] = {"scalable","nonscalable"};
111   PetscInt            nalg=2;
112 #else
113   const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
114   PetscInt            nalg=3;
115 #endif
116   PetscInt            pN=P->cmap->N,alg=1; /* set default algorithm */
117 
118   PetscFunctionBegin;
119   /* check if matrix local sizes are compatible */
120   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
121   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
122   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
123 
124   if (scall == MAT_INITIAL_MATRIX) {
125     /* pick an algorithm */
126     ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
127     PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
128     ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
129     ierr = PetscOptionsEnd();CHKERRQ(ierr);
130 
131     if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
132       MatInfo     Ainfo,Pinfo;
133       PetscInt    nz_local;
134       PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;
135 
136       ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
137       ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
138       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
139 
140       if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
141       ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);
142 
143       if (alg_scalable) {
144         alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable algorithm */
145       }
146     }
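    /*
       A worked example of the heuristic above (made-up numbers, for illustration only): with
       fill=2.0, a rank whose local nz_allocated(A)+nz_allocated(P) is 40000 and whose global
       column count pN is 100001 satisfies pN > fill*nz_local (100001 > 80000), so that rank
       sets alg_scalable_loc, and the MPI_LOR reduction switches every rank to alg=0.
       The choice can always be forced explicitly, e.g. -matptap_via scalable or
       -matptap_via nonscalable (the default, alg=1).
    */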
147 
148     switch (alg) {
149     case 1:
150       /* do R=P^T locally, then C=R*A*P -- nonscalable */
151       ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
152       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
153       ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
154       break;
155 #if defined(PETSC_HAVE_HYPRE)
156     case 2:
157       /* Use boomerAMGBuildCoarseOperator */
158       ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
159       PetscFunctionReturn(0);
160       break;
161 #endif
162     default:
163       /* do R=P^T locally, then C=R*A*P -- scalable */
164       ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
165       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
166       ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
167       break;
168     }
169   }
170   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
171   ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
172   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
173   PetscFunctionReturn(0);
174 }
175 
176 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
177 {
178   PetscErrorCode    ierr;
179   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
180   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
181   Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
182   Mat_APMPI         *ptap = c->ap;
183   Mat               AP_loc,C_loc,C_oth;
184   PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
185   PetscScalar       *apa;
186   const PetscInt    *cols;
187   const PetscScalar *vals;
188   PetscBool         freestruct;
189 
190   PetscFunctionBegin;
191   if (!ptap) {
192     MPI_Comm comm;
193     ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
194     SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
195   }
196 
197   ierr = MatZeroEntries(C);CHKERRQ(ierr);
198 
199   /* 1) get R = Pd^T,Ro = Po^T */
200   if (ptap->reuse == MAT_REUSE_MATRIX) {
201     ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
202     ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
203   }
204 
205   /* 2) get AP_loc */
206   AP_loc = ptap->AP_loc;
207   ap = (Mat_SeqAIJ*)AP_loc->data;
208 
209   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
210   /*-----------------------------------------------------*/
211   if (ptap->reuse == MAT_REUSE_MATRIX) {
212     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
213     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
214     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
215   }
216 
217   /* 2-2) compute numeric A_loc*P - dominating part */
218   /* ---------------------------------------------- */
219   /* get data from symbolic products */
220   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
221   if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
222   else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");
223 
224   api   = ap->i;
225   apj   = ap->j;
226   for (i=0; i<am; i++) {
227     /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
228     apnz = api[i+1] - api[i];
229     apa = ap->a + api[i];
230     ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
231     AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
232     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
233   }
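  /*
     Conceptual sketch of AProw_scalable() (not the macro's literal code): row i of AP = A*P is
     accumulated by merging scaled sparse rows of P into the precomputed pattern apj,

         for each nonzero A(i,k) in Ad[i,:]:  apa[position of column j in apj] += Ad(i,k)*P_loc(k,j)
         for each nonzero A(i,k) in Ao[i,:]:  apa[position of column j in apj] += Ao(i,k)*P_oth(k,j)

     where the position is located within the sorted slice apj[api[i]..api[i+1]-1]; no dense
     work array of length pN is needed, which is what makes this variant scalable.
  */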
234 
235   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
236   ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
237   ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
238 
239   C_loc = ptap->C_loc;
240   C_oth = ptap->C_oth;
241 
242   /* add C_loc and C_oth to C */
243   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
244 
245   /* C_loc -> C */
246   cm    = C_loc->rmap->N;
247   c_seq = (Mat_SeqAIJ*)C_loc->data;
248   cols = c_seq->j;
249   vals = c_seq->a;
250 
251   /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
252   /* when there are no off-processor parts.  */
253   /* If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
254   /* is no longer true. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from */
255   /* a table, and other, more complex stuff has to be done. */
256   if (C->assembled) {
257     C->was_assembled = PETSC_TRUE;
258     C->assembled     = PETSC_FALSE;
259   }
260   if (C->was_assembled) {
261     for (i=0; i<cm; i++) {
262       ncols = c_seq->i[i+1] - c_seq->i[i];
263       row = rstart + i;
264       ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
265       cols += ncols; vals += ncols;
266     }
267   } else {
268     ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
269   }
270 
271   /* Co -> C, off-processor part */
272   cm = C_oth->rmap->N;
273   c_seq = (Mat_SeqAIJ*)C_oth->data;
274   cols = c_seq->j;
275   vals = c_seq->a;
276   for (i=0; i<cm; i++) {
277     ncols = c_seq->i[i+1] - c_seq->i[i];
278     row = p->garray[i];
279     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
280     cols += ncols; vals += ncols;
281   }
282   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
283   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
284 
285   ptap->reuse = MAT_REUSE_MATRIX;
286 
287   ierr = PetscObjectOptionsBegin((PetscObject)C);CHKERRQ(ierr);
288   PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
289   /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be recomputed from updated A and P */
290   freestruct = PETSC_FALSE;
291   ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr);
292   if (freestruct) {
293     ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
294   }
295   ierr = PetscOptionsEnd();CHKERRQ(ierr);
296   PetscFunctionReturn(0);
297 }
298 
299 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
300 {
301   PetscErrorCode      ierr;
302   Mat_APMPI           *ptap;
303   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
304   MPI_Comm            comm;
305   PetscMPIInt         size,rank;
306   Mat                 Cmpi,P_loc,P_oth;
307   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
308   PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
309   PetscInt            *lnk,i,k,pnz,row,nsend;
310   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
311   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
312   PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
313   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
314   MPI_Request         *swaits,*rwaits;
315   MPI_Status          *sstatus,rstatus;
316   PetscLayout         rowmap;
317   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
318   PetscMPIInt         *len_r,*id_r;    /* lengths and source ranks of the messages to be received */
319   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
320   Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
321   PetscScalar         *apv;
322   PetscTable          ta;
323   MatType             mtype;
324 #if defined(PETSC_USE_INFO)
325   PetscReal           apfill;
326 #endif
327 
328   PetscFunctionBegin;
329   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
330   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
331   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
332 
333   if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
334 
335   /* create symbolic parallel matrix Cmpi */
336   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
337   ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
338   ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
339 
340   /* create struct Mat_APMPI and attach it to C later */
341   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
342   ptap->reuse = MAT_INITIAL_MATRIX;
343   ptap->algType = 0;
344 
345   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
346   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
347   /* get P_loc by taking all local rows of P */
348   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);
349 
350   ptap->P_loc = P_loc;
351   ptap->P_oth = P_oth;
352 
353   /* (0) compute Rd = Pd^T, Ro = Po^T  */
354   /* --------------------------------- */
355   ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
356   ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
357 
358   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
359   /* ----------------------------------------------------------------- */
360   p_loc  = (Mat_SeqAIJ*)P_loc->data;
361   if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;
362 
363   /* create and initialize a linked list */
364   ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
365   MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
366   MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
367   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
368 
369   ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
370 
371   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
372   if (ao) {
373     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
374   } else {
375     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
376   }
377   current_space = free_space;
378   nspacedouble  = 0;
379 
380   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
381   api[0] = 0;
382   for (i=0; i<am; i++) {
383     /* diagonal portion: Ad[i,:]*P */
384     ai = ad->i; pi = p_loc->i;
385     nzi = ai[i+1] - ai[i];
386     aj  = ad->j + ai[i];
387     for (j=0; j<nzi; j++) {
388       row  = aj[j];
389       pnz  = pi[row+1] - pi[row];
390       Jptr = p_loc->j + pi[row];
391       /* add non-zero cols of P into the sorted linked list lnk */
392       ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
393     }
394     /* off-diagonal portion: Ao[i,:]*P */
395     if (ao) {
396       ai = ao->i; pi = p_oth->i;
397       nzi = ai[i+1] - ai[i];
398       aj  = ao->j + ai[i];
399       for (j=0; j<nzi; j++) {
400         row  = aj[j];
401         pnz  = pi[row+1] - pi[row];
402         Jptr = p_oth->j + pi[row];
403         ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
404       }
405     }
406     apnz     = lnk[0];
407     api[i+1] = api[i] + apnz;
408 
409     /* if free space is not available, double the total space in the list */
410     if (current_space->local_remaining<apnz) {
411       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
412       nspacedouble++;
413     }
414 
415     /* Copy data into free space, then initialize lnk */
416     ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
417 
418     current_space->array           += apnz;
419     current_space->local_used      += apnz;
420     current_space->local_remaining -= apnz;
421   }
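  /*
     Small example of the symbolic merge above (illustrative): if row i of Ad has nonzero
     columns {0,3}, and rows 0 and 3 of P_loc have column sets {1,4} and {2,4}, the condensed
     linked list collects the sorted union {1,2,4}, giving apnz=3 entries of apj for this row.
     The FreeSpace chunk is only enlarged (counted in nspacedouble) when a row's union no
     longer fits in the remaining space.
  */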
422   /* Allocate space for apj and apv, initialize apj, and */
423   /* destroy list of free space and other temporary array(s) */
424   ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
425   ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
426   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
427 
428   /* Create AP_loc for reuse */
429   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
430 
431 #if defined(PETSC_USE_INFO)
432   if (ao) {
433     apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
434   } else {
435     apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
436   }
437   ptap->AP_loc->info.mallocs           = nspacedouble;
438   ptap->AP_loc->info.fill_ratio_given  = fill;
439   ptap->AP_loc->info.fill_ratio_needed = apfill;
440 
441   if (api[am]) {
442     ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
443     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
444   } else {
445     ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty.\n");CHKERRQ(ierr);
446   }
447 #endif
448 
449   /* (2-1) compute symbolic Co = Ro*AP_loc  */
450   /* ------------------------------------ */
451   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
452 
453   /* (3) send coj of C_oth to other processors  */
454   /* ------------------------------------------ */
455   /* determine row ownership */
456   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
457   rowmap->n  = pn;
458   rowmap->bs = 1;
459   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
460   owners = rowmap->range;
461 
462   /* determine the number of messages to send, their lengths */
463   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
464   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
465   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
466 
467   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
468   coi   = c_oth->i; coj = c_oth->j;
469   con   = ptap->C_oth->rmap->n;
470   proc  = 0;
471   for (i=0; i<con; i++) {
472     while (prmap[i] >= owners[proc+1]) proc++;
473     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
474     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
475   }
476 
477   len          = 0; /* max length of buf_si[], see (4) */
478   owners_co[0] = 0;
479   nsend        = 0;
480   for (proc=0; proc<size; proc++) {
481     owners_co[proc+1] = owners_co[proc] + len_si[proc];
482     if (len_s[proc]) {
483       nsend++;
484       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
485       len         += len_si[proc];
486     }
487   }
488 
489   /* determine the number and length of messages to receive for coi and coj  */
490   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
491   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
492 
493   /* post the Irecv and Isend of coj */
494   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
495   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
496   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
497   for (proc=0, k=0; proc<size; proc++) {
498     if (!len_s[proc]) continue;
499     i    = owners_co[proc];
500     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
501     k++;
502   }
503 
504   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
505   /* ---------------------------------------- */
506   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
507   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
508 
509   /* receives coj are complete */
510   for (i=0; i<nrecv; i++) {
511     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
512   }
513   ierr = PetscFree(rwaits);CHKERRQ(ierr);
514   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
515 
516   /* add received column indices into ta to update Crmax */
517   for (k=0; k<nrecv; k++) {/* k-th received message */
518     Jptr = buf_rj[k];
519     for (j=0; j<len_r[k]; j++) {
520       ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
521     }
522   }
523   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
524   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
525 
526   /* (4) send and recv coi */
527   /*-----------------------*/
528   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
529   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
530   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
531   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
532   for (proc=0,k=0; proc<size; proc++) {
533     if (!len_s[proc]) continue;
534     /* form outgoing message for i-structure:
535          buf_si[0]:                 nrows to be sent
536                [1:nrows]:           row index (global)
537                [nrows+1:2*nrows+1]: i-structure index
538     */
539     /*-------------------------------------------*/
540     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
541     buf_si_i    = buf_si + nrows+1;
542     buf_si[0]   = nrows;
543     buf_si_i[0] = 0;
544     nrows       = 0;
545     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
546       nzi = coi[i+1] - coi[i];
547       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
548       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
549       nrows++;
550     }
551     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
552     k++;
553     buf_si += len_si[proc];
554   }
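  /*
     Example of the i-structure message packed above (illustrative): sending two rows of Co
     with local row indices 5 and 9 on [proc], holding 3 and 2 nonzeros respectively, gives
     len_si[proc] = 2*(2+1) = 6 and

         buf_si = [2, 5, 9, 0, 3, 5]
                   nrows = 2; rows 5,9; i-structure 0,3,5 (running nonzero counts)
  */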
555   for (i=0; i<nrecv; i++) {
556     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
557   }
558   ierr = PetscFree(rwaits);CHKERRQ(ierr);
559   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
560 
561   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
562   ierr = PetscFree(len_ri);CHKERRQ(ierr);
563   ierr = PetscFree(swaits);CHKERRQ(ierr);
564   ierr = PetscFree(buf_s);CHKERRQ(ierr);
565 
566   /* (5) compute the local portion of Cmpi      */
567   /* ------------------------------------------ */
568   /* set initial free space to Crmax, sufficient for holding the nonzeros in each row of Cmpi */
569   ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
570   current_space = free_space;
571 
572   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
573   for (k=0; k<nrecv; k++) {
574     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
575     nrows       = *buf_ri_k[k];
576     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
577     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
578   }
579 
580   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
581   ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
582   for (i=0; i<pn; i++) {
583     /* add C_loc into Cmpi */
584     nzi  = c_loc->i[i+1] - c_loc->i[i];
585     Jptr = c_loc->j + c_loc->i[i];
586     ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
587 
588     /* add received col data into lnk */
589     for (k=0; k<nrecv; k++) { /* k-th received message */
590       if (i == *nextrow[k]) { /* i-th row */
591         nzi  = *(nextci[k]+1) - *nextci[k];
592         Jptr = buf_rj[k] + *nextci[k];
593         ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
594         nextrow[k]++; nextci[k]++;
595       }
596     }
597     nzi = lnk[0];
598 
599     /* copy data into free space, then initialize lnk */
600     ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
601     ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
602   }
603   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
604   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
605   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
606 
607   /* local sizes and preallocation */
608   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
609   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
610   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
611   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
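  /*
     Preallocation note (standard PETSc idiom, stated here for clarity): for each of the pn
     locally owned rows of Cmpi, MatPreallocateSet() above splits the nzi column indices into
     dnz[] (columns owned by this rank, the diagonal block) and onz[] (all other columns);
     MatMPIAIJSetPreallocation() consumes those counts and MatPreallocateFinalize() frees them.
  */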
612 
613   /* members in merge */
614   ierr = PetscFree(id_r);CHKERRQ(ierr);
615   ierr = PetscFree(len_r);CHKERRQ(ierr);
616   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
617   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
618   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
619   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
620   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
621 
622   /* attach the supporting struct to Cmpi for reuse */
623   c = (Mat_MPIAIJ*)Cmpi->data;
624   c->ap           = ptap;
625   ptap->duplicate = Cmpi->ops->duplicate;
626   ptap->destroy   = Cmpi->ops->destroy;
627   ptap->view      = Cmpi->ops->view;
628 
629   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
630   Cmpi->assembled        = PETSC_FALSE;
631   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
632   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
633   Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
634   Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
635   *C                     = Cmpi;
636   PetscFunctionReturn(0);
637 }
638 
639 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
640 {
641   PetscErrorCode      ierr;
642   Mat_APMPI           *ptap;
643   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
644   MPI_Comm            comm;
645   PetscMPIInt         size,rank;
646   Mat                 Cmpi;
647   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
648   PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
649   PetscInt            *lnk,i,k,pnz,row,nsend;
650   PetscBT             lnkbt;
651   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
652   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
653   PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
654   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
655   MPI_Request         *swaits,*rwaits;
656   MPI_Status          *sstatus,rstatus;
657   PetscLayout         rowmap;
658   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
659   PetscMPIInt         *len_r,*id_r;    /* lengths and source ranks of the messages to be received */
660   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
661   Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
662   PetscScalar         *apv;
663   PetscTable          ta;
664   MatType             mtype;
665 #if defined(PETSC_USE_INFO)
666   PetscReal           apfill;
667 #endif
668 
669   PetscFunctionBegin;
670   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
671   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
672   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
673 
674   if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
675 
676   /* create symbolic parallel matrix Cmpi */
677   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
678   ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
679   ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
680 
681   /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
682   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
683 
684   /* create struct Mat_APMPI and attach it to C later */
685   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
686   ptap->reuse = MAT_INITIAL_MATRIX;
687   ptap->algType = 1;
688 
689   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
690   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
691   /* get P_loc by taking all local rows of P */
692   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
693 
694   /* (0) compute Rd = Pd^T, Ro = Po^T  */
695   /* --------------------------------- */
696   ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
697   ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
698 
699   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
700   /* ----------------------------------------------------------------- */
701   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
702   if (ptap->P_oth) p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
703 
704   /* create and initialize a linked list */
705   ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
706   MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
707   MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
708   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
709   /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */
710 
711   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
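  /*
     Difference from the scalable symbolic path (noted for clarity): this variant backs the
     condensed linked list with a PetscBT bit array of length pN (lnkbt), so membership tests
     are O(1) at the cost of O(pN) bits per process, whereas PetscLLCondensedCreate_Scalable()
     uses storage proportional to Crmax only.
  */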
712 
713   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
714   if (ao) {
715     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
716   } else {
717     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
718   }
719   current_space = free_space;
720   nspacedouble  = 0;
721 
722   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
723   api[0] = 0;
724   for (i=0; i<am; i++) {
725     /* diagonal portion: Ad[i,:]*P */
726     ai = ad->i; pi = p_loc->i;
727     nzi = ai[i+1] - ai[i];
728     aj  = ad->j + ai[i];
729     for (j=0; j<nzi; j++) {
730       row  = aj[j];
731       pnz  = pi[row+1] - pi[row];
732       Jptr = p_loc->j + pi[row];
733       /* add non-zero cols of P into the sorted linked list lnk */
734       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
735     }
736     /* off-diagonal portion: Ao[i,:]*P */
737     if (ao) {
738       ai = ao->i; pi = p_oth->i;
739       nzi = ai[i+1] - ai[i];
740       aj  = ao->j + ai[i];
741       for (j=0; j<nzi; j++) {
742         row  = aj[j];
743         pnz  = pi[row+1] - pi[row];
744         Jptr = p_oth->j + pi[row];
745         ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
746       }
747     }
748     apnz     = lnk[0];
749     api[i+1] = api[i] + apnz;
750     if (ap_rmax < apnz) ap_rmax = apnz;
751 
752     /* if free space is not available, double the total space in the list */
753     if (current_space->local_remaining<apnz) {
754       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
755       nspacedouble++;
756     }
757 
758     /* Copy data into free space, then initialize lnk */
759     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
760 
761     current_space->array           += apnz;
762     current_space->local_used      += apnz;
763     current_space->local_remaining -= apnz;
764   }
765   /* Allocate space for apj and apv, initialize apj, and */
766   /* destroy list of free space and other temporary array(s) */
767   ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
768   ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
769   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
770 
771   /* Create AP_loc for reuse */
772   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
773 
774 #if defined(PETSC_USE_INFO)
775   if (ao) {
776     apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
777   } else {
778     apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
779   }
780   ptap->AP_loc->info.mallocs           = nspacedouble;
781   ptap->AP_loc->info.fill_ratio_given  = fill;
782   ptap->AP_loc->info.fill_ratio_needed = apfill;
783 
784   if (api[am]) {
785     ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
786     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
787   } else {
788     ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty.\n");CHKERRQ(ierr);
789   }
790 #endif
791 
792   /* (2-1) compute symbolic Co = Ro*AP_loc  */
793   /* ------------------------------------ */
794   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
795 
796   /* (3) send coj of C_oth to other processors  */
797   /* ------------------------------------------ */
798   /* determine row ownership */
799   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
800   rowmap->n  = pn;
801   rowmap->bs = 1;
802   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
803   owners = rowmap->range;
804 
805   /* determine the number of messages to send, their lengths */
806   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
807   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
808   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
809 
810   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
811   coi   = c_oth->i; coj = c_oth->j;
812   con   = ptap->C_oth->rmap->n;
813   proc  = 0;
814   for (i=0; i<con; i++) {
815     while (prmap[i] >= owners[proc+1]) proc++;
816     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
817     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
818   }
819 
820   len          = 0; /* max length of buf_si[], see (4) */
821   owners_co[0] = 0;
822   nsend        = 0;
823   for (proc=0; proc<size; proc++) {
824     owners_co[proc+1] = owners_co[proc] + len_si[proc];
825     if (len_s[proc]) {
826       nsend++;
827       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
828       len         += len_si[proc];
829     }
830   }
831 
832   /* determine the number and length of messages to receive for coi and coj  */
833   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
834   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
835 
836   /* post the Irecv and Isend of coj */
837   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
838   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
839   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
840   for (proc=0, k=0; proc<size; proc++) {
841     if (!len_s[proc]) continue;
842     i    = owners_co[proc];
843     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
844     k++;
845   }
846 
847   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
848   /* ---------------------------------------- */
849   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
850   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
851 
852   /* receives coj are complete */
853   for (i=0; i<nrecv; i++) {
854     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
855   }
856   ierr = PetscFree(rwaits);CHKERRQ(ierr);
857   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
858 
859   /* add received column indices into ta to update Crmax */
860   for (k=0; k<nrecv; k++) {/* k-th received message */
861     Jptr = buf_rj[k];
862     for (j=0; j<len_r[k]; j++) {
863       ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
864     }
865   }
866   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
867   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
868 
869   /* (4) send and recv coi */
870   /*-----------------------*/
871   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
872   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
873   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
874   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
875   for (proc=0,k=0; proc<size; proc++) {
876     if (!len_s[proc]) continue;
877     /* form outgoing message for i-structure:
878          buf_si[0]:                 nrows to be sent
879                [1:nrows]:           row index (global)
880                [nrows+1:2*nrows+1]: i-structure index
881     */
882     /*-------------------------------------------*/
883     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
884     buf_si_i    = buf_si + nrows+1;
885     buf_si[0]   = nrows;
886     buf_si_i[0] = 0;
887     nrows       = 0;
888     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
889       nzi = coi[i+1] - coi[i];
890       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
891       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
892       nrows++;
893     }
894     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
895     k++;
896     buf_si += len_si[proc];
897   }
898   for (i=0; i<nrecv; i++) {
899     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
900   }
901   ierr = PetscFree(rwaits);CHKERRQ(ierr);
902   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
903 
904   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
905   ierr = PetscFree(len_ri);CHKERRQ(ierr);
906   ierr = PetscFree(swaits);CHKERRQ(ierr);
907   ierr = PetscFree(buf_s);CHKERRQ(ierr);
908 
909   /* (5) compute the local portion of Cmpi      */
910   /* ------------------------------------------ */
911   /* set initial free space to Crmax, sufficient for holding the nonzeros in each row of Cmpi */
912   ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
913   current_space = free_space;
914 
915   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
916   for (k=0; k<nrecv; k++) {
917     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
918     nrows       = *buf_ri_k[k];
919     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
920     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of the k-th received i-structure */
921   }
922 
923   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
924   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
925   for (i=0; i<pn; i++) {
926     /* add C_loc into Cmpi */
927     nzi  = c_loc->i[i+1] - c_loc->i[i];
928     Jptr = c_loc->j + c_loc->i[i];
929     ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
930 
931     /* add received col data into lnk */
932     for (k=0; k<nrecv; k++) { /* k-th received message */
933       if (i == *nextrow[k]) { /* i-th row */
934         nzi  = *(nextci[k]+1) - *nextci[k];
935         Jptr = buf_rj[k] + *nextci[k];
936         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
937         nextrow[k]++; nextci[k]++;
938       }
939     }
940     nzi = lnk[0];
941 
942     /* copy data into free space, then initialize lnk */
943     ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
944     ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
945   }
946   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
947   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
948   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
949 
950   /* local sizes and preallocation */
951   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
952   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
953   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
954   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
955 
956   /* members in merge */
957   ierr = PetscFree(id_r);CHKERRQ(ierr);
958   ierr = PetscFree(len_r);CHKERRQ(ierr);
959   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
960   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
961   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
962   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
963   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
964 
965   /* attach the supporting struct to Cmpi for reuse */
966   c = (Mat_MPIAIJ*)Cmpi->data;
967   c->ap           = ptap;
968   ptap->duplicate = Cmpi->ops->duplicate;
969   ptap->destroy   = Cmpi->ops->destroy;
970   ptap->view      = Cmpi->ops->view;
971   ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
972 
973   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
974   Cmpi->assembled        = PETSC_FALSE;
975   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
976   Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
977   Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
978   *C                     = Cmpi;
979   PetscFunctionReturn(0);
980 }
981 
982 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
983 {
984   PetscErrorCode    ierr;
985   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
986   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
987   Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
988   Mat_APMPI         *ptap = c->ap;
989   Mat               AP_loc,C_loc,C_oth;
990   PetscInt          i,rstart,rend,cm,ncols,row;
991   PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
992   PetscScalar       *apa;
993   const PetscInt    *cols;
994   const PetscScalar *vals;
995   PetscBool         freestruct;
996 
997   PetscFunctionBegin;
998   if (!ptap) {
999     MPI_Comm comm;
1000     ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
1001     SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
1002   }
1003 
1004   ierr = MatZeroEntries(C);CHKERRQ(ierr);
1005   /* 1) get R = Pd^T,Ro = Po^T */
1006   if (ptap->reuse == MAT_REUSE_MATRIX) {
1007     ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
1008     ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
1009   }
1010 
1011   /* 2) get AP_loc */
1012   AP_loc = ptap->AP_loc;
1013   ap = (Mat_SeqAIJ*)AP_loc->data;
1014 
1015   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
1016   /*-----------------------------------------------------*/
1017   if (ptap->reuse == MAT_REUSE_MATRIX) {
1018     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
1019     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
1020     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
1021   }
1022 
1023   /* 2-2) compute numeric A_loc*P - dominating part */
1024   /* ---------------------------------------------- */
1025   /* get data from symbolic products */
1026   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1027   if (ptap->P_oth) {
1028     p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1029   }
1030   apa   = ptap->apa;
1031   api   = ap->i;
1032   apj   = ap->j;
1033   for (i=0; i<am; i++) {
1034     /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
1035     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
1036     apnz = api[i+1] - api[i];
1037     for (j=0; j<apnz; j++) {
1038       col = apj[j+api[i]];
1039       ap->a[j+ap->i[i]] = apa[col];
1040       apa[col] = 0.0;
1041     }
1042     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
1043   }
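  /*
     Sketch of the dense-accumulator scheme used above (not the macro's literal code):
     AProw_nonscalable() scatters row i of A*P into the length-pN work array apa, roughly
     apa[col] += A(i,k)*P(k,col), and the loop over apnz gathers those values back into the
     compressed row ap->a[api[i]..api[i+1]-1], zeroing the touched entries of apa for the next
     row. The O(pN) work array is what makes this variant fast but not memory-scalable.
  */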
1044 
1045   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
1046   ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
1047   ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
1048   C_loc = ptap->C_loc;
1049   C_oth = ptap->C_oth;
1050 
1051   /* add C_loc and C_oth to C */
1052   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
1053 
1054   /* C_loc -> C */
1055   cm    = C_loc->rmap->N;
1056   c_seq = (Mat_SeqAIJ*)C_loc->data;
1057   cols = c_seq->j;
1058   vals = c_seq->a;
1059 
1060 
1061   /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
1062   /* when there are no off-processor parts.  */
1063   /* If was_assembled is true, then the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
1064   /* is no longer true. Then the more complex function MatSetValues_MPIAIJ() has to be used, where the column index is looked up from */
1065   /* a table, and other, more complex stuff has to be done. */
1066   if (C->assembled) {
1067     C->was_assembled = PETSC_TRUE;
1068     C->assembled     = PETSC_FALSE;
1069   }
1070   if (C->was_assembled) {
1071     for (i=0; i<cm; i++) {
1072       ncols = c_seq->i[i+1] - c_seq->i[i];
1073       row = rstart + i;
1074       ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
1075       cols += ncols; vals += ncols;
1076     }
1077   } else {
1078     ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
1079   }
1080 
1081   /* Co -> C, off-processor part */
1082   cm = C_oth->rmap->N;
1083   c_seq = (Mat_SeqAIJ*)C_oth->data;
1084   cols = c_seq->j;
1085   vals = c_seq->a;
1086   for (i=0; i<cm; i++) {
1087     ncols = c_seq->i[i+1] - c_seq->i[i];
1088     row = p->garray[i];
1089     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
1090     cols += ncols; vals += ncols;
1091   }
1092 
1093   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1094   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1095 
1096   ptap->reuse = MAT_REUSE_MATRIX;
1097 
1098   ierr = PetscObjectOptionsBegin((PetscObject)C);CHKERRQ(ierr);
1099   PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
1100   /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be recomputed from updated A and P */
1101   freestruct = PETSC_FALSE;
1102   ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr);
1103   if (freestruct) {
1104     ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
1105   }
1106   ierr = PetscOptionsEnd();CHKERRQ(ierr);
1107   PetscFunctionReturn(0);
1108 }
1109