/*
  Defines projective product routines where A is an MPIAIJ matrix:
          C = P^T * A * P
*/
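
/*
  Typical user-level usage of the routines in this file goes through
  MatPtAP() (a minimal sketch; it assumes A and P are assembled MPIAIJ
  matrices with compatible layouts):

     Mat C;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   -- symbolic + numeric
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     -- numeric only, reusing symbolic data
     ierr = MatDestroy(&C);CHKERRQ(ierr);
*/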

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

#define PTAP_PROFILE

extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;
  Mat_MPIAIJ     *a   =(Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI    *ptap=a->ptap;

  PetscFunctionBegin;
  if (ptap) {
    Mat_Merge_SeqsToMPI *merge=ptap->merge;
    ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
    ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */

    ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);

    if (ptap->api) {ierr = PetscFree(ptap->api);CHKERRQ(ierr);}
    if (ptap->apj) {ierr = PetscFree(ptap->apj);CHKERRQ(ierr);}
    if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
    if (merge) {
      ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
      ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
      ierr = PetscFree(merge->bi);CHKERRQ(ierr);
      ierr = PetscFree(merge->bj);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
      ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
      ierr = PetscFree(merge->coi);CHKERRQ(ierr);
      ierr = PetscFree(merge->coj);CHKERRQ(ierr);
      ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
      ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
      ierr = merge->destroy(A);CHKERRQ(ierr);
      ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
    }
    ierr = PetscFree(ptap);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
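
/*
  MatDestroy_MPIAIJ_PtAP() above and MatDuplicate_MPIAIJ_MatPtAP() below rely
  on a hook pattern set up in the symbolic routines (a sketch of what they do):

     merge->destroy     = Cmpi->ops->destroy;       save the original method
     Cmpi->ops->destroy = MatDestroy_MPIAIJ_PtAP;   install the wrapper

  so the wrapper first frees the PtAP working data and then chains to the
  saved method via merge->destroy(A).
*/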

#undef __FUNCT__
#define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a     = (Mat_MPIAIJ*)A->data;
  Mat_PtAPMPI         *ptap  = a->ptap;
  Mat_Merge_SeqsToMPI *merge = ptap->merge;

  PetscFunctionBegin;
  ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);

  (*M)->ops->destroy   = merge->destroy;
  (*M)->ops->duplicate = merge->duplicate;
  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      newalg=PETSC_FALSE;

  PetscFunctionBegin;
  ierr = PetscOptionsGetBool(NULL,"-matptap_new",&newalg,NULL);CHKERRQ(ierr);
  if (scall == MAT_INITIAL_MATRIX) {
    ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
    if (newalg) {
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_new(A,P,fill,C);CHKERRQ(ierr);
    } else {
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
    }
    ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
  }
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  if (newalg) {
    ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ_new(A,P,*C);CHKERRQ(ierr);
  } else {
    ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
  }
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
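
/*
  Both the algorithm and the numeric kernel are runtime-selectable, e.g.
  (illustrative command lines; "./ex" stands for any PETSc program that
  calls MatPtAP on MPIAIJ matrices):

     mpiexec -n 4 ./ex -matptap_new        use the experimental path above
     mpiexec -n 4 ./ex -matptap_scalable   use the sparse (scalable) axpy kernel
*/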

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ_new"
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_new(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_PtAPMPI         *ptap;
  Mat_MPIAIJ          *p=(Mat_MPIAIJ*)P->data,*c;
  Mat                 AP,Cmpi;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_SeqAIJ          *p_loc,*ap;
  PetscInt            *pi_loc;
  PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ,nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
  PetscBT             lnkbt;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*pti,*ptj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rmax;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* check that the matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ierr        = PetscNew(&merge);CHKERRQ(ierr);
  ptap->merge = merge;
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A*P, then get AP_loc */
  /*--------------------------------------------------------------------------*/
  ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  ierr = MatMatMult(A,P,MAT_INITIAL_MATRIX,2.0,&AP);CHKERRQ(ierr);
  ierr = MatMPIAIJGetLocalMat(AP,MAT_INITIAL_MATRIX,&ptap->AP_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&AP);CHKERRQ(ierr);

  /* (2) compute C_loc = Rd*AP_loc and C_oth = Ro*AP_loc */
  ierr = MatMatMult_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,MAT_INITIAL_MATRIX,2.0,&ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMult_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,MAT_INITIAL_MATRIX,2.0,&ptap->C_oth);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  pi_loc = p_loc->i;

  ap  = (Mat_SeqAIJ*)ptap->AP_loc->data;
  api = ap->i;
  apj = ap->j;

  /* (3) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj) */
  /*------------------------------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* then, compute symbolic Co = (p->B)^T*AP */
  pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                               >= (num of nonzero rows of C_seq) - pn */
  ierr   = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
  nnz           = fill*(poti[pon] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
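  /* lnk accumulates the sorted union of column indices for one output row.
     Schematically (the condensed storage is internal to PetscLLCondensed*):
     adding {0,3,7} and then {2,3} leaves the list holding {0,2,3,7} with
     lnk[0] == 4, the row nonzero count used below. */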

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pot */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr      = PetscMalloc1(coi[pon],&coj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* (4) send j-array (coj) of Co to other processors */
  /*--------------------------------------------------*/
  ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
  len_s        = merge->len_s;
  merge->nsend = 0;

  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (5) */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  for (i=0; i<merge->nrecv; i++) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* (5) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
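    /* for example, a message carrying nrows=2 rows with local indices 3 and 7
       and 4 and 2 nonzeros respectively would be (a made-up instance)
           buf_si = {2, 3, 7, 0, 4, 6}
       i.e. nrows, the two row indices, then the CSR-style offsets {0,4,6} */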
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (6) compute the local portion of C (mpi mat) */
  /*----------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);

  /* allocate pti array and free space for accumulating nonzero column info */
  ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
  pti[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = fill*(pi_loc[pm] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }
  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
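  /* dnz[] and onz[] count, for each local row of C, the nonzeros that fall in
     the diagonal block (columns owned by this process) and in the off-diagonal
     block; they feed MatMPIAIJSetPreallocation() when Cmpi is created below */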
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j];  /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    pti[i+1] = pti[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* (7) create symbolic parallel matrix Cmpi */
  /*------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  merge->bi        = pti;      /* Cseq->i */
  merge->bj        = ptj;      /* Cseq->j */
  merge->coi       = coi;      /* Co->i   */
  merge->coj       = coj;      /* Co->j   */
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;

  /* attach the supporting struct to Cmpi for reuse */
  c           = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap     = ptap;
  ptap->api   = NULL;
  ptap->apj   = NULL;
  ptap->rmax  = ap_rmax;
  *C          = Cmpi;

  /* the 'scalable' flag determines which implementation is used:
       PETSC_FALSE: dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
       PETSC_TRUE:  sparse axpy in MatPtAPNumeric() - might be slow, uses a sparse array apa */
  /* set default scalable */
  ptap->scalable = PETSC_FALSE;

  ierr = PetscOptionsGetBool(((PetscObject)(*C))->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
  if (!ptap->scalable) {  /* do dense axpy */
    ierr = PetscCalloc1(P->cmap->N,&ptap->apa);CHKERRQ(ierr);
  } else {
    /* the sparse-axpy buffer (length ap_rmax+1) is not allocated here: this
       new code path never computes ap_rmax, so the scalable kernel is not
       supported by MatPtAPNumeric_MPIAIJ_MPIAIJ_new() yet */
  }

  PetscFunctionReturn(0);
}
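
/*
  Summary of the data flow set up by the symbolic routine above (a sketch):

     AP_loc = local rows of A*P
     C_loc  = Rd*AP_loc = Pd^T*AP_loc    rows of C owned by this process
     C_oth  = Ro*AP_loc = Po^T*AP_loc    rows of C owned by other processes

  MatPtAPNumeric_MPIAIJ_MPIAIJ_new() below recomputes the numerical values of
  these products and adds C_loc and C_oth into the final MPIAIJ matrix C with
  MatSetValues(...,ADD_VALUES).
*/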

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ_new"
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_new(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
  Mat_PtAPMPI       *ptap = c->ptap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  PetscMPIInt       rank;
  MPI_Comm          comm;
  const PetscInt    *cols;
  const PetscScalar *vals;
  PetscLogDouble    t0,t1,t2,t3,t4,eR,eAP,eCseq,eCmpi;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T and Ro = Po^T */
  ierr = PetscTime(&t0);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  ierr = PetscTime(&t1);CHKERRQ(ierr);
  eR = t1 - t0;

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap     = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*------------------------------------------------------*/
  if (ptap->reuse == MAT_INITIAL_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
    ptap->reuse = MAT_REUSE_MATRIX;
  } else { /* update numerical values of P_oth and P_loc */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
  /*----------------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  apa   = ptap->apa;

  api = ap->i;
  apj = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
  }

  ierr = PetscTime(&t2);CHKERRQ(ierr);
  eAP = t2 - t1;

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr  = MatMatMult_SeqAIJ_SeqAIJ(ptap->Rd,AP_loc,MAT_REUSE_MATRIX,2.0,&ptap->C_loc);CHKERRQ(ierr);
  ierr  = MatMatMult_SeqAIJ_SeqAIJ(ptap->Ro,AP_loc,MAT_REUSE_MATRIX,2.0,&ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;
  ierr  = PetscTime(&t3);CHKERRQ(ierr);
  eCseq = t3 - t2;

  /* 4) add C_loc and C_oth into C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = rstart + i;
    cols  = c_seq->j + c_seq->i[i];
    vals  = c_seq->a + c_seq->i[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part; rows of C_oth map to global rows via p->garray */
  cm    = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row   = p->garray[i];
    cols  = c_seq->j + c_seq->i[i];
    vals  = c_seq->a + c_seq->i[i];
    ierr  = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscTime(&t4);CHKERRQ(ierr);
  eCmpi = t4 - t3;

  if (rank==1) {
    ierr = PetscPrintf(MPI_COMM_SELF," R %g, AP %g, Cseq %g, Cmpi %g = %g\n", eR,eAP,eCseq,eCmpi,eR+eAP+eCseq+eCmpi);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
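
/*
  The dense accumulator apa used above works row by row.  A small worked
  example (numbers are illustrative only): with P->cmap->N = 6 and row i of
  AP having column pattern {1,4}, AProw_nonscalable() scatters A[i,:]*P into
     apa = {0, x, 0, 0, y, 0}
  and the gather loop copies apa[1] and apa[4] into ap->a, resetting each
  entry to 0.0 so that apa is again all zeros for the next row.
*/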

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat                 Cmpi;
  Mat_PtAPMPI         *ptap;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
  PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
  PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
  PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
  PetscBT             lnkbt;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners;
  PetscInt            nzi,*pti,*ptj;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
  PetscReal           afill=1.0,afill_tmp;
  PetscInt            rmax;

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);

  /* check that the matrix local sizes are compatible */
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  }
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
    SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
  }

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* create struct Mat_PtAPMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ierr        = PetscNew(&merge);CHKERRQ(ierr);
  ptap->merge = merge;
  ptap->reuse = MAT_INITIAL_MATRIX;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);

  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc = p_loc->i; pj_loc = p_loc->j;
  pi_oth = p_oth->i; pj_oth = p_oth->j;

  /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
  /*--------------------------------------------------------------------------*/
  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;

  /* create and initialize a linked list */
  ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)); this can run out of memory, e.g. for ex56 with np=8k on Intrepid */
  ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);

  current_space = free_space;

  for (i=0; i<am; i++) {
    /* diagonal portion of A */
    nzi = adi[i+1] - adi[i];
    aj  = ad->j + adi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_loc[row+1] - pi_loc[row];
      Jptr = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion of A */
    nzi = aoi[i+1] - aoi[i];
    aj  = ao->j + aoi[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi_oth[row+1] - pi_oth[row];
      Jptr = pj_oth + pi_oth[row];
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }

  /* allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr      = PetscMalloc1(api[am]+1,&apj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
  if (afill_tmp > afill) afill = afill_tmp;

  /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj) */
  /*------------------------------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* then, compute symbolic Co = (p->B)^T*AP */
  pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
                               >= (num of nonzero rows of C_seq) - pn */
  ierr   = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
  nnz           = fill*(poti[pon] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  for (i=0; i<pon; i++) {
    pnz = poti[i+1] - poti[i];
    ptJ = potj + poti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j]; /* row of AP == col of Pot */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    nnz = lnk[0];

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* copy data into free space, and zero out dense rows */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    coi[i+1] = coi[i] + nnz;
  }

  ierr      = PetscMalloc1(coi[pon],&coj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* (3) send j-array (coj) of Co to other processors */
  /*--------------------------------------------------*/
  ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
  len_s        = merge->len_s;
  merge->nsend = 0;

  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  merge->rowmap->n  = pn;
  merge->rowmap->bs = 1;

  ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
  owners = merge->rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;                   /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of coj are complete */
  for (i=0; i<merge->nrecv; i++) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  i = merge->nrecv;
  while (i--) {
    ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of C (mpi mat) */
  /*----------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);

  /* allocate pti array and free space for accumulating nonzero column info */
  ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
  pti[0] = 0;

  /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
  nnz           = fill*(pi_loc[pm] + api[am]);
  ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }
  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  rmax = 0;
  for (i=0; i<pn; i++) {
    /* add pdt[i,:]*AP into lnk */
    pnz = pdti[i+1] - pdti[i];
    ptJ = pdtj + pdti[i];
    for (j=0; j<pnz; j++) {
      row  = ptJ[j];  /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      Jptr = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }

    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nnz = lnk[0];

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<nnz) {
      ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += nnz;
    current_space->local_used      += nnz;
    current_space->local_remaining -= nnz;

    pti[i+1] = pti[i] + nnz;
    if (nnz > rmax) rmax = nnz;
  }
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);

  ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
  ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
  afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
  if (afill_tmp > afill) afill = afill_tmp;
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* (6) create symbolic parallel matrix Cmpi */
  /*------------------------------------------*/
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  merge->bi        = pti;      /* Cseq->i */
  merge->bj        = ptj;      /* Cseq->j */
  merge->coi       = coi;      /* Co->i   */
  merge->coj       = coj;      /* Co->j   */
  merge->buf_ri    = buf_ri;
  merge->buf_rj    = buf_rj;
  merge->owners_co = owners_co;
  merge->destroy   = Cmpi->ops->destroy;
  merge->duplicate = Cmpi->ops->duplicate;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled      = PETSC_FALSE;
  Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;

  /* attach the supporting struct to Cmpi for reuse */
  c           = (Mat_MPIAIJ*)Cmpi->data;
  c->ptap     = ptap;
  ptap->api   = api;
  ptap->apj   = apj;
  ptap->rmax  = ap_rmax;
  *C          = Cmpi;

  /* the 'scalable' flag determines which implementation is used:
       PETSC_FALSE: dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
       PETSC_TRUE:  sparse axpy in MatPtAPNumeric() - might be slow, uses a sparse array apa */
  /* set default scalable */
  ptap->scalable = PETSC_FALSE;

  ierr = PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
  if (!ptap->scalable) {  /* do dense axpy */
    ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
  } else {
    ierr = PetscCalloc1(ap_rmax+1,&ptap->apa);CHKERRQ(ierr);
  }

#if defined(PETSC_USE_INFO)
  if (pti[pn] != 0) {
    ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
    ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
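
/*
  The fill statistics reported above relate estimated to actual nonzeros.
  For example (illustrative numbers only): with fill = 2.0 and
  nnz(A) + nnz(P) = 1000, the initial FreeSpace holds 2000 entries; if the
  product ends up with 3000 nonzeros then nspacedouble > 0 and the reported
  "needed" ratio afill is about 3.0, the value to pass to MatPtAP() next time.
*/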

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_SeqAIJ          *p_loc,*p_oth;
  Mat_PtAPMPI         *ptap;
  Mat_Merge_SeqsToMPI *merge;
  PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
  PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
  PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
  MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
  PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
  MPI_Comm            comm;
  PetscMPIInt         size,rank,taga,*len_s;
  PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt            **buf_ri,**buf_rj;
  PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request         *s_waits,*r_waits;
  MPI_Status          *status;
  MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt            *api,*apj,*coi,*coj;
  PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
  PetscBool           scalable;
#if defined(PTAP_PROFILE)
  PetscLogDouble t0,t1,t2,eP,t3,t4,et2_AP=0.0,ePtAP=0.0,t2_0,t2_1,t2_2;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t0);CHKERRQ(ierr);
#endif
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ptap = c->ptap;
  if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
  merge    = ptap->merge;
  apa      = ptap->apa;
  scalable = ptap->scalable;

  /* 1) get P_oth = ptap->P_oth and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_INITIAL_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
    ptap->reuse = MAT_REUSE_MATRIX;
  } else { /* update numerical values of P_oth and P_loc */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t1);CHKERRQ(ierr);
  eP = t1-t0;
#endif

  /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
  /*--------------------------------------------------------------*/
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  coi  = merge->coi; coj = merge->coj;
  ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);

  bi     = merge->bi; bj = merge->bj;
  owners = merge->rowmap->range;
  ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);  /* ba: Cseq->a */

  api = ptap->api; apj = ptap->apj;

  if (!scalable) { /* do dense axpy on apa (length of pN, stores A[i,:]*P) - nonscalable, but faster (can take 1/3 of the scalable time) */
    ierr = PetscInfo(C,"Using non-scalable dense axpy\n");CHKERRQ(ierr);
#if 0
    /* a roughly 10x slower dense-row variant, kept for reference */
    Mat         R = ptap->R;
    Mat_SeqAIJ  *r = (Mat_SeqAIJ*)R->data;
    PetscInt    *ri=r->i,*rj=r->j,rnz,arow,l,prow,pcol,pN=P->cmap->N;
    PetscScalar *ra=r->a,tmp,cdense[pN];

    ierr = PetscMemzero(cdense,pN*sizeof(PetscScalar));CHKERRQ(ierr);
    for (i=0; i<cm; i++) { /* each row of C or R */
      rnz = ri[i+1] - ri[i];

      for (j=0; j<rnz; j++) { /* each nz of R */
        arow = rj[ri[i] + j];

        /* diagonal portion of A */
        anz  = ad->i[arow+1] - ad->i[arow];
        for (k=0; k<anz; k++) { /* each nz of Ad */
          tmp  = ra[ri[i] + j]*ad->a[ad->i[arow] + k];
          prow = ad->j[ad->i[arow] + k];
          pnz  = pi_loc[prow+1] - pi_loc[prow];

          for (l=0; l<pnz; l++) { /* each nz of P_loc */
            pcol = pj_loc[pi_loc[prow] + l];
            cdense[pcol] += tmp*pa_loc[pi_loc[prow] + l];
          }
        }

        /* off-diagonal portion of A */
        anz  = ao->i[arow+1] - ao->i[arow];
        for (k=0; k<anz; k++) { /* each nz of Ao */
          tmp  = ra[ri[i] + j]*ao->a[ao->i[arow] + k];
          prow = ao->j[ao->i[arow] + k];
          pnz  = pi_oth[prow+1] - pi_oth[prow];

          for (l=0; l<pnz; l++) { /* each nz of P_oth */
            pcol = pj_oth[pi_oth[prow] + l];
            cdense[pcol] += tmp*pa_oth[pi_oth[prow] + l];
          }
        }
      } /* for (j=0; j<rnz; j++) */

      /* copy cdense[] into ca; zero cdense[] */
      cnz = bi[i+1] - bi[i];
      cj  = bj + bi[i];
      ca  = ba + bi[i];
      for (j=0; j<cnz; j++) {
        ca[j] += cdense[cj[j]];
        cdense[cj[j]] = 0.0;
      }
    } /* for (i=0; i<cm; i++) */
#endif

#if defined(PTAP_PROFILE)
    ierr = PetscTime(&t1);CHKERRQ(ierr);
#endif
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      ierr = PetscTime(&t2_0);CHKERRQ(ierr);
#endif
      /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
      /*------------------------------------------------------------*/
      apJ = apj + api[i];

      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row = adj[j];
        pnz = pi_loc[row+1] - pi_loc[row];
        pj  = pj_loc + pi_loc[row];
        pa  = pa_loc + pi_loc[row];

        /* perform dense axpy */
        valtmp = ada[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }

      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row = aoj[j];
        pnz = pi_oth[row+1] - pi_oth[row];
        pj  = pj_oth + pi_oth[row];
        pa  = pa_oth + pi_oth[row];

        /* perform dense axpy */
        valtmp = aoa[j];
        for (k=0; k<pnz; k++) {
          apa[pj[k]] += valtmp*pa[k];
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
#if defined(PTAP_PROFILE)
      ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
      /*--------------------------------------------------------------*/
      apnz = api[i+1] - api[i];
      /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
      pnz = po->i[i+1] - po->i[i];
      poJ = po->j + po->i[i];
      pA  = po->a + po->i[i];
      for (j=0; j<pnz; j++) {
        row = poJ[j];
        cnz = coi[row+1] - coi[row];
        cj  = coj + coi[row];
        ca  = coa + coi[row];
        /* perform dense axpy */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* put the value into Cd (diagonal part) */
      pnz = pd->i[i+1] - pd->i[i];
      pdJ = pd->j + pd->i[i];
      pA  = pd->a + pd->i[i];
      for (j=0; j<pnz; j++) {
        row = pdJ[j];
        cnz = bi[row+1] - bi[row];
        cj  = bj + bi[row];
        ca  = ba + bi[row];
        /* perform dense axpy */
        valtmp = pA[j];
        for (k=0; k<cnz; k++) {
          ca[k] += valtmp*apa[cj[k]];
        }
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }

      /* zero the current row of A*P */
      for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
#if defined(PTAP_PROFILE)
      ierr   = PetscTime(&t2_2);CHKERRQ(ierr);
      ePtAP += t2_2 - t2_1;
#endif
    }
  } else { /* do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
    ierr = PetscInfo(C,"Using scalable sparse axpy\n");CHKERRQ(ierr);
    /*-----------------------------------------------------------------------------------------*/
    pA = pa_loc;
    for (i=0; i<am; i++) {
#if defined(PTAP_PROFILE)
      ierr = PetscTime(&t2_0);CHKERRQ(ierr);
#endif
      /* form i-th sparse row of A*P */
      apnz = api[i+1] - api[i];
      apJ  = apj + api[i];
      /* diagonal portion of A */
      anz = adi[i+1] - adi[i];
      adj = ad->j + adi[i];
      ada = ad->a + adi[i];
      for (j=0; j<anz; j++) {
        row    = adj[j];
        pnz    = pi_loc[row+1] - pi_loc[row];
        pj     = pj_loc + pi_loc[row];
        pa     = pa_loc + pi_loc[row];
        valtmp = ada[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
      /* off-diagonal portion of A */
      anz = aoi[i+1] - aoi[i];
      aoj = ao->j + aoi[i];
      aoa = ao->a + aoi[i];
      for (j=0; j<anz; j++) {
        row    = aoj[j];
        pnz    = pi_oth[row+1] - pi_oth[row];
        pj     = pj_oth + pi_oth[row];
        pa     = pa_oth + pi_oth[row];
        valtmp = aoa[j];
        nextp  = 0;
        for (k=0; nextp<pnz; k++) {
          if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
            apa[k] += valtmp*pa[nextp++];
          }
        }
        ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
      }
#if defined(PTAP_PROFILE)
      ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
      et2_AP += t2_1 - t2_0;
#endif

      /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
      /*--------------------------------------------------------------*/
      pnz = pi_loc[i+1] - pi_loc[i];
      pJ  = pj_loc + pi_loc[i];
      for (j=0; j<pnz; j++) {
        nextap = 0;
        row    = pJ[j]; /* global index */
        if (row < pcstart || row >=pcend) { /* put the value into Co */
          row = *poJ;
          cj  = coj + coi[row];
          ca  = coa + coi[row]; poJ++;
        } else {                            /* put the value into Cd */
          row = *pdJ;
          cj  = bj + bi[row];
          ca  = ba + bi[row]; pdJ++;
        }
        valtmp = pA[j];
        for (k=0; nextap<apnz; k++) {
          if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
        }
        ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
      }
      pA += pnz;
      /* zero the current row info for A*P */
      ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
      ierr   = PetscTime(&t2_2);CHKERRQ(ierr);
      ePtAP += t2_2 - t2_1;
#endif
    }
  }
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t2);CHKERRQ(ierr);
#endif

  /* 3) send and recv matrix values coa */
  /*------------------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
  ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
  if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}

  ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t3);CHKERRQ(ierr);
#endif

  /* 4) insert local Cseq and received values into Cmpi */
  /*----------------------------------------------------*/
  ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<merge->nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];  /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++) { /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz    = *(nextci[k]+1) - *nextci[k];
        cj     = buf_rj[k] + *(nextci[k]);
        ca     = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++) {
          if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
#if defined(PTAP_PROFILE)
  ierr = PetscTime(&t4);CHKERRQ(ierr);
  if (rank==1) {
    ierr = PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g/A*P + %g/Pt*AP ) + %g/comm + %g/Cloc = %g\n\n",rank,eP,t2-t1,et2_AP,ePtAP,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
  }
#endif
  PetscFunctionReturn(0);
}
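
/*
  The scalable path above merges sorted index lists with a two-pointer scan.
  A minimal sketch of that kernel (assuming apJ[] and pj[] are both sorted
  ascending and every entry of pj[] occurs in apJ[], which the symbolic phase
  guarantees since AP's row pattern is built as a union of such rows):

     nextp = 0;
     for (k=0; nextp<pnz; k++) {
       if (apJ[k] == pj[nextp]) apa[k] += valtmp*pa[nextp++];
     }

  The same pattern is used to merge the received rows into ba[] in step 4).
*/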