xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision 8cb82516f81bfd980fd30024070904b3c8d24cd3)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
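/*
  Illustrative usage sketch (not part of this file, shown only for orientation): the
  routines below are normally reached through the generic MatPtAP() interface, e.g.

      Mat            A,P,C;
      PetscReal      fill = 2.0;
      PetscErrorCode ierr;
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);
      (... change the values of A, keeping its nonzero pattern ...)
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);
      ierr = MatDestroy(&C);CHKERRQ(ierr);

  where A and P are MATMPIAIJ matrices whose local row/column layouts are compatible,
  as checked at the top of MatPtAP_MPIAIJ_MPIAIJ() below.
*/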
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 #include <petsctime.h>
12 
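/* When PTAP_PROFILE is defined, the symbolic and numeric routines in this file time
   their major phases with PetscTime() and print a per-phase breakdown from one rank;
   comment out the define below to silence this diagnostic output. */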
13 #define PTAP_PROFILE
14 
15 extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
16 #undef __FUNCT__
17 #define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
18 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
19 {
20   PetscErrorCode ierr;
21   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
22   Mat_PtAPMPI    *ptap=a->ptap;
23 
24   PetscFunctionBegin;
25   if (ptap) {
26     Mat_Merge_SeqsToMPI *merge=ptap->merge;
27     ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
28     ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
29     ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
30     ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
31     ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
32 
33     ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
34     ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
35 
36     ierr = PetscFree(ptap->api);CHKERRQ(ierr);
37     ierr = PetscFree2(ptap->apj,ptap->apv);CHKERRQ(ierr);
38     ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
39     ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
40     ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
41 
44     if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
45     if (merge) {
46       ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
47       ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
48       ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
49       ierr = PetscFree(merge->bi);CHKERRQ(ierr);
50       ierr = PetscFree(merge->bj);CHKERRQ(ierr);
51       ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
52       ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
53       ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
54       ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
55       ierr = PetscFree(merge->coi);CHKERRQ(ierr);
56       ierr = PetscFree(merge->coj);CHKERRQ(ierr);
57       ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
58       ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
59       ierr = merge->destroy(A);CHKERRQ(ierr);
60       ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
61     } else {
62       ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
63     }
64     ierr = PetscFree(ptap);CHKERRQ(ierr);
65   }
66   PetscFunctionReturn(0);
67 }
68 
69 #undef __FUNCT__
70 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
71 PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
72 {
73   PetscErrorCode      ierr;
74   Mat_MPIAIJ          *a     = (Mat_MPIAIJ*)A->data;
75   Mat_PtAPMPI         *ptap  = a->ptap;
76   Mat_Merge_SeqsToMPI *merge = ptap->merge;
77 
78   PetscFunctionBegin;
79   ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);
80 
81   (*M)->ops->destroy   = merge->destroy;
82   (*M)->ops->duplicate = merge->duplicate;
83   PetscFunctionReturn(0);
84 }
85 
86 #undef __FUNCT__
87 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
88 PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
89 {
90   PetscErrorCode ierr;
91   PetscBool      newalg=PETSC_FALSE;
92   MPI_Comm       comm;
93 
94   PetscFunctionBegin;
95   /* check if matrix local sizes are compatible */
96   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
97   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
98     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
99   }
100   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
101     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
102   }
103 
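  /* The experimental implementation (MatPtAPSymbolic/Numeric_MPIAIJ_MPIAIJ_new) is
     selected at run time with the option -matptap_new, e.g. (illustrative command line)
         mpiexec -n 4 ./ex56 -matptap_new
     otherwise the merge-based implementation further below in this file is used. */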
104   ierr = PetscOptionsGetBool(NULL,"-matptap_new",&newalg,NULL);CHKERRQ(ierr);
105   if (scall == MAT_INITIAL_MATRIX) {
106     ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
107     if (newalg) {
108       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_new(A,P,fill,C);CHKERRQ(ierr);
109     } else {
110       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
111     }
112     ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
113   }
114   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
115   if (newalg) {
116     ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ_new(A,P,*C);CHKERRQ(ierr);
117   } else {
118     ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
119   }
120   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
121   PetscFunctionReturn(0);
122 }
123 
124 /* requires array of size pN, thus nonscalable version */
125 #undef __FUNCT__
126 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ_new"
127 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_new(Mat A,Mat P,PetscReal fill,Mat *C)
128 {
129   PetscErrorCode ierr;
130   Mat_PtAPMPI    *ptap;
131   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
132   Mat_MPIAIJ     *c;
133   MPI_Comm       comm;
134   PetscMPIInt    size,rank;
135 #if defined(PTAP_PROFILE)
136   PetscLogDouble      t0,t1,t11,t12,t2,t3,t4;
137 #endif
138   Mat                 Cmpi;
139   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
140   PetscInt            am=A->rmap->n,pm=P->rmap->n;
141   PetscInt            *lnk,i,k,pnz,row,nnz;
142   PetscInt            pN=P->cmap->N,pn=P->cmap->n;
143   PetscBT             lnkbt;
144   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
145   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
146   PetscInt            len,proc,*dnz,*onz,*owners;
147   PetscInt            nzi,nspacedouble;
148   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
149   MPI_Request         *swaits,*rwaits;
150   MPI_Status          *sstatus,rstatus;
151   PetscLayout         rowmap;
152   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
153   PetscInt            nsend,nrecv;
154   PetscMPIInt         *len_r,*id_r;    /* lengths and source ranks of messages to be received */
155   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0;
156   PetscInt            rmax,*aj,*ai,*pi;
157   Mat_SeqAIJ          *p_loc,*p_oth,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*c_loc,*c_oth;
158   PetscScalar         *apv;
159   PetscReal           apfill;
160 
161   PetscFunctionBegin;
162   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
163   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
164   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
165 #if defined(PTAP_PROFILE)
166   ierr = PetscTime(&t0);CHKERRQ(ierr);
167 #endif
168 
169   /* create struct Mat_PtAPMPI and attached it to C later */
170   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
171   ptap->reuse = MAT_INITIAL_MATRIX;
172 
173   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
174   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
175   /* get P_loc by taking all local rows of P */
176   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
177 
178   /* (0) compute Rd = Pd^T, Ro = Po^T  */
179   /* --------------------------------- */
180   ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
181   ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
182 #if defined(PTAP_PROFILE)
183   ierr = PetscTime(&t11);CHKERRQ(ierr);
184 #endif
185 
186   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
187   /* ----------------------------------------------------------------- */
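  /* For each local row i of A, the column pattern of AP[i,:] is the union of the column
     patterns of the rows of P_loc (selected by the nonzeros of Ad[i,:]) and of P_oth
     (selected by the nonzeros of Ao[i,:]).  The union is accumulated in the sorted
     condensed linked list lnk and then flushed into the CSR arrays (api,apj), whose
     storage is drawn from the PetscFreeSpace list obtained below. */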
188   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
189   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
190 
191   /* create and initialize a linked list - nonscalable version */
192   ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
193 
194   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
195   ierr = PetscFreeSpaceGet((PetscInt)(fill*(ad->i[am]+ao->i[am]+p_loc->i[pm])),&free_space);CHKERRQ(ierr);
196   current_space = free_space;
197   nspacedouble  = 0;
198 
199   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
200   api[0] = 0;
201   for (i=0; i<am; i++) {
202     /* diagonal portion: Ad[i,:]*P */
203     ai = ad->i; pi = p_loc->i;
204     nzi = ai[i+1] - ai[i];
205     aj  = ad->j + ai[i];
206     for (j=0; j<nzi; j++) {
207       row  = aj[j];
208       pnz  = pi[row+1] - pi[row];
209       Jptr = p_loc->j + pi[row];
210       /* add non-zero cols of P into the sorted linked list lnk */
211       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
212     }
213     /* off-diagonal portion: Ao[i,:]*P */
214     ai = ao->i; pi = p_oth->i;
215     nzi = ai[i+1] - ai[i];
216     aj  = ao->j + ai[i];
217     for (j=0; j<nzi; j++) {
218       row  = aj[j];
219       pnz  = pi[row+1] - pi[row];
220       Jptr = p_oth->j + pi[row];
221       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
222     }
223     apnz     = lnk[0];
224     api[i+1] = api[i] + apnz;
225     if (ap_rmax < apnz) ap_rmax = apnz;
226 
227     /* if free space is not available, double the total space in the list */
228     if (current_space->local_remaining<apnz) {
229       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
230       nspacedouble++;
231     }
232 
233     /* Copy data into free space, then initialize lnk */
234     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
235 
236     current_space->array           += apnz;
237     current_space->local_used      += apnz;
238     current_space->local_remaining -= apnz;
239   }
240   /* Allocate space for apj, initialize apj, and */
241   /* destroy list of free space and other temporary array(s) */
242   ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
243   ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
244   apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
245 
246   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
247 #if defined(PTAP_PROFILE)
248   ierr = PetscTime(&t12);CHKERRQ(ierr);
249 #endif
250 
251   /* (2) compute symbolic C_loc = Rd*AP_loc, Co = Ro*AP_loc  */
252   /* ------------------------------------------------------- */
253   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
254   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
255   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
256   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
257 #if defined(PTAP_PROFILE)
258   ierr = PetscTime(&t1);CHKERRQ(ierr);
259 #endif
260 
261   /* (3) send coj of C_oth to other processors  */
262   /* ------------------------------------------ */
263   /* determine row ownership */
264   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
265   rowmap->n  = pn;
266   rowmap->bs = 1;
267   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
268   owners = rowmap->range;
269 
270   /* determine the number of messages to send, their lengths */
271   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
272   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
273   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
274 
275   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
276   coi   = c_oth->i; coj = c_oth->j;
277   con   = ptap->C_oth->rmap->n;
278   proc  = 0;
279   for (i=0; i<con; i++) {
280     while (prmap[i] >= owners[proc+1]) proc++;
281     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
282     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
283   }
284 
285   len          = 0; /* max length of buf_si[], see (4) */
286   owners_co[0] = 0;
287   nsend        = 0;
288   for (proc=0; proc<size; proc++) {
289     owners_co[proc+1] = owners_co[proc] + len_si[proc];
290     if (len_s[proc]) {
291       nsend++;
292       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
293       len         += len_si[proc];
294     }
295   }
296 
297   /* determine the number and length of messages to receive for coi and coj  */
298   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
299   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
300 
301   /* post the Irecv and Isend of coj */
302   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
303   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
304   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
305   for (proc=0, k=0; proc<size; proc++) {
306     if (!len_s[proc]) continue;
307     i    = owners_co[proc];
308     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
309     k++;
310   }
311 
312   /* receives and sends of coj are complete */
313   for (i=0; i<nrecv; i++) {
314     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
315   }
316   ierr = PetscFree(rwaits);CHKERRQ(ierr);
317   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
318 
319   /* (4) send and recv coi */
320   /*-----------------------*/
321   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
322   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
323   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
324   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
325   for (proc=0,k=0; proc<size; proc++) {
326     if (!len_s[proc]) continue;
327     /* form outgoing message for i-structure:
328          buf_si[0]:                 nrows to be sent
329                [1:nrows]:           row index (global)
330                [nrows+1:2*nrows+1]: i-structure index
331     */
332     /*-------------------------------------------*/
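    /* Illustrative example (hypothetical values): if two rows of Co with receiver-local
       row indices 3 and 7 and with 2 and 5 nonzeros respectively go to [proc], then
       buf_si = [2, 3, 7, 0, 2, 7] and len_si[proc] = 2*(2+1) = 6. */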
333     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
334     buf_si_i    = buf_si + nrows+1;
335     buf_si[0]   = nrows;
336     buf_si_i[0] = 0;
337     nrows       = 0;
338     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
339       nzi = coi[i+1] - coi[i];
340       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
341       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
342       nrows++;
343     }
344     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
345     k++;
346     buf_si += len_si[proc];
347   }
348   i = nrecv;
349   while (i--) {
350     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
351   }
352   ierr = PetscFree(rwaits);CHKERRQ(ierr);
353   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
354 
355   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
356   ierr = PetscFree(len_ri);CHKERRQ(ierr);
357   ierr = PetscFree(swaits);CHKERRQ(ierr);
358   ierr = PetscFree(buf_s);CHKERRQ(ierr);
359 #if defined(PTAP_PROFILE)
360   ierr = PetscTime(&t2);CHKERRQ(ierr);
361 #endif
362   /* (5) compute the local portion of Cmpi      */
363   /* ------------------------------------------ */
364   /* set initial free space to pN (a dense row), rather than fill*(nnz(P) + nnz(AP)) */
365   ierr          = PetscFreeSpaceGet(pN,&free_space);CHKERRQ(ierr); /* non-scalable version */
366   current_space = free_space;
367 
368   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
369   for (k=0; k<nrecv; k++) {
370     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
371     nrows       = *buf_ri_k[k];
372     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
373     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
374   }
375   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
376 
377   rmax = 0;
378   for (i=0; i<pn; i++) {
379     /* add C_loc into Cmpi */
380     nzi  = c_loc->i[i+1] - c_loc->i[i];
381     Jptr = c_loc->j + c_loc->i[i];
382     ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
383 
384     /* add received col data into lnk */
385     for (k=0; k<nrecv; k++) { /* k-th received message */
386       if (i == *nextrow[k]) { /* i-th row */
387         nzi  = *(nextci[k]+1) - *nextci[k];
388         Jptr = buf_rj[k] + *nextci[k];
389         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
390         nextrow[k]++; nextci[k]++;
391       }
392     }
393     nnz = lnk[0];
394 
395     /* copy data into free space, then initialize lnk */
396     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
397     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
398     if (nnz > rmax) rmax = nnz;
399   }
400   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
401   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
402   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
403 #if defined(PTAP_PROFILE)
404   ierr = PetscTime(&t3);CHKERRQ(ierr);
405 #endif
406 
407   /* (6) create symbolic parallel matrix Cmpi */
408   /*------------------------------------------*/
409   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
410   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
411   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
412   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
413   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
414   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
415 
416   /* free communication buffers (these play the role of the 'merge' members in the old implementation) */
417   ierr = PetscFree(id_r);CHKERRQ(ierr);
418   ierr = PetscFree(len_r);CHKERRQ(ierr);
419   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
420   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
421   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
422   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
423   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
424 
425   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
426   Cmpi->assembled        = PETSC_FALSE;
427   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
428   Cmpi->ops->duplicate   = 0; /* MatDuplicate_MPIAIJ_MatPtAP is not done yet */
429   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_new;
430 
431   /* attach the supporting struct to Cmpi for reuse */
432   c           = (Mat_MPIAIJ*)Cmpi->data;
433   c->ptap     = ptap;
434   ptap->api   = api;
435   ptap->apj   = apj;
436   ptap->apv   = apv;
437   ptap->rmax  = ap_rmax;
438   *C          = Cmpi;
439 
440   /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ_new() */
441   ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
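  /* ptap->apa is a dense work array of length pN, the GLOBAL number of columns of P;
     the numeric phase scatters each row A[i,:]*P into it and zeroes it again afterwards.
     This is fast, but the pN-sized array is what makes this variant nonscalable. */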
442 
443 #if defined(PTAP_PROFILE)
444   ierr = PetscTime(&t4);CHKERRQ(ierr);
445   if (rank == 1) {
446     ierr = PetscPrintf(MPI_COMM_SELF,"PtAPSym: %g + %g + %g + %g + %g + %g = %g\n",t11-t0,t12-t11,t1-t12,t2-t1,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
447   }
448 #endif
449   PetscFunctionReturn(0);
450 }
451 
452 #undef __FUNCT__
453 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ_new"
454 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_new(Mat A,Mat P,Mat C)
455 {
456   PetscErrorCode    ierr;
457   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
458   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
459   Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
460   Mat_PtAPMPI       *ptap = c->ptap;
461   Mat               AP_loc,C_loc,C_oth;
462   PetscInt          i,rstart,rend,cm,ncols,row;
463   PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
464   PetscScalar       *apa;
465   const PetscInt    *cols;
466   const PetscScalar *vals;
467 #if defined(PTAP_PROFILE)
468   PetscMPIInt       rank;
469   MPI_Comm          comm;
470   PetscLogDouble    t0,t1,t2,t3,t4,eR,eAP,eCseq,eCmpi;
471 #endif
472 
473   PetscFunctionBegin;
474   ierr = MatZeroEntries(C);CHKERRQ(ierr);
475 
476   /* 1) get Rd = Pd^T, Ro = Po^T */
477 #if defined(PTAP_PROFILE)
478   ierr = PetscTime(&t0);CHKERRQ(ierr);
479 #endif
480   if (ptap->reuse == MAT_REUSE_MATRIX) {
481     ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
482     ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
483   }
484 #if defined(PTAP_PROFILE)
485   ierr = PetscTime(&t1);CHKERRQ(ierr);
486   eR = t1 - t0;
487 #endif
488 
489   /* 2) get AP_loc */
490   AP_loc = ptap->AP_loc;
491   ap = (Mat_SeqAIJ*)AP_loc->data;
492 
493   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
494   /*-----------------------------------------------------*/
495   if (ptap->reuse == MAT_REUSE_MATRIX) {
496     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
497     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
498     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
499   }
500 
501   /* 2-2) compute numeric A_loc*P - dominating part */
502   /* ---------------------------------------------- */
503   /* get data from symbolic products */
504   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
505   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
506   apa   = ptap->apa;
507   api = ptap->api;
508   apj = ptap->apj;
509   for (i=0; i<am; i++) {
510     /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
511     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
512     apnz = api[i+1] - api[i];
513     for (j=0; j<apnz; j++) {
514       col = apj[j+api[i]];
515       ap->a[j+ap->i[i]] = apa[col];
516       apa[col] = 0.0;
517     }
518   }
519 #if defined(PTAP_PROFILE)
520   ierr = PetscTime(&t2);CHKERRQ(ierr);
521   eAP = t2 - t1;
522 #endif
523 
524   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
525   ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
526   ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
527   C_loc = ptap->C_loc;
528   C_oth = ptap->C_oth;
529 #if defined(PTAP_PROFILE)
530   ierr = PetscTime(&t3);CHKERRQ(ierr);
531   eCseq = t3 - t2;
532 #endif
533 
534   /* add C_loc and C_oth to C */
535   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
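  /* C_loc contributes to the locally owned rows of C, inserted at global rows rstart+i;
     C_oth contributes to rows owned by other processes, whose global indices are given by
     p->garray[i].  Inserting with ADD_VALUES and assembling below lets MatSetValues()
     carry out the required off-process accumulation. */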
536 
537   /* C_loc -> C */
538   cm    = C_loc->rmap->N;
539   c_seq = (Mat_SeqAIJ*)C_loc->data;
540   cols = c_seq->j;
541   vals = c_seq->a;
542   for (i=0; i<cm; i++) {
543     ncols = c_seq->i[i+1] - c_seq->i[i];
544     row = rstart + i;
545     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
546     cols += ncols; vals += ncols;
547   }
548 
549   /* Co -> C, off-processor part */
550   cm = C_oth->rmap->N;
551   c_seq = (Mat_SeqAIJ*)C_oth->data;
552   cols = c_seq->j;
553   vals = c_seq->a;
554   for (i=0; i<cm; i++) {
555     ncols = c_seq->i[i+1] - c_seq->i[i];
556     row = p->garray[i];
557     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
558     cols += ncols; vals += ncols;
559   }
560   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
561   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
562 #if defined(PTAP_PROFILE)
563   ierr = PetscTime(&t4);CHKERRQ(ierr);
564   eCmpi = t4 - t3;
565 
566   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
567   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
568   if (rank==1) {
569     ierr = PetscPrintf(MPI_COMM_SELF," R %g, AP %g, Cseq %g, Cmpi %g = %g\n", eR,eAP,eCseq,eCmpi,eR+eAP+eCseq+eCmpi);CHKERRQ(ierr);
570   }
571 #endif
572   ptap->reuse = MAT_REUSE_MATRIX;
573   PetscFunctionReturn(0);
574 }
575 
576 #undef __FUNCT__
577 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
578 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
579 {
580   PetscErrorCode      ierr;
581   Mat                 Cmpi;
582   Mat_PtAPMPI         *ptap;
583   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
584   Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
585   Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
586   Mat_SeqAIJ          *p_loc,*p_oth;
587   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
588   PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
589   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
590   PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
591   PetscBT             lnkbt;
592   MPI_Comm            comm;
593   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
594   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
595   PetscInt            len,proc,*dnz,*onz,*owners;
596   PetscInt            nzi,*pti,*ptj;
597   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
598   MPI_Request         *swaits,*rwaits;
599   MPI_Status          *sstatus,rstatus;
600   Mat_Merge_SeqsToMPI *merge;
601   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
602   PetscReal           afill=1.0,afill_tmp;
603   PetscInt            rmax;
604 
605   PetscFunctionBegin;
606   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
607 
608   /* check if matrix local sizes are compatible */
609   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
610     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
611   }
612   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
613     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
614   }
615 
616   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
617   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
618 
619   /* create struct Mat_PtAPMPI and attach it to C later */
620   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
621   ierr        = PetscNew(&merge);CHKERRQ(ierr);
622   ptap->merge = merge;
623   ptap->reuse = MAT_INITIAL_MATRIX;
624 
625   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
626   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
627 
628   /* get P_loc by taking all local rows of P */
629   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
630 
631   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
632   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
633   pi_loc = p_loc->i; pj_loc = p_loc->j;
634   pi_oth = p_oth->i; pj_oth = p_oth->j;
635 
636   /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
637   /*--------------------------------------------------------------------------*/
638   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
639   api[0] = 0;
640 
641   /* create and initialize a linked list */
642   ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
643 
644   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)); this can run out of memory for ex56 with np=8k on Intrepid! */
645   ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
646   current_space = free_space;
647 
648   for (i=0; i<am; i++) {
649     /* diagonal portion of A */
650     nzi = adi[i+1] - adi[i];
651     aj  = ad->j + adi[i];
652     for (j=0; j<nzi; j++) {
653       row  = aj[j];
654       pnz  = pi_loc[row+1] - pi_loc[row];
655       Jptr = pj_loc + pi_loc[row];
656       /* add non-zero cols of P into the sorted linked list lnk */
657       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
658     }
659     /* off-diagonal portion of A */
660     nzi = aoi[i+1] - aoi[i];
661     aj  = ao->j + aoi[i];
662     for (j=0; j<nzi; j++) {
663       row  = aj[j];
664       pnz  = pi_oth[row+1] - pi_oth[row];
665       Jptr = pj_oth + pi_oth[row];
666       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
667     }
668     apnz     = lnk[0];
669     api[i+1] = api[i] + apnz;
670     if (ap_rmax < apnz) ap_rmax = apnz;
671 
672     /* if free space is not available, double the total space in the list */
673     if (current_space->local_remaining<apnz) {
674       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
675       nspacedouble++;
676     }
677 
678     /* Copy data into free space, then initialize lnk */
679     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
680 
681     current_space->array           += apnz;
682     current_space->local_used      += apnz;
683     current_space->local_remaining -= apnz;
684   }
685 
686   /* Allocate space for apj, initialize apj, and */
687   /* destroy list of free space and other temporary array(s) */
688   ierr      = PetscMalloc1(api[am]+1,&apj);CHKERRQ(ierr);
689   ierr      = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
690   afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
691   if (afill_tmp > afill) afill = afill_tmp;
692 
693   /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj)*/
694   /*-----------------------------------------------------------------*/
695   ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
696 
697   /* then, compute symbolic Co = (p->B)^T*AP */
698   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
699                          >= (num of nonzero rows of C_seq) - pn */
700   ierr   = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
701   coi[0] = 0;
702 
703   /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
704   nnz           = fill*(poti[pon] + api[am]);
705   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
706   current_space = free_space;
707 
708   for (i=0; i<pon; i++) {
709     pnz = poti[i+1] - poti[i];
710     ptJ = potj + poti[i];
711     for (j=0; j<pnz; j++) {
712       row  = ptJ[j]; /* row of AP == col of Pot */
713       apnz = api[row+1] - api[row];
714       Jptr = apj + api[row];
715       /* add non-zero cols of AP into the sorted linked list lnk */
716       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
717     }
718     nnz = lnk[0];
719 
720     /* If free space is not available, double the total space in the list */
721     if (current_space->local_remaining<nnz) {
722       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
723       nspacedouble++;
724     }
725 
726     /* Copy data into free space, and zero out denserows */
727     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
728 
729     current_space->array           += nnz;
730     current_space->local_used      += nnz;
731     current_space->local_remaining -= nnz;
732 
733     coi[i+1] = coi[i] + nnz;
734   }
735 
736   ierr      = PetscMalloc1(coi[pon],&coj);CHKERRQ(ierr);
737   ierr      = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
738   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
739   if (afill_tmp > afill) afill = afill_tmp;
740   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
741 
742   /* (3) send j-array (coj) of Co to other processors */
743   /*--------------------------------------------------*/
744   ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
745   len_s        = merge->len_s;
746   merge->nsend = 0;
747 
748 
749   /* determine row ownership */
750   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
751   merge->rowmap->n  = pn;
752   merge->rowmap->bs = 1;
753 
754   ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
755   owners = merge->rowmap->range;
756 
757   /* determine the number of messages to send, their lengths */
758   ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
759   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
760   ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);
761 
762   proc = 0;
763   for (i=0; i<pon; i++) {
764     while (prmap[i] >= owners[proc+1]) proc++;
765     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
766     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
767   }
768 
769   len          = 0; /* max length of buf_si[], see (4) */
770   owners_co[0] = 0;
771   for (proc=0; proc<size; proc++) {
772     owners_co[proc+1] = owners_co[proc] + len_si[proc];
773     if (len_s[proc]) {
774       merge->nsend++;
775       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
776       len         += len_si[proc];
777     }
778   }
779 
780   /* determine the number and length of messages to receive for coi and coj  */
781   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
782   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
783 
784   /* post the Irecv and Isend of coj */
785   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
786   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
787   ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
788   for (proc=0, k=0; proc<size; proc++) {
789     if (!len_s[proc]) continue;
790     i    = owners_co[proc];
791     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
792     k++;
793   }
794 
795   /* receives and sends of coj are complete */
796   for (i=0; i<merge->nrecv; i++) {
797     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
798   }
799   ierr = PetscFree(rwaits);CHKERRQ(ierr);
800   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
801 
802   /* (4) send and recv coi */
803   /*-----------------------*/
804   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
805   ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
806   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
807   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
808   for (proc=0,k=0; proc<size; proc++) {
809     if (!len_s[proc]) continue;
810     /* form outgoing message for i-structure:
811          buf_si[0]:                 nrows to be sent
812                [1:nrows]:           row index (global)
813                [nrows+1:2*nrows+1]: i-structure index
814     */
815     /*-------------------------------------------*/
816     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
817     buf_si_i    = buf_si + nrows+1;
818     buf_si[0]   = nrows;
819     buf_si_i[0] = 0;
820     nrows       = 0;
821     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
822       nzi = coi[i+1] - coi[i];
823       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
824       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
825       nrows++;
826     }
827     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
828     k++;
829     buf_si += len_si[proc];
830   }
831   i = merge->nrecv;
832   while (i--) {
833     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
834   }
835   ierr = PetscFree(rwaits);CHKERRQ(ierr);
836   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
837 
838   ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
839   ierr = PetscFree(len_ri);CHKERRQ(ierr);
840   ierr = PetscFree(swaits);CHKERRQ(ierr);
841   ierr = PetscFree(buf_s);CHKERRQ(ierr);
842 
843   /* (5) compute the local portion of C (mpi mat) */
844   /*----------------------------------------------*/
845   ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
846 
847   /* allocate pti array and free space for accumulating nonzero column info */
848   ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
849   pti[0] = 0;
850 
851   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
852   nnz           = fill*(pi_loc[pm] + api[am]);
853   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
854   current_space = free_space;
855 
856   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
857   for (k=0; k<merge->nrecv; k++) {
858     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
859     nrows       = *buf_ri_k[k];
860     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
861     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
862   }
863   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
864   rmax = 0;
865   for (i=0; i<pn; i++) {
866     /* add pdt[i,:]*AP into lnk */
867     pnz = pdti[i+1] - pdti[i];
868     ptJ = pdtj + pdti[i];
869     for (j=0; j<pnz; j++) {
870       row  = ptJ[j];  /* row of AP == col of Pt */
871       apnz = api[row+1] - api[row];
872       Jptr = apj + api[row];
873       /* add non-zero cols of AP into the sorted linked list lnk */
874       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
875     }
876 
877     /* add received col data into lnk */
878     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
879       if (i == *nextrow[k]) { /* i-th row */
880         nzi  = *(nextci[k]+1) - *nextci[k];
881         Jptr = buf_rj[k] + *nextci[k];
882         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
883         nextrow[k]++; nextci[k]++;
884       }
885     }
886     nnz = lnk[0];
887 
888     /* if free space is not available, make more free space */
889     if (current_space->local_remaining<nnz) {
890       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
891       nspacedouble++;
892     }
893     /* copy data into free space, then initialize lnk */
894     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
895     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
896 
897     current_space->array           += nnz;
898     current_space->local_used      += nnz;
899     current_space->local_remaining -= nnz;
900 
901     pti[i+1] = pti[i] + nnz;
902     if (nnz > rmax) rmax = nnz;
903   }
904   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
905   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
906 
907   ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
908   ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
909   afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
910   if (afill_tmp > afill) afill = afill_tmp;
911   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
912 
913   /* (6) create symbolic parallel matrix Cmpi */
914   /*------------------------------------------*/
915   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
916   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
917   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
918   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
919   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
920   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
921 
922   merge->bi        = pti;      /* Cseq->i */
923   merge->bj        = ptj;      /* Cseq->j */
924   merge->coi       = coi;      /* Co->i   */
925   merge->coj       = coj;      /* Co->j   */
926   merge->buf_ri    = buf_ri;
927   merge->buf_rj    = buf_rj;
928   merge->owners_co = owners_co;
929   merge->destroy   = Cmpi->ops->destroy;
930   merge->duplicate = Cmpi->ops->duplicate;
931 
932   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
933   Cmpi->assembled      = PETSC_FALSE;
934   Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
935   Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
936 
937   /* attach the supporting struct to Cmpi for reuse */
938   c           = (Mat_MPIAIJ*)Cmpi->data;
939   c->ptap     = ptap;
940   ptap->api   = api;
941   ptap->apj   = apj;
942   ptap->rmax  = ap_rmax;
943   *C          = Cmpi;
944 
945   /* flag 'scalable' determines which implementations to be used:
946        0: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
947        1: do sparse axpy in MatPtAPNumeric() - might be slower, uses a sparse array apa */
948   /* set default: nonscalable dense axpy */
949   ptap->scalable = PETSC_FALSE;
950 
951   ierr = PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
952   if (!ptap->scalable) {  /* Do dense axpy */
953     ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
954   } else {
955     ierr = PetscCalloc1(ap_rmax+1,&ptap->apa);CHKERRQ(ierr);
956   }
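  /* The scalable variant can be requested at run time via -matptap_scalable, e.g.
     (illustrative command line)
         mpiexec -n 8 ./ex56 -matptap_scalable
     trading the pN-sized dense work array for one of length ap_rmax+1, at some cost in
     speed, as described in the comment above. */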
957 
958 #if defined(PETSC_USE_INFO)
959   if (pti[pn] != 0) {
960     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
961     ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
962   } else {
963     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
964   }
965 #endif
966   PetscFunctionReturn(0);
967 }
968 
969 #undef __FUNCT__
970 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
971 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
972 {
973   PetscErrorCode      ierr;
974   Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
975   Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
976   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
977   Mat_SeqAIJ          *p_loc,*p_oth;
978   Mat_PtAPMPI         *ptap;
979   Mat_Merge_SeqsToMPI *merge;
980   PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
981   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
982   PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
983   MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
984   PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
985   MPI_Comm            comm;
986   PetscMPIInt         size,rank,taga,*len_s;
987   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
988   PetscInt            **buf_ri,**buf_rj;
989   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
990   MPI_Request         *s_waits,*r_waits;
991   MPI_Status          *status;
992   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
993   PetscInt            *api,*apj,*coi,*coj;
994   PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
995   PetscBool           scalable;
996 #if defined(PTAP_PROFILE)
997   PetscLogDouble t0,t1,t2,eP,t3,t4,et2_AP=0.0,ePtAP=0.0,t2_0,t2_1,t2_2;
998 #endif
999 
1000   PetscFunctionBegin;
1001   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
1002 #if defined(PTAP_PROFILE)
1003   ierr = PetscTime(&t0);CHKERRQ(ierr);
1004 #endif
1005   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1006   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1007 
1008   ptap = c->ptap;
1009   if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
1010   merge    = ptap->merge;
1011   apa      = ptap->apa;
1012   scalable = ptap->scalable;
1013 
1014   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
1015   /*-----------------------------------------------------*/
1016   if (ptap->reuse == MAT_INITIAL_MATRIX) {
1017     /* P_oth and P_loc are obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
1018     ptap->reuse = MAT_REUSE_MATRIX;
1019   } else { /* update numerical values of P_oth and P_loc */
1020     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
1021     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
1022   }
1023 #if defined(PTAP_PROFILE)
1024   ierr = PetscTime(&t1);CHKERRQ(ierr);
1025   eP = t1-t0;
1026 #endif
1027   /*
1028   printf("[%d] Ad: %d, %d; Ao: %d, %d; P_loc: %d, %d; P_oth %d, %d;\n",rank,
1029          a->A->rmap->N,a->A->cmap->N,a->B->rmap->N,a->B->cmap->N,
1030          ptap->P_loc->rmap->N,ptap->P_loc->cmap->N,
1031          ptap->P_oth->rmap->N,ptap->P_oth->cmap->N);
1032    */
1033 
1034   /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
1035   /*--------------------------------------------------------------*/
1036   /* get data from symbolic products */
1037   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1038   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1039   pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
1040   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
1041 
1042   coi  = merge->coi; coj = merge->coj;
1043   ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);
1044 
1045   bi     = merge->bi; bj = merge->bj;
1046   owners = merge->rowmap->range;
1047   ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);  /* ba: Cseq->a */
1048 
1049   api = ptap->api; apj = ptap->apj;
1050 
1051   if (!scalable) { /* Do dense axpy on apa (length pN, stores A[i,:]*P) - nonscalable, but faster (can take about 1/3 of the scalable version's time) */
1052     ierr = PetscInfo(C,"Using non-scalable dense axpy\n");CHKERRQ(ierr);
1053 #if 0
1054     /* ------ 10x slower -------------- */
1055     /*==================================*/
1056     Mat         R = ptap->R;
1057     Mat_SeqAIJ  *r = (Mat_SeqAIJ*)R->data;
1058     PetscInt    *ri=r->i,*rj=r->j,rnz,arow,l,prow,pcol,pN=P->cmap->N;
1059     PetscScalar *ra=r->a,tmp,cdense[pN];
1060 
1061     ierr = PetscMemzero(cdense,pN*sizeof(PetscScalar));CHKERRQ(ierr);
1062     for (i=0; i<cm; i++) { /* each row of C or R */
1063       rnz = ri[i+1] - ri[i];
1064 
1065       for (j=0; j<rnz; j++) { /* each nz of R */
1066         arow = rj[ri[i] + j];
1067 
1068         /* diagonal portion of A */
1069         anz  = ad->i[arow+1] - ad->i[arow];
1070         for (k=0; k<anz; k++) { /* each nz of Ad */
1071           tmp  = ra[ri[i] + j]*ad->a[ad->i[arow] + k];
1072           prow = ad->j[ad->i[arow] + k];
1073           pnz  = pi_loc[prow+1] - pi_loc[prow];
1074 
1075           for (l=0; l<pnz; l++) { /* each nz of P_loc */
1076             pcol = pj_loc[pi_loc[prow] + l];
1077             cdense[pcol] += tmp*pa_loc[pi_loc[prow] + l];
1078           }
1079         }
1080 
1081         /* off-diagonal portion of A */
1082         anz  = ao->i[arow+1] - ao->i[arow];
1083         for (k=0; k<anz; k++) { /* each nz of Ao */
1084           tmp  = ra[ri[i] + j]*ao->a[ao->i[arow] + k];
1085           prow = ao->j[ao->i[arow] + k];
1086           pnz  = pi_oth[prow+1] - pi_oth[prow];
1087 
1088           for (l=0; l<pnz; l++) { /* each nz of P_oth */
1089             pcol = pj_oth[pi_oth[prow] + l];
1090             cdense[pcol] += tmp*pa_oth[pi_oth[prow] + l];
1091           }
1092         }
1093 
1094       } /* for (j=0; j<rnz; j++) */
1095 
1096       /* copy cdense[] into ca; zero cdense[] */
1097       cnz = bi[i+1] - bi[i];
1098       cj  = bj + bi[i];
1099       ca  = ba + bi[i];
1100       for (j=0; j<cnz; j++) {
1101         ca[j] += cdense[cj[j]];
1102         cdense[cj[j]] = 0.0;
1103       }
1104 #if 0
1105       if (rank == 0) {
1106         printf("[%d] row %d: ",rank,i);
1107         for (j=0; j<pN; j++) printf(" %g,",cdense[j]);
1108         printf("\n");
1109       }
1110       for (j=0; j<pN; j++) cdense[j]=0.0; /* zero cdense[] */
1111 #endif
1112     } /* for (i=0; i<cm; i++) */
1113 #endif
1114 
1115     /*==========================================*/
1116 #if defined(PTAP_PROFILE)
1117     ierr = PetscTime(&t1);CHKERRQ(ierr);
1118 #endif
1119     for (i=0; i<am; i++) {
1120 #if defined(PTAP_PROFILE)
1121       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1122 #endif
1123       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
1124       /*------------------------------------------------------------*/
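      /* apa is dense here (length pN), so each nonzero a_ij of A[i,:] simply scales the
         corresponding row of P_loc/P_oth and adds it into apa at the global column
         positions; no searching of column indices is needed, which is why this branch is
         the faster one. */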
1125       apJ = apj + api[i];
1126 
1127       /* diagonal portion of A */
1128       anz = adi[i+1] - adi[i];
1129       adj = ad->j + adi[i];
1130       ada = ad->a + adi[i];
1131       for (j=0; j<anz; j++) {
1132         row = adj[j];
1133         pnz = pi_loc[row+1] - pi_loc[row];
1134         pj  = pj_loc + pi_loc[row];
1135         pa  = pa_loc + pi_loc[row];
1136 
1137         /* perform dense axpy */
1138         valtmp = ada[j];
1139         for (k=0; k<pnz; k++) {
1140           apa[pj[k]] += valtmp*pa[k];
1141         }
1142         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1143       }
1144 
1145       /* off-diagonal portion of A */
1146       anz = aoi[i+1] - aoi[i];
1147       aoj = ao->j + aoi[i];
1148       aoa = ao->a + aoi[i];
1149       for (j=0; j<anz; j++) {
1150         row = aoj[j];
1151         pnz = pi_oth[row+1] - pi_oth[row];
1152         pj  = pj_oth + pi_oth[row];
1153         pa  = pa_oth + pi_oth[row];
1154 
1155         /* perform dense axpy */
1156         valtmp = aoa[j];
1157         for (k=0; k<pnz; k++) {
1158           apa[pj[k]] += valtmp*pa[k];
1159         }
1160         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1161       }
1162 #if defined(PTAP_PROFILE)
1163       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1164       et2_AP += t2_1 - t2_0;
1165 #endif
1166 
1167       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1168       /*--------------------------------------------------------------*/
1169       apnz = api[i+1] - api[i];
1170       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
1171       pnz = po->i[i+1] - po->i[i];
1172       poJ = po->j + po->i[i];
1173       pA  = po->a + po->i[i];
1174       for (j=0; j<pnz; j++) {
1175         row = poJ[j];
1176         cnz = coi[row+1] - coi[row];
1177         cj  = coj + coi[row];
1178         ca  = coa + coi[row];
1179         /* perform dense axpy */
1180         valtmp = pA[j];
1181         for (k=0; k<cnz; k++) {
1182           ca[k] += valtmp*apa[cj[k]];
1183         }
1184         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1185       }
1187       /* put the value into Cd (diagonal part) */
1188       pnz = pd->i[i+1] - pd->i[i];
1189       pdJ = pd->j + pd->i[i];
1190       pA  = pd->a + pd->i[i];
1191       for (j=0; j<pnz; j++) {
1192         row = pdJ[j];
1193         cnz = bi[row+1] - bi[row];
1194         cj  = bj + bi[row];
1195         ca  = ba + bi[row];
1196         /* perform dense axpy */
1197         valtmp = pA[j];
1198         for (k=0; k<cnz; k++) {
1199           ca[k] += valtmp*apa[cj[k]];
1200         }
1201         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1202       }
1204       /* zero the current row of A*P */
1205       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
1206 #if defined(PTAP_PROFILE)
1207       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1208       ePtAP += t2_2 - t2_1;
1209 #endif
1210     }
1211 
1212     if (rank == 100) { /* debug: dump the local rows of Cseq */
1213       for (row=0; row<cm; row++) {
1214         printf("[%d] row %d: ",rank,row);
1215         cnz = bi[row+1] - bi[row];
1216         for (j=0; j<cnz; j++) printf(" %g,",ba[bi[row]+j]);
1217         printf("\n");
1218       }
1219     }
1220 
1221   } else { /* Do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
1222     ierr = PetscInfo(C,"Using scalable sparse axpy\n");CHKERRQ(ierr);
1223     /*-----------------------------------------------------------------------------------------*/
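    /* In this branch apa has only length ap_rmax+1 and stores A[i,:]*P compressed on the
       column pattern apJ of AP[i,:].  Because apJ and each row pattern pj of P are sorted
       and pj is a subset of apJ, the loops below advance two indices (k over apJ, nextp
       over pj) and accumulate only where the column indices match. */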
1224     pA=pa_loc;
1225     for (i=0; i<am; i++) {
1226 #if defined(PTAP_PROFILE)
1227       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1228 #endif
1229       /* form i-th sparse row of A*P */
1230       apnz = api[i+1] - api[i];
1231       apJ  = apj + api[i];
1232       /* diagonal portion of A */
1233       anz = adi[i+1] - adi[i];
1234       adj = ad->j + adi[i];
1235       ada = ad->a + adi[i];
1236       for (j=0; j<anz; j++) {
1237         row    = adj[j];
1238         pnz    = pi_loc[row+1] - pi_loc[row];
1239         pj     = pj_loc + pi_loc[row];
1240         pa     = pa_loc + pi_loc[row];
1241         valtmp = ada[j];
1242         nextp  = 0;
1243         for (k=0; nextp<pnz; k++) {
1244           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1245             apa[k] += valtmp*pa[nextp++];
1246           }
1247         }
1248         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1249       }
1250       /* off-diagonal portion of A */
1251       anz = aoi[i+1] - aoi[i];
1252       aoj = ao->j + aoi[i];
1253       aoa = ao->a + aoi[i];
1254       for (j=0; j<anz; j++) {
1255         row    = aoj[j];
1256         pnz    = pi_oth[row+1] - pi_oth[row];
1257         pj     = pj_oth + pi_oth[row];
1258         pa     = pa_oth + pi_oth[row];
1259         valtmp = aoa[j];
1260         nextp  = 0;
1261         for (k=0; nextp<pnz; k++) {
1262           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1263             apa[k] += valtmp*pa[nextp++];
1264           }
1265         }
1266         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1267       }
1268 #if defined(PTAP_PROFILE)
1269       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1270       et2_AP += t2_1 - t2_0;
1271 #endif
1272 
1273       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1274       /*--------------------------------------------------------------*/
1275       pnz = pi_loc[i+1] - pi_loc[i];
1276       pJ  = pj_loc + pi_loc[i];
1277       for (j=0; j<pnz; j++) {
1278         nextap = 0;
1279         row    = pJ[j]; /* global index */
1280         if (row < pcstart || row >=pcend) { /* put the value into Co */
1281           row = *poJ;
1282           cj  = coj + coi[row];
1283           ca  = coa + coi[row]; poJ++;
1284         } else {                            /* put the value into Cd */
1285           row = *pdJ;
1286           cj  = bj + bi[row];
1287           ca  = ba + bi[row]; pdJ++;
1288         }
1289         valtmp = pA[j];
1290         for (k=0; nextap<apnz; k++) {
1291           if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
1292         }
1293         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
1294       }
1295       pA += pnz;
1296       /* zero the current row info for A*P */
1297       ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
1298 #if defined(PTAP_PROFILE)
1299       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1300       ePtAP += t2_2 - t2_1;
1301 #endif
1302     }
1303   }
1304 #if defined(PTAP_PROFILE)
1305   ierr = PetscTime(&t2);CHKERRQ(ierr);
1306 #endif
1307 
1308   /* 3) send and recv matrix values coa */
1309   /*------------------------------------*/
1310   buf_ri = merge->buf_ri;
1311   buf_rj = merge->buf_rj;
1312   len_s  = merge->len_s;
1313   ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
1314   ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
1315 
1316   ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
1317   for (proc=0,k=0; proc<size; proc++) {
1318     if (!len_s[proc]) continue;
1319     i    = merge->owners_co[proc];
1320     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
1321     k++;
1322   }
1323   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
1324   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
1325 
1326   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
1327   ierr = PetscFree(r_waits);CHKERRQ(ierr);
1328   ierr = PetscFree(coa);CHKERRQ(ierr);
1329 #if defined(PTAP_PROFILE)
1330   ierr = PetscTime(&t3);CHKERRQ(ierr);
1331 #endif
1332 
1333   /* 4) insert local Cseq and received values into Cmpi */
1334   /*------------------------------------------------------*/
1335   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
1336   for (k=0; k<merge->nrecv; k++) {
1337     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1338     nrows       = *(buf_ri_k[k]);
1339     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
1340     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure  */
1341   }
1342 
1343   for (i=0; i<cm; i++) {
1344     row  = owners[rank] + i; /* global row index of C_seq */
1345     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
1346     ba_i = ba + bi[i];
1347     bnz  = bi[i+1] - bi[i];
1348     /* add received vals into ba */
1349     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1350       /* i-th row */
1351       if (i == *nextrow[k]) {
1352         cnz    = *(nextci[k]+1) - *nextci[k];
1353         cj     = buf_rj[k] + *(nextci[k]);
1354         ca     = abuf_r[k] + *(nextci[k]);
1355         nextcj = 0;
1356         for (j=0; nextcj<cnz; j++) {
1357           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1358             ba_i[j] += ca[nextcj++];
1359           }
1360         }
1361         nextrow[k]++; nextci[k]++;
1362         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1363       }
1364     }
1365     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
1366   }
1367   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1368   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1369 
1370   ierr = PetscFree(ba);CHKERRQ(ierr);
1371   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
1372   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
1373   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1374 #if defined(PTAP_PROFILE)
1375   ierr = PetscTime(&t4);CHKERRQ(ierr);
1376   if (rank==1) {
1377     ierr = PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g/A*P + %g/Pt*AP ) + %g/comm + %g/Cloc = %g\n\n",rank,eP,t2-t1,et2_AP,ePtAP,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
1378   }
1379 #endif
1380   PetscFunctionReturn(0);
1381 }
1382