xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision 9a6dcab793401456f60aaffcc860c1d95018a977)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
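/*
  Usage sketch (editorial, hypothetical caller code - not part of this file):
  the routines below are reached through the public MatPtAP() interface, e.g.

      Mat A,P,C;
      ... assemble A (MATMPIAIJ, n x n) and P (MATMPIAIJ, n x m) ...
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);  symbolic + numeric phase
      ... update the values of A, keeping its nonzero pattern ...
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);    numeric phase only

  Here 2.0 is the fill argument, the expected ratio nnz(C)/(nnz(A)+nnz(P)).
*/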
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 #include <petsctime.h>
12 
13 /* #define PTAP_PROFILE */
14 
15 extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
16 #undef __FUNCT__
17 #define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
18 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
19 {
20   PetscErrorCode ierr;
21   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
22   Mat_PtAPMPI    *ptap=a->ptap;
23 
24   PetscFunctionBegin;
25   if (ptap) {
26     Mat_Merge_SeqsToMPI *merge=ptap->merge;
27     ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
28     ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
29     ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
30     ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
31     ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
32     ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
33     ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
34     if (ptap->AP_loc) { /* used by alg_rap */
35       Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
36       ierr = PetscFree(ap->i);CHKERRQ(ierr);
37       ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
38       ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
39     } else { /* used by alg_ptap */
40       ierr = PetscFree(ptap->api);CHKERRQ(ierr);
41       ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
42     }
43     ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
44     ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
45     if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
46     if (merge) { /* used by alg_ptap */
47       ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
48       ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
49       ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
50       ierr = PetscFree(merge->bi);CHKERRQ(ierr);
51       ierr = PetscFree(merge->bj);CHKERRQ(ierr);
52       ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
53       ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
54       ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
55       ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
56       ierr = PetscFree(merge->coi);CHKERRQ(ierr);
57       ierr = PetscFree(merge->coj);CHKERRQ(ierr);
58       ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
59       ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
60       ierr = merge->destroy(A);CHKERRQ(ierr);
61       ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
62     } else {
63       ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
64     }
65     ierr = PetscFree(ptap);CHKERRQ(ierr);
66   }
67   PetscFunctionReturn(0);
68 }
69 
70 #undef __FUNCT__
71 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
72 PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
73 {
74   PetscErrorCode ierr;
75   Mat_MPIAIJ     *a     = (Mat_MPIAIJ*)A->data;
76   Mat_PtAPMPI    *ptap  = a->ptap;
77 
78   PetscFunctionBegin;
79   ierr = (*ptap->duplicate)(A,op,M);CHKERRQ(ierr);
80   (*M)->ops->destroy   = ptap->destroy;
81   (*M)->ops->duplicate = ptap->duplicate;
82   PetscFunctionReturn(0);
83 }
84 
85 #undef __FUNCT__
86 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
87 PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
88 {
89   PetscErrorCode ierr;
90   PetscBool      rap=PETSC_TRUE; /* do R=P^T locally, then C=R*A*P */
91   MPI_Comm       comm;
92 
93   PetscFunctionBegin;
94   /* check if matrix local sizes are compatible */
95   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
96   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
97     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D,%D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
98   }
99   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
100     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D,%D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
101   }
102 
103   ierr = PetscOptionsGetBool(NULL,"-matrap",&rap,NULL);CHKERRQ(ierr);
104   if (scall == MAT_INITIAL_MATRIX) {
105     ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
106     if (rap) { /* do R=P^T locally, then C=R*A*P */
107       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
108     } else {       /* do P^T*A*P */
109       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap(A,P,fill,C);CHKERRQ(ierr);
110     }
111     ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
112   }
113   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
114   ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
115   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
116   PetscFunctionReturn(0);
117 }
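/*
  Editorial note: the branch above is chosen at run time. A sketch of selecting
  the alg_ptap path (assuming a generic executable name):

      mpiexec -n <np> ./app -matrap 0

  -matrap 0 sets rap=PETSC_FALSE, so MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap() is
  used for the symbolic phase instead of the default R=P^T algorithm.
*/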
118 
119 /* requires an array of size pN, hence the nonscalable version */
120 #undef __FUNCT__
121 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
122 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
123 {
124   PetscErrorCode      ierr;
125   Mat_PtAPMPI         *ptap;
126   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
127   MPI_Comm            comm;
128   PetscMPIInt         size,rank;
129   Mat                 Cmpi;
130   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
131   PetscInt            am=A->rmap->n,pm=P->rmap->n;
132   PetscInt            *lnk,i,k,pnz,row,nnz;
133   PetscInt            pN=P->cmap->N,pn=P->cmap->n;
134   PetscBT             lnkbt;
135   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
136   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
137   PetscInt            len,proc,*dnz,*onz,*owners;
138   PetscInt            nzi,nspacedouble;
139   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
140   MPI_Request         *swaits,*rwaits;
141   MPI_Status          *sstatus,rstatus;
142   PetscLayout         rowmap;
143   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
144   PetscInt            nsend;
145   PetscMPIInt         *len_r,*id_r;    /* lengths and source ranks of the messages to be received */
146   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Prmax,APrmax;
147   PetscInt            rmax,*aj,*ai,*pi;
148   Mat_SeqAIJ          *p_loc,*p_oth,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data,*c_loc,*c_oth;
149   PetscScalar         *apv;
150   PetscTable          ta,Cta;
151 #if defined(PETSC_USE_INFO)
152   PetscReal           apfill;
153 #endif
154 #if defined(PTAP_PROFILE)
155   PetscLogDouble      t0,t1,t11,t12,t2,t3,t4;
156 #endif
157 
158   PetscFunctionBegin;
159   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
160   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
161   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
162 #if defined(PTAP_PROFILE)
163   ierr = PetscTime(&t0);CHKERRQ(ierr);
164 #endif
165 
166   /* create struct Mat_PtAPMPI and attach it to C later */
167   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
168   ptap->reuse = MAT_INITIAL_MATRIX;
169 
170   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
171   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
172   /* get P_loc by taking all local rows of P */
173   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
174 
175   /* (0) compute Rd = Pd^T, Ro = Po^T  */
176   /* --------------------------------- */
177   ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
178   ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
179 #if defined(PTAP_PROFILE)
180   ierr = PetscTime(&t11);CHKERRQ(ierr);
181 #endif
182 
183   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
184   /* ----------------------------------------------------------------- */
185   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
186   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
187 
188   /* create and initialize a linked list */
189   Prmax = p_loc->rmax+p_oth->rmax;
193   ierr = PetscTableCreate(2*Prmax,pN,&ta);CHKERRQ(ierr);  /* for computing AP_loc */
194   ierr = PetscTableCreate(2*Prmax,pN,&Cta);CHKERRQ(ierr); /* for computing Cmpi */
195   for (row=0; row<ptap->P_loc->rmap->N; row++) {
196     nzi = p_loc->i[row+1] - p_loc->i[row];
197     for (j=0; j<nzi; j++) {
198       Jptr = j + p_loc->j + p_loc->i[row];
199       ierr = PetscTableAdd(ta,*Jptr+1,1,INSERT_VALUES);CHKERRQ(ierr); /* key must be >0 */
200     }
201   }
202   for (row=0; row<ptap->P_oth->rmap->N; row++) {
203     nzi = p_oth->i[row+1] - p_oth->i[row];
204     for (j=0; j<nzi; j++) {
205       Jptr = j + p_oth->j + p_oth->i[row];
206       ierr = PetscTableAdd(ta,*Jptr+1,1,INSERT_VALUES);CHKERRQ(ierr); /* key must be >0 */
207     }
208   }
209   ierr = PetscTableGetCount(ta,&Prmax);CHKERRQ(ierr); /* Prmax = nnz(sum of Prows) */
210   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
215   ierr = PetscLLCondensedCreate(Prmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
216 
217   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
218   ierr = PetscFreeSpaceGet((PetscInt)(fill*(ad->i[am]+ao->i[am]+p_loc->i[pm])),&free_space);CHKERRQ(ierr);
219   current_space = free_space;
220   nspacedouble  = 0;
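  /* Illustrative sizing (editorial note, numbers assumed): with fill=2.0,
     nnz(Ad)+nnz(Ao)=1000 and nnz(P_loc)=500, the initial free space holds
     2.0*(1000+500)=3000 column indices. Whenever a row overflows the space
     remaining, PetscFreeSpaceGet() in the loop below grows the list by
     roughly doubling it, and nspacedouble counts these reallocs (recorded
     in AP_loc->info.mallocs and logged via PetscInfo). */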
221 
222   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
223   api[0] = 0;
224   for (i=0; i<am; i++) {
225     /* diagonal portion: Ad[i,:]*P */
226     ai = ad->i; pi = p_loc->i;
227     nzi = ai[i+1] - ai[i];
228     aj  = ad->j + ai[i];
229     for (j=0; j<nzi; j++) {
230       row  = aj[j];
231       pnz  = pi[row+1] - pi[row];
232       Jptr = p_loc->j + pi[row];
233       /* add non-zero cols of P into the sorted linked list lnk */
234       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
235     }
236     /* off-diagonal portion: Ao[i,:]*P */
237     ai = ao->i; pi = p_oth->i;
238     nzi = ai[i+1] - ai[i];
239     aj  = ao->j + ai[i];
240     for (j=0; j<nzi; j++) {
241       row  = aj[j];
242       pnz  = pi[row+1] - pi[row];
243       Jptr = p_oth->j + pi[row];
244       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
245     }
246     apnz     = lnk[0];
247     api[i+1] = api[i] + apnz;
248     if (ap_rmax < apnz) ap_rmax = apnz;
249 
250     /* if free space is not available, double the total space in the list */
251     if (current_space->local_remaining<apnz) {
252       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
253       nspacedouble++;
254     }
255 
256     /* Copy data into free space, then initialize lnk */
257     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
258 
259     /* add column indices into Cta */
260     for (j=0; j<apnz; j++) {
261       ierr = PetscTableAdd(Cta,current_space->array[j]+1,1,INSERT_VALUES);CHKERRQ(ierr); /* key must be >0 */
262     }
263 
264     current_space->array           += apnz;
265     current_space->local_used      += apnz;
266     current_space->local_remaining -= apnz;
267   }
268   /* Allocate space for apj and apv, initialize apj, and */
269   /* destroy list of free space and other temporary array(s) */
270   ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
271   ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
272   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
273 
274   ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr); /* lnk is reused in step (5) to assemble the rows of Cmpi */
275 
276   /* Create AP_loc for reuse */
277   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
285 
286 #if defined(PETSC_USE_INFO)
287   apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
288   ptap->AP_loc->info.mallocs           = nspacedouble;
289   ptap->AP_loc->info.fill_ratio_given  = fill;
290   ptap->AP_loc->info.fill_ratio_needed = apfill;
291 
292   if (api[am]) {
293     ierr = PetscInfo3(ptap->AP_loc,"AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
294     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
295   } else {
296     ierr = PetscInfo(ptap->AP_loc,"AP_loc is empty \n");CHKERRQ(ierr);
297   }
298 #endif
299 
300 #if defined(PTAP_PROFILE)
301   ierr = PetscTime(&t12);CHKERRQ(ierr);
302 #endif
303 
304   /* (2-1) compute symbolic Co = Ro*AP_loc  */
305   /* ------------------------------------ */
306   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
307   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
308 #if defined(PTAP_PROFILE)
309   ierr = PetscTime(&t1);CHKERRQ(ierr);
310 #endif
311 
312   /* (3) send coj of C_oth to other processors  */
313   /* ------------------------------------------ */
314   /* determine row ownership */
315   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
316   rowmap->n  = pn;
317   rowmap->bs = 1;
318   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
319   owners = rowmap->range;
320 
321   /* determine the number of messages to send, their lengths */
322   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
323   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
324   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
325 
326   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
327   coi   = c_oth->i; coj = c_oth->j;
328   con   = ptap->C_oth->rmap->n;
329   proc  = 0;
330   for (i=0; i<con; i++) {
331     while (prmap[i] >= owners[proc+1]) proc++;
332     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
333     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
334   }
335 
336   len          = 0; /* total length of buf_s[] (concatenation of all buf_si msgs), see (4) */
337   owners_co[0] = 0;
338   nsend        = 0;
339   for (proc=0; proc<size; proc++) {
340     owners_co[proc+1] = owners_co[proc] + len_si[proc];
341     if (len_s[proc]) {
342       nsend++;
343       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
344       len         += len_si[proc];
345     }
346   }
347 
348   /* determine the number and length of messages to receive for coi and coj  */
349   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
350   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
351 
352   /* post the Irecv and Isend of coj */
353   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
354   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
355   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
356   for (proc=0, k=0; proc<size; proc++) {
357     if (!len_s[proc]) continue;
358     i    = owners_co[proc];
359     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
360     k++;
361   }
362 
363   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
364   /* ---------------------------------------- */
365   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
366   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
367 
368   /* receives coj are complete */
369   for (i=0; i<nrecv; i++) {
370     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
371   }
372   ierr = PetscFree(rwaits);CHKERRQ(ierr);
373   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
374 
375   ierr = PetscTableGetCount(Cta,&APrmax);CHKERRQ(ierr); /* APrmax = nnz(sum of AP rows) */
377 
378   /* (4) send and recv coi */
379   /*-----------------------*/
380   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
381   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
382   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
383   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
384   for (proc=0,k=0; proc<size; proc++) {
385     if (!len_s[proc]) continue;
386     /* form outgoing message for i-structure:
387          buf_si[0]:                 nrows to be sent
388                [1:nrows]:           row index (local to the receiving processor)
389                [nrows+1:2*nrows+1]: i-structure index
390     */
391     /*-------------------------------------------*/
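    /* Illustrative example (editorial): for nrows=2 with receiver-local rows 3
       and 7 holding 4 and 5 nonzeros respectively, buf_si = {2, 3, 7, 0, 4, 9},
       of length 2*(nrows+1) = 6, matching len_si[proc] computed above. */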
392     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
393     buf_si_i    = buf_si + nrows+1;
394     buf_si[0]   = nrows;
395     buf_si_i[0] = 0;
396     nrows       = 0;
397     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
398       nzi = coi[i+1] - coi[i];
399       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
400       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
401       nrows++;
402     }
403     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
404     k++;
405     buf_si += len_si[proc];
406   }
407   for (i=0; i<nrecv; i++) {
408     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
409   }
410   ierr = PetscFree(rwaits);CHKERRQ(ierr);
411   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
412 
413   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
414   ierr = PetscFree(len_ri);CHKERRQ(ierr);
415   ierr = PetscFree(swaits);CHKERRQ(ierr);
416   ierr = PetscFree(buf_s);CHKERRQ(ierr);
417 #if defined(PTAP_PROFILE)
418   ierr = PetscTime(&t2);CHKERRQ(ierr);
419 #endif
420   /* (5) compute the local portion of Cmpi      */
421   /* ------------------------------------------ */
422   ierr = PetscTableDestroy(&Cta);CHKERRQ(ierr);
423 
424   /* set initial free space to be pN */
425   ierr          = PetscFreeSpaceGet(pN,&free_space);CHKERRQ(ierr); /* non-scalable version */
426   current_space = free_space;
427 
428   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
429   for (k=0; k<nrecv; k++) {
430     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
431     nrows       = *buf_ri_k[k];
432     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
433     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
434   }
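  /* Layout reminder (editorial): each received buf_ri[k] is one i-structure
     message {nrows, row_0..row_{nrows-1}, 0, cum_1..cum_nrows}; nextrow[k]
     walks the local row indices and nextci[k] the cumulative counts, so
     *(nextci[k]+1) - *nextci[k] below gives the nnz contributed to the next
     incoming row. */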
435   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
436 
437   rmax = 0;
438   for (i=0; i<pn; i++) {
439     /* add C_loc into Cmpi */
440     nzi  = c_loc->i[i+1] - c_loc->i[i];
441     Jptr = c_loc->j + c_loc->i[i];
442     ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
443 
444     /* add received col data into lnk */
445     for (k=0; k<nrecv; k++) { /* k-th received message */
446       if (i == *nextrow[k]) { /* i-th row */
447         nzi  = *(nextci[k]+1) - *nextci[k];
448         Jptr = buf_rj[k] + *nextci[k];
449         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
450         nextrow[k]++; nextci[k]++;
451       }
452     }
453     nnz = lnk[0];
454 
455     /* copy data into free space, then initialize lnk */
456     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
457     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
458     if (nnz > rmax) rmax = nnz;
459   }
460   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
461   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
462   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
463 #if defined(PTAP_PROFILE)
464   ierr = PetscTime(&t3);CHKERRQ(ierr);
465 #endif
466 
467   /* (6) create symbolic parallel matrix Cmpi */
468   /*------------------------------------------*/
469   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
470   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
471   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
472   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
473   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
474   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
475 
476   /* members in merge */
477   ierr = PetscFree(id_r);CHKERRQ(ierr);
478   ierr = PetscFree(len_r);CHKERRQ(ierr);
479   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
480   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
481   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
482   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
483   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
484 
485   /* attach the supporting struct to Cmpi for reuse */
486   c = (Mat_MPIAIJ*)Cmpi->data;
487   c->ptap         = ptap;
488   ptap->rmax      = ap_rmax;
489   ptap->duplicate = Cmpi->ops->duplicate;
490   ptap->destroy   = Cmpi->ops->destroy;
491 
492   /* apa is used for the dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
493   ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
494 
495 
496   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
497   Cmpi->assembled        = PETSC_FALSE;
498   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
499   Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
500   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
501   *C                     = Cmpi;
502 
503 #if defined(PTAP_PROFILE)
504   ierr = PetscTime(&t4);CHKERRQ(ierr);
505   if (rank == 1) {
506     ierr = PetscPrintf(PETSC_COMM_SELF,"PtAPSym: %g + %g + %g + %g + %g + %g = %g\n",t11-t0,t12-t11,t1-t12,t2-t1,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
507   }
508 #endif
509   PetscFunctionReturn(0);
510 }
511 
512 #undef __FUNCT__
513 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
514 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
515 {
516   PetscErrorCode    ierr;
517   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
518   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
519   Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
520   Mat_PtAPMPI       *ptap = c->ptap;
521   Mat               AP_loc,C_loc,C_oth;
522   PetscInt          i,rstart,rend,cm,ncols,row;
523   PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
524   PetscScalar       *apa;
525   const PetscInt    *cols;
526   const PetscScalar *vals;
527 #if defined(PTAP_PROFILE)
528   PetscMPIInt       rank;
529   MPI_Comm          comm;
530   PetscLogDouble    t0,t1,t2,t3,t4,eR,eAP,eCseq,eCmpi;
531 #endif
532 
533   PetscFunctionBegin;
534   ierr = MatZeroEntries(C);CHKERRQ(ierr);
535 
536   /* 1) get R = Pd^T,Ro = Po^T */
537 #if defined(PTAP_PROFILE)
538   ierr = PetscTime(&t0);CHKERRQ(ierr);
539 #endif
540   if (ptap->reuse == MAT_REUSE_MATRIX) {
541     ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
542     ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
543   }
544 #if defined(PTAP_PROFILE)
545   ierr = PetscTime(&t1);CHKERRQ(ierr);
546   eR = t1 - t0;
547 #endif
548 
549   /* 2) get AP_loc */
550   AP_loc = ptap->AP_loc;
551   ap = (Mat_SeqAIJ*)AP_loc->data;
552 
553   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
554   /*-----------------------------------------------------*/
555   if (ptap->reuse == MAT_REUSE_MATRIX) {
556     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
557     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
558     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
559   }
560 
561   /* 2-2) compute numeric A_loc*P - dominating part */
562   /* ---------------------------------------------- */
563   /* get data from symbolic products */
564   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
565   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
566   apa   = ptap->apa;
567   api   = ap->i;
568   apj   = ap->j;
569   for (i=0; i<am; i++) {
570     /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
571     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
572     apnz = api[i+1] - api[i];
573     for (j=0; j<apnz; j++) {
574       col = apj[j+api[i]];
575       ap->a[j+ap->i[i]] = apa[col];
576       apa[col] = 0.0;
577     }
578     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
579   }
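  /* Editorial sketch of AProw_nonscalable (assumed from its use here): it
     performs the dense accumulation apa[col] += Ad[i,j]*P_loc[j,col] and
     apa[col] += Ao[i,j]*P_oth[j,col] over row i; the gather loop above then
     copies the apnz nonzero entries of apa into ap->a and zeros them again,
     so apa stays clean for the next row. */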
580 #if defined(PTAP_PROFILE)
581   ierr = PetscTime(&t2);CHKERRQ(ierr);
582   eAP = t2 - t1;
583 #endif
584 
585   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
586   ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
587   ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
588   C_loc = ptap->C_loc;
589   C_oth = ptap->C_oth;
590 #if defined(PTAP_PROFILE)
591   ierr = PetscTime(&t3);CHKERRQ(ierr);
592   eCseq = t3 - t2;
593 #endif
594 
595   /* add C_loc and C_oth to C */
596   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
597 
598   /* C_loc -> C */
599   cm    = C_loc->rmap->N;
600   c_seq = (Mat_SeqAIJ*)C_loc->data;
601   cols = c_seq->j;
602   vals = c_seq->a;
603   for (i=0; i<cm; i++) {
604     ncols = c_seq->i[i+1] - c_seq->i[i];
605     row = rstart + i;
606     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
607     cols += ncols; vals += ncols;
608   }
609 
610   /* Co -> C, off-processor part */
611   cm = C_oth->rmap->N;
612   c_seq = (Mat_SeqAIJ*)C_oth->data;
613   cols = c_seq->j;
614   vals = c_seq->a;
615   for (i=0; i<cm; i++) {
616     ncols = c_seq->i[i+1] - c_seq->i[i];
617     row = p->garray[i];
618     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
619     cols += ncols; vals += ncols;
620   }
621   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
622   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
623 #if defined(PTAP_PROFILE)
624   ierr = PetscTime(&t4);CHKERRQ(ierr);
625   eCmpi = t4 - t3;
626 
627   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
628   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
629   if (rank==1) {
630     ierr = PetscPrintf(MPI_COMM_SELF," R %g, AP %g, Cseq %g, Cmpi %g = %g\n", eR,eAP,eCseq,eCmpi,eR+eAP+eCseq+eCmpi);CHKERRQ(ierr);
631   }
632 #endif
633   ptap->reuse = MAT_REUSE_MATRIX;
634   PetscFunctionReturn(0);
635 }
636 
637 #undef __FUNCT__
638 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap"
639 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_ptap(Mat A,Mat P,PetscReal fill,Mat *C)
640 {
641   PetscErrorCode      ierr;
642   Mat                 Cmpi;
643   Mat_PtAPMPI         *ptap;
644   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
645   Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
646   Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
647   Mat_SeqAIJ          *p_loc,*p_oth;
648   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
649   PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
650   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
651   PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
652   PetscBT             lnkbt;
653   MPI_Comm            comm;
654   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
655   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
656   PetscInt            len,proc,*dnz,*onz,*owners;
657   PetscInt            nzi,*pti,*ptj;
658   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
659   MPI_Request         *swaits,*rwaits;
660   MPI_Status          *sstatus,rstatus;
661   Mat_Merge_SeqsToMPI *merge;
662   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
663   PetscReal           afill=1.0,afill_tmp;
664   PetscInt            rmax;
665 
666   PetscFunctionBegin;
667   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
668   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
669   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
670 
671   /* create struct Mat_PtAPMPI and attach it to C later */
672   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
673   ierr        = PetscNew(&merge);CHKERRQ(ierr);
674   ptap->merge = merge;
675   ptap->reuse = MAT_INITIAL_MATRIX;
676 
677   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
678   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
679 
680   /* get P_loc by taking all local rows of P */
681   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
682 
683   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
684   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
685   pi_loc = p_loc->i; pj_loc = p_loc->j;
686   pi_oth = p_oth->i; pj_oth = p_oth->j;
687 
688   /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
689   /*--------------------------------------------------------------------------*/
690   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
691   api[0] = 0;
692 
693   /* create and initialize a linked list */
694   ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
695 
696   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)); this ran out of memory for ex56 with np=8k on Intrepid */
697   ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
698   current_space = free_space;
699 
700   for (i=0; i<am; i++) {
701     /* diagonal portion of A */
702     nzi = adi[i+1] - adi[i];
703     aj  = ad->j + adi[i];
704     for (j=0; j<nzi; j++) {
705       row  = aj[j];
706       pnz  = pi_loc[row+1] - pi_loc[row];
707       Jptr = pj_loc + pi_loc[row];
708       /* add non-zero cols of P into the sorted linked list lnk */
709       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
710     }
711     /* off-diagonal portion of A */
712     nzi = aoi[i+1] - aoi[i];
713     aj  = ao->j + aoi[i];
714     for (j=0; j<nzi; j++) {
715       row  = aj[j];
716       pnz  = pi_oth[row+1] - pi_oth[row];
717       Jptr = pj_oth + pi_oth[row];
718       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
719     }
720     apnz     = lnk[0];
721     api[i+1] = api[i] + apnz;
722     if (ap_rmax < apnz) ap_rmax = apnz;
723 
724     /* if free space is not available, double the total space in the list */
725     if (current_space->local_remaining<apnz) {
726       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
727       nspacedouble++;
728     }
729 
730     /* Copy data into free space, then initialize lnk */
731     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
732 
733     current_space->array           += apnz;
734     current_space->local_used      += apnz;
735     current_space->local_remaining -= apnz;
736   }
737 
738   /* Allocate space for apj, initialize apj, and */
739   /* destroy list of free space and other temporary array(s) */
740   ierr      = PetscMalloc1(api[am]+1,&apj);CHKERRQ(ierr);
741   ierr      = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
742   afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
743   if (afill_tmp > afill) afill = afill_tmp;
744 
745   /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj)*/
746   /*-----------------------------------------------------------------*/
747   ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
748 
749   /* then, compute symbolic Co = (p->B)^T*AP */
750   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
751                          >= (num of nonzero rows of C_seq) - pn */
752   ierr   = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
753   coi[0] = 0;
754 
755   /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
756   nnz           = fill*(poti[pon] + api[am]);
757   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
758   current_space = free_space;
759 
760   for (i=0; i<pon; i++) {
761     pnz = poti[i+1] - poti[i];
762     ptJ = potj + poti[i];
763     for (j=0; j<pnz; j++) {
764       row  = ptJ[j]; /* row of AP == col of Pot */
765       apnz = api[row+1] - api[row];
766       Jptr = apj + api[row];
767       /* add non-zero cols of AP into the sorted linked list lnk */
768       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
769     }
770     nnz = lnk[0];
771 
772     /* If free space is not available, double the total space in the list */
773     if (current_space->local_remaining<nnz) {
774       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
775       nspacedouble++;
776     }
777 
778     /* Copy data into free space, and zero out denserows */
779     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
780 
781     current_space->array           += nnz;
782     current_space->local_used      += nnz;
783     current_space->local_remaining -= nnz;
784 
785     coi[i+1] = coi[i] + nnz;
786   }
787 
788   ierr      = PetscMalloc1(coi[pon],&coj);CHKERRQ(ierr);
789   ierr      = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
790   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
791   if (afill_tmp > afill) afill = afill_tmp;
792   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
793 
794   /* (3) send j-array (coj) of Co to other processors */
795   /*--------------------------------------------------*/
796   ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
797   len_s        = merge->len_s;
798   merge->nsend = 0;
799 
800 
801   /* determine row ownership */
802   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
803   merge->rowmap->n  = pn;
804   merge->rowmap->bs = 1;
805 
806   ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
807   owners = merge->rowmap->range;
808 
809   /* determine the number of messages to send, their lengths */
810   ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
811   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
812   ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);
813 
814   proc = 0;
815   for (i=0; i<pon; i++) {
816     while (prmap[i] >= owners[proc+1]) proc++;
817     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
818     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
819   }
820 
821   len          = 0; /* total length of buf_s[] (concatenation of all buf_si msgs), see (4) */
822   owners_co[0] = 0;
823   for (proc=0; proc<size; proc++) {
824     owners_co[proc+1] = owners_co[proc] + len_si[proc];
825     if (len_s[proc]) {
826       merge->nsend++;
827       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
828       len         += len_si[proc];
829     }
830   }
831 
832   /* determine the number and length of messages to receive for coi and coj  */
833   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
834   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
835 
836   /* post the Irecv and Isend of coj */
837   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
838   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
839   ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
840   for (proc=0, k=0; proc<size; proc++) {
841     if (!len_s[proc]) continue;
842     i    = owners_co[proc];
843     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
844     k++;
845   }
846 
847   /* receives and sends of coj are complete */
848   for (i=0; i<merge->nrecv; i++) {
849     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
850   }
851   ierr = PetscFree(rwaits);CHKERRQ(ierr);
852   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
853 
854   /* (4) send and recv coi */
855   /*-----------------------*/
856   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
857   ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
858   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
859   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
860   for (proc=0,k=0; proc<size; proc++) {
861     if (!len_s[proc]) continue;
862     /* form outgoing message for i-structure:
863          buf_si[0]:                 nrows to be sent
864                [1:nrows]:           row index (local to the receiving processor)
865                [nrows+1:2*nrows+1]: i-structure index
866     */
867     /*-------------------------------------------*/
868     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
869     buf_si_i    = buf_si + nrows+1;
870     buf_si[0]   = nrows;
871     buf_si_i[0] = 0;
872     nrows       = 0;
873     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
874       nzi = coi[i+1] - coi[i];
875       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
876       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
877       nrows++;
878     }
879     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
880     k++;
881     buf_si += len_si[proc];
882   }
883   i = merge->nrecv;
884   while (i--) {
885     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
886   }
887   ierr = PetscFree(rwaits);CHKERRQ(ierr);
888   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
889 
890   ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
891   ierr = PetscFree(len_ri);CHKERRQ(ierr);
892   ierr = PetscFree(swaits);CHKERRQ(ierr);
893   ierr = PetscFree(buf_s);CHKERRQ(ierr);
894 
895   /* (5) compute the local portion of C (mpi mat) */
896   /*----------------------------------------------*/
897   ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
898 
899   /* allocate pti array and free space for accumulating nonzero column info */
900   ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
901   pti[0] = 0;
902 
903   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
904   nnz           = fill*(pi_loc[pm] + api[am]);
905   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
906   current_space = free_space;
907 
908   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
909   for (k=0; k<merge->nrecv; k++) {
910     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
911     nrows       = *buf_ri_k[k];
912     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
913     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
914   }
915   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
916   rmax = 0;
917   for (i=0; i<pn; i++) {
918     /* add pdt[i,:]*AP into lnk */
919     pnz = pdti[i+1] - pdti[i];
920     ptJ = pdtj + pdti[i];
921     for (j=0; j<pnz; j++) {
922       row  = ptJ[j];  /* row of AP == col of Pt */
923       apnz = api[row+1] - api[row];
924       Jptr = apj + api[row];
925       /* add non-zero cols of AP into the sorted linked list lnk */
926       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
927     }
928 
929     /* add received col data into lnk */
930     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
931       if (i == *nextrow[k]) { /* i-th row */
932         nzi  = *(nextci[k]+1) - *nextci[k];
933         Jptr = buf_rj[k] + *nextci[k];
934         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
935         nextrow[k]++; nextci[k]++;
936       }
937     }
938     nnz = lnk[0];
939 
940     /* if free space is not available, make more free space */
941     if (current_space->local_remaining<nnz) {
942       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
943       nspacedouble++;
944     }
945     /* copy data into free space, then initialize lnk */
946     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
947     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
948 
949     current_space->array           += nnz;
950     current_space->local_used      += nnz;
951     current_space->local_remaining -= nnz;
952 
953     pti[i+1] = pti[i] + nnz;
954     if (nnz > rmax) rmax = nnz;
955   }
956   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
957   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
958 
959   ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
960   ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
961   afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
962   if (afill_tmp > afill) afill = afill_tmp;
963   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
964 
965   /* (6) create symbolic parallel matrix Cmpi */
966   /*------------------------------------------*/
967   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
968   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
969   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
970   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
971   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
972   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
973 
974   merge->bi        = pti;      /* Cseq->i */
975   merge->bj        = ptj;      /* Cseq->j */
976   merge->coi       = coi;      /* Co->i   */
977   merge->coj       = coj;      /* Co->j   */
978   merge->buf_ri    = buf_ri;
979   merge->buf_rj    = buf_rj;
980   merge->owners_co = owners_co;
981   merge->destroy   = Cmpi->ops->destroy;
982 
983   /* attach the supporting struct to Cmpi for reuse */
984   c           = (Mat_MPIAIJ*)Cmpi->data;
985   c->ptap     = ptap;
986   ptap->api   = api;
987   ptap->apj   = apj;
988   ptap->rmax  = ap_rmax;
989   ptap->duplicate = Cmpi->ops->duplicate;
990   ptap->destroy   = Cmpi->ops->destroy;
991 
992   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
993   Cmpi->assembled        = PETSC_FALSE;
994   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
995   Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
996   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap;
997   *C                     = Cmpi;
998 
999   /* flag 'scalable' determines which implementation is used:
1000        0: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
1001        1: do sparse axpy in MatPtAPNumeric() - might be slow, uses a sparse array apa */
1002   /* the nonscalable version is the default */
1003   ptap->scalable = PETSC_FALSE;
1004 
1005   ierr = PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
1006   if (!ptap->scalable) {  /* Do dense axpy */
1007     ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
1008   } else {
1009     ierr = PetscCalloc1(ap_rmax+1,&ptap->apa);CHKERRQ(ierr);
1010   }
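  /* Editorial note: a sketch of selecting the scalable path at run time
     (assuming a generic executable name):

         mpiexec -n 8 ./app -matptap_scalable

     This trades the dense work array of length pN (global num of columns of P)
     for a sparse one of length ap_rmax+1 (max nnz over the local rows of A*P). */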
1011 
1012 #if defined(PETSC_USE_INFO)
1013   if (pti[pn] != 0) {
1014     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
1015     ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
1016   } else {
1017     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
1018   }
1019 #endif
1020   PetscFunctionReturn(0);
1021 }
1022 
1023 #undef __FUNCT__
1024 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap"
1025 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_ptap(Mat A,Mat P,Mat C)
1026 {
1027   PetscErrorCode      ierr;
1028   Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
1029   Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
1030   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
1031   Mat_SeqAIJ          *p_loc,*p_oth;
1032   Mat_PtAPMPI         *ptap;
1033   Mat_Merge_SeqsToMPI *merge;
1034   PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
1035   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
1036   PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
1037   MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
1038   PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
1039   MPI_Comm            comm;
1040   PetscMPIInt         size,rank,taga,*len_s;
1041   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
1042   PetscInt            **buf_ri,**buf_rj;
1043   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
1044   MPI_Request         *s_waits,*r_waits;
1045   MPI_Status          *status;
1046   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
1047   PetscInt            *api,*apj,*coi,*coj;
1048   PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
1049   PetscBool           scalable;
1050 #if defined(PTAP_PROFILE)
1051   PetscLogDouble t0,t1,t2,eP,t3,t4,et2_AP=0.0,ePtAP=0.0,t2_0,t2_1,t2_2;
1052 #endif
1053 
1054   PetscFunctionBegin;
1055   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
1056 #if defined(PTAP_PROFILE)
1057   ierr = PetscTime(&t0);CHKERRQ(ierr);
1058 #endif
1059   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
1060   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
1061 
1062   ptap = c->ptap;
1063   if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
1064   merge    = ptap->merge;
1065   apa      = ptap->apa;
1066   scalable = ptap->scalable;
1067 
1068   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
1069   /*-----------------------------------------------------*/
1070   if (ptap->reuse == MAT_INITIAL_MATRIX) {
1071     /* P_oth and P_loc were obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
1072     ptap->reuse = MAT_REUSE_MATRIX;
1073   } else { /* update numerical values of P_oth and P_loc */
1074     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
1075     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
1076   }
1077 #if defined(PTAP_PROFILE)
1078   ierr = PetscTime(&t1);CHKERRQ(ierr);
1079   eP = t1-t0;
1080 #endif
1087 
1088   /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
1089   /*--------------------------------------------------------------*/
1090   /* get data from symbolic products */
1091   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1092   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1093   pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
1094   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
1095 
1096   coi  = merge->coi; coj = merge->coj;
1097   ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);
1098 
1099   bi     = merge->bi; bj = merge->bj;
1100   owners = merge->rowmap->range;
1101   ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);  /* ba: Cseq->a */
1102 
1103   api = ptap->api; apj = ptap->apj;
1104 
1105   if (!scalable) { /* do dense axpy on apa (of length pN, storing A[i,:]*P) - nonscalable, but faster; can take about 1/3 of the scalable version's time */
1106     ierr = PetscInfo(C,"Using non-scalable dense axpy\n");CHKERRQ(ierr);
1107 #if defined(PTAP_PROFILE)
1108     ierr = PetscTime(&t1);CHKERRQ(ierr);
1109 #endif
1110     for (i=0; i<am; i++) {
1111 #if defined(PTAP_PROFILE)
1112       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1113 #endif
1114       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
1115       /*------------------------------------------------------------*/
1116       apJ = apj + api[i];
1117 
1118       /* diagonal portion of A */
1119       anz = adi[i+1] - adi[i];
1120       adj = ad->j + adi[i];
1121       ada = ad->a + adi[i];
1122       for (j=0; j<anz; j++) {
1123         row = adj[j];
1124         pnz = pi_loc[row+1] - pi_loc[row];
1125         pj  = pj_loc + pi_loc[row];
1126         pa  = pa_loc + pi_loc[row];
1127 
1128         /* perform dense axpy */
1129         valtmp = ada[j];
1130         for (k=0; k<pnz; k++) {
1131           apa[pj[k]] += valtmp*pa[k];
1132         }
1133         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1134       }
1135 
1136       /* off-diagonal portion of A */
1137       anz = aoi[i+1] - aoi[i];
1138       aoj = ao->j + aoi[i];
1139       aoa = ao->a + aoi[i];
1140       for (j=0; j<anz; j++) {
1141         row = aoj[j];
1142         pnz = pi_oth[row+1] - pi_oth[row];
1143         pj  = pj_oth + pi_oth[row];
1144         pa  = pa_oth + pi_oth[row];
1145 
1146         /* perform dense axpy */
1147         valtmp = aoa[j];
1148         for (k=0; k<pnz; k++) {
1149           apa[pj[k]] += valtmp*pa[k];
1150         }
1151         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1152       }
1153 #if defined(PTAP_PROFILE)
1154       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1155       et2_AP += t2_1 - t2_0;
1156 #endif
1157 
1158       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1159       /*--------------------------------------------------------------*/
1160       apnz = api[i+1] - api[i];
1161       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
1162       pnz = po->i[i+1] - po->i[i];
1163       poJ = po->j + po->i[i];
1164       pA  = po->a + po->i[i];
1165       for (j=0; j<pnz; j++) {
1166         row = poJ[j];
1167         cnz = coi[row+1] - coi[row];
1168         cj  = coj + coi[row];
1169         ca  = coa + coi[row];
1170         /* perform dense axpy */
1171         valtmp = pA[j];
1172         for (k=0; k<cnz; k++) {
1173           ca[k] += valtmp*apa[cj[k]];
1174         }
1175         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1176       }
1177       /* put the value into Cd (diagonal part) */
1178       pnz = pd->i[i+1] - pd->i[i];
1179       pdJ = pd->j + pd->i[i];
1180       pA  = pd->a + pd->i[i];
1181       for (j=0; j<pnz; j++) {
1182         row = pdJ[j];
1183         cnz = bi[row+1] - bi[row];
1184         cj  = bj + bi[row];
1185         ca  = ba + bi[row];
1186         /* perform dense axpy */
1187         valtmp = pA[j];
1188         for (k=0; k<cnz; k++) {
1189           ca[k] += valtmp*apa[cj[k]];
1190         }
1191         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1192       }
1193 
1194       /* zero the current row of A*P */
1195       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
1196 #if defined(PTAP_PROFILE)
1197       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1198       ePtAP += t2_2 - t2_1;
1199 #endif
1200     }
1201   } else { /* do sparse axpy on apa (of length ap_rmax, storing the nonzeros of A[i,:]*P) - scalable, but slower */
1202     ierr = PetscInfo(C,"Using scalable sparse axpy\n");CHKERRQ(ierr);
1203     /*-----------------------------------------------------------------------------------------*/
1204     pA=pa_loc;
1205     for (i=0; i<am; i++) {
1206 #if defined(PTAP_PROFILE)
1207       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1208 #endif
1209       /* form i-th sparse row of A*P */
1210       apnz = api[i+1] - api[i];
1211       apJ  = apj + api[i];
1212       /* diagonal portion of A */
1213       anz = adi[i+1] - adi[i];
1214       adj = ad->j + adi[i];
1215       ada = ad->a + adi[i];
1216       for (j=0; j<anz; j++) {
1217         row    = adj[j];
1218         pnz    = pi_loc[row+1] - pi_loc[row];
1219         pj     = pj_loc + pi_loc[row];
1220         pa     = pa_loc + pi_loc[row];
1221         valtmp = ada[j];
1222         nextp  = 0;
1223         for (k=0; nextp<pnz; k++) {
1224           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1225             apa[k] += valtmp*pa[nextp++];
1226           }
1227         }
1228         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1229       }
1230       /* off-diagonal portion of A */
1231       anz = aoi[i+1] - aoi[i];
1232       aoj = ao->j + aoi[i];
1233       aoa = ao->a + aoi[i];
1234       for (j=0; j<anz; j++) {
1235         row    = aoj[j];
1236         pnz    = pi_oth[row+1] - pi_oth[row];
1237         pj     = pj_oth + pi_oth[row];
1238         pa     = pa_oth + pi_oth[row];
1239         valtmp = aoa[j];
1240         nextp  = 0;
1241         for (k=0; nextp<pnz; k++) {
1242           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1243             apa[k] += valtmp*pa[nextp++];
1244           }
1245         }
1246         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1247       }
1248 #if defined(PTAP_PROFILE)
1249       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1250       et2_AP += t2_1 - t2_0;
1251 #endif
1252 
1253       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1254       /*--------------------------------------------------------------*/
1255       pnz = pi_loc[i+1] - pi_loc[i];
1256       pJ  = pj_loc + pi_loc[i];
1257       for (j=0; j<pnz; j++) {
1258         nextap = 0;
1259         row    = pJ[j]; /* global index */
1260         if (row < pcstart || row >=pcend) { /* put the value into Co */
1261           row = *poJ;
1262           cj  = coj + coi[row];
1263           ca  = coa + coi[row]; poJ++;
1264         } else {                            /* put the value into Cd */
1265           row = *pdJ;
1266           cj  = bj + bi[row];
1267           ca  = ba + bi[row]; pdJ++;
1268         }
1269         valtmp = pA[j];
1270         for (k=0; nextap<apnz; k++) {
1271           if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
1272         }
1273         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
1274       }
1275       pA += pnz;
1276       /* zero the current row info for A*P */
1277       ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
1278 #if defined(PTAP_PROFILE)
1279       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1280       ePtAP += t2_2 - t2_1;
1281 #endif
1282     }
1283   }
1284 #if defined(PTAP_PROFILE)
1285   ierr = PetscTime(&t2);CHKERRQ(ierr);
1286 #endif
1287 
1288   /* 3) send and recv matrix values coa */
1289   /*------------------------------------*/
1290   buf_ri = merge->buf_ri;
1291   buf_rj = merge->buf_rj;
1292   len_s  = merge->len_s;
1293   ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
1294   ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
1295 
1296   ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
1297   for (proc=0,k=0; proc<size; proc++) {
1298     if (!len_s[proc]) continue;
1299     i    = merge->owners_co[proc];
1300     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
1301     k++;
1302   }
1303   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
1304   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
1305 
1306   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
1307   ierr = PetscFree(r_waits);CHKERRQ(ierr);
1308   ierr = PetscFree(coa);CHKERRQ(ierr);
1309 #if defined(PTAP_PROFILE)
1310   ierr = PetscTime(&t3);CHKERRQ(ierr);
1311 #endif
1312 
1313   /* 4) insert local Cseq and received values into Cmpi */
1314   /*------------------------------------------------------*/
1315   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
1316   for (k=0; k<merge->nrecv; k++) {
1317     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1318     nrows       = *(buf_ri_k[k]);
1319     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
1320     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
1321   }
1322 
1323   for (i=0; i<cm; i++) {
1324     row  = owners[rank] + i; /* global row index of C_seq */
1325     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
1326     ba_i = ba + bi[i];
1327     bnz  = bi[i+1] - bi[i];
1328     /* add received vals into ba */
1329     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1330       /* i-th row */
1331       if (i == *nextrow[k]) {
1332         cnz    = *(nextci[k]+1) - *nextci[k];
1333         cj     = buf_rj[k] + *(nextci[k]);
1334         ca     = abuf_r[k] + *(nextci[k]);
1335         nextcj = 0;
1336         for (j=0; nextcj<cnz; j++) {
1337           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1338             ba_i[j] += ca[nextcj++];
1339           }
1340         }
1341         nextrow[k]++; nextci[k]++;
1342         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1343       }
1344     }
1345     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
1346   }
1347   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1348   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1349 
1350   ierr = PetscFree(ba);CHKERRQ(ierr);
1351   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
1352   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
1353   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1354 #if defined(PTAP_PROFILE)
1355   ierr = PetscTime(&t4);CHKERRQ(ierr);
1356   if (rank==1) {
1357     ierr = PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g/A*P + %g/Pt*AP ) + %g/comm + %g/Cloc = %g\n\n",rank,eP,t2-t1,et2_AP,ePtAP,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
1358   }
1359 #endif
1360   PetscFunctionReturn(0);
1361 }
1362