xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision e9c1f85f34ebbc72acff983ae651ebdd3da3fad7)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
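/*
  A typical calling sequence for these routines (a sketch, via the public interface):

    ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);  - symbolic + numeric product, creates C
    ... change the numerical values of A (same nonzero pattern) ...
    ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);    - numeric product only, reuses the data attached to C
*/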
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 #include <petsctime.h>
12 
13 #define PTAP_PROFILE
14 
15 extern PetscErrorCode MatDestroy_MPIAIJ(Mat);
16 #undef __FUNCT__
17 #define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
18 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
19 {
20   PetscErrorCode ierr;
21   Mat_MPIAIJ     *a   =(Mat_MPIAIJ*)A->data;
22   Mat_PtAPMPI    *ptap=a->ptap;
23 
24   PetscFunctionBegin;
25   if (ptap) {
26     Mat_Merge_SeqsToMPI *merge=ptap->merge;
27     ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
28     ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
29     ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
30     ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
31     ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
32 
33     ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
34     ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
35     ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
36     ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
37     ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
38 
39     if (ptap->api) {ierr = PetscFree(ptap->api);CHKERRQ(ierr);}
40     if (ptap->apj) {ierr = PetscFree(ptap->apj);CHKERRQ(ierr);}
41     if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
42     if (merge) {
43       ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
44       ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
45       ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
46       ierr = PetscFree(merge->bi);CHKERRQ(ierr);
47     ierr = PetscFree(merge->bj);CHKERRQ(ierr);
49       ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
50       ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
51       ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
52       ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
53       if (merge->coi) {ierr = PetscFree(merge->coi);CHKERRQ(ierr);}
54       if (merge->coj) {ierr = PetscFree(merge->coj);CHKERRQ(ierr);}
55       ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
56       ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
57       ierr = merge->destroy(A);CHKERRQ(ierr);
58       ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
59     }
60     ierr = PetscFree(ptap);CHKERRQ(ierr);
61   }
62   PetscFunctionReturn(0);
63 }
64 
65 #undef __FUNCT__
66 #define __FUNCT__ "MatDuplicate_MPIAIJ_MatPtAP"
67 PetscErrorCode MatDuplicate_MPIAIJ_MatPtAP(Mat A, MatDuplicateOption op, Mat *M)
68 {
69   PetscErrorCode      ierr;
70   Mat_MPIAIJ          *a     = (Mat_MPIAIJ*)A->data;
71   Mat_PtAPMPI         *ptap  = a->ptap;
72   Mat_Merge_SeqsToMPI *merge = ptap->merge;
73 
74   PetscFunctionBegin;
75   ierr = (*merge->duplicate)(A,op,M);CHKERRQ(ierr);
76 
77   (*M)->ops->destroy   = merge->destroy;
78   (*M)->ops->duplicate = merge->duplicate;
79   PetscFunctionReturn(0);
80 }
81 
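/*
   MatPtAP_MPIAIJ_MPIAIJ - driver called by MatPtAP() for MPIAIJ A and P.
   With MAT_INITIAL_MATRIX the symbolic phase is run first to create C and the data structures
   attached to it; the numeric phase is run on every call. The runtime option -matptap_new
   selects the experimental *_new symbolic/numeric pair below instead of the default algorithm.
*/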
82 #undef __FUNCT__
83 #define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
84 PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
85 {
86   PetscErrorCode ierr;
87   PetscBool      newalg=PETSC_FALSE;
88 
89   PetscFunctionBegin;
90   ierr = PetscOptionsGetBool(NULL,"-matptap_new",&newalg,NULL);CHKERRQ(ierr);
91   if (scall == MAT_INITIAL_MATRIX) {
92     ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
93     if (newalg) {
94       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_new(A,P,fill,C);CHKERRQ(ierr);
95     } else {
96       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
97     }
98     ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
99   }
100   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
101   if (newalg) {
102     ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ_new(A,P,*C);CHKERRQ(ierr);
103   } else {
104     ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
105   }
106   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
107   PetscFunctionReturn(0);
108 }
109 
110 #undef __FUNCT__
111 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ_new"
112 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_new(Mat A,Mat P,PetscReal fill,Mat *C)
113 {
114   PetscErrorCode ierr;
115   Mat_PtAPMPI    *ptap;
116   Mat_MPIAIJ     *p=(Mat_MPIAIJ*)P->data;
117   Mat            AP;
118   Mat_MPIAIJ     *c;
119   MPI_Comm       comm;
120   PetscMPIInt    size,rank;
121   PetscLogDouble      t0,t1,t11,t2,t3,t4;
122 
123   PetscFunctionBegin;
124   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
125   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
126   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
127 
128   /* check if matrix local sizes are compatible */
129   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
130     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
131   }
132   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
133     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
134   }
135 
138   Mat                 Cmpi;
139   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
140   Mat_SeqAIJ          *p_loc;
141   PetscInt            *pi_loc;
142   PetscInt            *pdti,*pdtj,*poti,*potj,*ptJ,nnz;
143   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
144   PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
145   PetscBT             lnkbt;
146   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
147   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
148   PetscInt            len,proc,*dnz,*onz,*owners;
149   PetscInt            nzi,*pti,*ptj;
150   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
151   MPI_Request         *swaits,*rwaits;
152   MPI_Status          *sstatus,rstatus;
153   Mat_Merge_SeqsToMPI *merge;
154   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
155   PetscReal           afill=1.0,afill_tmp;
156   PetscInt            rmax;
157 
158   ierr = PetscTime(&t0);CHKERRQ(ierr);
159   /* create struct Mat_PtAPMPI and attach it to C later */
160   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
161   ierr        = PetscNew(&merge);CHKERRQ(ierr);
162   ptap->merge = merge;
163   ptap->reuse = MAT_INITIAL_MATRIX;
164 
165   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
166   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
167 
168   /* get P_loc by taking all local rows of P */
169   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
170 
173   /* (1) compute symbolic AP = A*P, then get AP_loc */
174   /*--------------------------------------------------------------------------*/
175   ierr = MatTranspose_SeqAIJ(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
176   ierr = MatTranspose_SeqAIJ(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
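  /* Rd = (P_diag)^T and Ro = (P_off)^T; they are used in step (2) to form
     C_loc = Rd*AP_loc (locally owned rows of C) and C_oth = Ro*AP_loc (rows owned by other processes) */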
177 
178   ierr = MatMatMult(A,P,MAT_INITIAL_MATRIX,2.0,&AP);CHKERRQ(ierr);
179   ierr = MatMPIAIJGetLocalMat(AP,MAT_INITIAL_MATRIX,&ptap->AP_loc);CHKERRQ(ierr);
180   ierr = MatDestroy(&AP);CHKERRQ(ierr);
181   ierr = PetscTime(&t11);CHKERRQ(ierr);
182 
183   /* (2) compute C_loc=Rd*AP_loc, Co=Ro*AP_loc */
184   ierr = MatMatMult_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,MAT_INITIAL_MATRIX,2.0,&ptap->C_loc);CHKERRQ(ierr);
185   ierr = MatMatMult_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,MAT_INITIAL_MATRIX,2.0,&ptap->C_oth);CHKERRQ(ierr);
186   ierr = PetscTime(&t1);CHKERRQ(ierr);
187 
188   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
189   pi_loc = p_loc->i;
190 
191   Mat_SeqAIJ *ap = (Mat_SeqAIJ*)ptap->AP_loc->data;
192   Mat_SeqAIJ *c_oth  = (Mat_SeqAIJ*)ptap->C_oth->data;
193   Mat_SeqAIJ *c_loc  = (Mat_SeqAIJ*)ptap->C_loc->data;
194   api = ap->i; apj = ap->j;
195 
196   /* extract the i/j structure of Co = C_oth = Ro*AP_loc to be sent to other processes (coi,coj) */
197   /*----------------------------------------------------------------------------------------------*/
198   coi = c_oth->i; coj =  c_oth->j;
199 
200   Mat_SeqAIJ *po = (Mat_SeqAIJ *)ptap->Ro->data;
201   poti = po->i; potj = po->j;
202 
203   /* then, compute symbolic Co = (p->B)^T*AP */
204   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
205                          >= (num of nonzero rows of C_seq) - pn */
206 
207   /* create and initialize a linked list */
208   ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
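  /* lnk is a condensed, sorted linked list over the pN global columns:
     PetscLLCondensedAddSorted() merges a sorted index array into it and
     PetscLLCondensedClean() copies the accumulated unique columns out and resets the list;
     it is used in step (5) to merge the rows of C_loc with the received rows of Co */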
209 
210   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
211   if (afill_tmp > afill) afill = afill_tmp;
212 
213   /* (3) send j-array (coj) of Co to other processors */
214   /*--------------------------------------------------*/
215   ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
216   len_s        = merge->len_s;
217   merge->nsend = 0;
218 
219   /* determine row ownership */
220   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
221   merge->rowmap->n  = pn;
222   merge->rowmap->bs = 1;
223 
224   ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
225   owners = merge->rowmap->range;
226 
227   /* determine the number of messages to send, their lengths */
228   ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
229   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
230   ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);
231 
232   proc = 0;
233   for (i=0; i<pon; i++) {
234     while (prmap[i] >= owners[proc+1]) proc++;
235     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
236     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
237   }
238 
239   len          = 0; /* max length of buf_si[], see (4) */
240   owners_co[0] = 0;
241   for (proc=0; proc<size; proc++) {
242     owners_co[proc+1] = owners_co[proc] + len_si[proc];
243     if (len_s[proc]) {
244       merge->nsend++;
245       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
246       len         += len_si[proc];
247     }
248   }
249 
250   /* determine the number and length of messages to receive for coi and coj  */
251   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
252   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
253 
254   /* post the Irecv and Isend of coj */
255   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
256   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
257   ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
258   for (proc=0, k=0; proc<size; proc++) {
259     if (!len_s[proc]) continue;
260     i    = owners_co[proc];
261     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
262     k++;
263   }
264 
265   /* receives and sends of coj are complete */
266   for (i=0; i<merge->nrecv; i++) {
267     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
268   }
269   ierr = PetscFree(rwaits);CHKERRQ(ierr);
270   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
271 
272   /* (4) send and recv coi */
273   /*-----------------------*/
274   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
275   ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
276   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
277   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
278   for (proc=0,k=0; proc<size; proc++) {
279     if (!len_s[proc]) continue;
280     /* form outgoing message for i-structure:
281          buf_si[0]:                 nrows to be sent
282                [1:nrows]:           row index (local to the receiving process)
283                [nrows+1:2*nrows+1]: i-structure index
284     */
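    /* for example, a message carrying 2 rows with local indices 3 and 7 that have 4 and 5
       nonzeros respectively is laid out as buf_si = {2, 3, 7, 0, 4, 9} (length len_si[proc] = 2*nrows+2) */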
285     /*-------------------------------------------*/
286     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
287     buf_si_i    = buf_si + nrows+1;
288     buf_si[0]   = nrows;
289     buf_si_i[0] = 0;
290     nrows       = 0;
291     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
292       nzi = coi[i+1] - coi[i];
293       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
294       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
295       nrows++;
296     }
297     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
298     k++;
299     buf_si += len_si[proc];
300   }
301   i = merge->nrecv;
302   while (i--) {
303     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
304   }
305   ierr = PetscFree(rwaits);CHKERRQ(ierr);
306   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
307 
308   ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
309   ierr = PetscFree(len_ri);CHKERRQ(ierr);
310   ierr = PetscFree(swaits);CHKERRQ(ierr);
311   ierr = PetscFree(buf_s);CHKERRQ(ierr);
312 
313   ierr = PetscTime(&t2);CHKERRQ(ierr);
314 
317   /* (5) compute the local portion of C (mpi mat) */
318   /*----------------------------------------------*/
319 
320   Mat_SeqAIJ *pd = (Mat_SeqAIJ *)ptap->Rd->data;
321   pdti = pd->i; pdtj = pd->j;
322 
323   /* allocate pti array and free space for accumulating nonzero column info */
324   ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
325   pti[0] = 0;
326 
327   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
328   nnz           = fill*(pi_loc[pm] + api[am]);
329   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
330   current_space = free_space;
331 
332   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
333   for (k=0; k<merge->nrecv; k++) {
334     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
335     nrows       = *buf_ri_k[k];
336     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
337     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
338   }
339   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
340   rmax = 0;
341   for (i=0; i<pn; i++) {
342     /* add C_loc into lnk */
343     nzi  = c_loc->i[i+1] - c_loc->i[i];
344     Jptr = c_loc->j + c_loc->i[i];
345     ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
346 
347     /* add received col data into lnk */
348     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
349       if (i == *nextrow[k]) { /* i-th row */
350         nzi  = *(nextci[k]+1) - *nextci[k];
351         Jptr = buf_rj[k] + *nextci[k];
352         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
353         nextrow[k]++; nextci[k]++;
354       }
355     }
356     nnz = lnk[0];
357 
358     /* if free space is not available, make more free space */
359     if (current_space->local_remaining<nnz) {
360       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
361       nspacedouble++;
362     }
363     /* copy data into free space, then initialize lnk */
364     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
365     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
366 
367 #if 0
368     current_space->array           += nnz;
369     current_space->local_used      += nnz;
370     current_space->local_remaining -= nnz;
371 #endif
372     for (j=0; j<nnz; j++) current_space->array[j] = 0; /* bj of C is not stored, so the same space is reused for the next row */
373 
374     pti[i+1] = pti[i] + nnz;
375     if (nnz > rmax) rmax = nnz;
376   }
377   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
378 
379   ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
380   ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
381   ierr      = PetscFree(ptj);CHKERRQ(ierr); /* ptj was only needed to drain the free-space list; merge->bj is not kept */
382 
383   afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
384   if (afill_tmp > afill) afill = afill_tmp;
385   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
386 
387   ierr = PetscTime(&t3);CHKERRQ(ierr);
388 
389   /* (6) create symbolic parallel matrix Cmpi */
390   /*------------------------------------------*/
391   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
392   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
393   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
394   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
395   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
396   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
397 
398   merge->bi        = pti;      /* Cseq->i */
399   merge->bj        = NULL;     /* Cseq->j is not stored by this algorithm */
400   merge->coi       = NULL;     /* Co->i and Co->j already live in ptap->C_oth */
401   merge->coj       = NULL;
402   merge->buf_ri    = buf_ri;
403   merge->buf_rj    = buf_rj;
404   merge->owners_co = owners_co;
405   merge->destroy   = Cmpi->ops->destroy;
406   merge->duplicate = Cmpi->ops->duplicate;
407 
408   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
409   Cmpi->assembled        = PETSC_FALSE;
410   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
411   Cmpi->ops->duplicate   = MatDuplicate_MPIAIJ_MatPtAP;
412   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_new;
413 
414   /* attach the supporting struct to Cmpi for reuse */
415   c           = (Mat_MPIAIJ*)Cmpi->data;
416   c->ptap     = ptap;
417   ptap->api   = NULL;
418   ptap->apj   = NULL;
419   ptap->rmax  = ap_rmax;
420   *C          = Cmpi;
421 
422   /* flag 'scalable' determines which implementations to be used:
423        0: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
424        1: do sparse axpy in MatPtAPNumeric() - might be slower, but uses a sparse array apa */
425   /* set default scalable */
426   ptap->scalable = PETSC_FALSE;
427 
428   ierr = PetscOptionsGetBool(((PetscObject)(*C))->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
429   if (!ptap->scalable) {  /* Do dense axpy */
430     ierr = PetscCalloc1(P->cmap->N,&ptap->apa);CHKERRQ(ierr);
431   } else {
432     /* the sparse-axpy (scalable) path is not implemented by this experimental routine */
433   }
434 
435   ierr = PetscTime(&t4);CHKERRQ(ierr);
436   if (rank == 1) {
437     ierr = PetscPrintf(MPI_COMM_SELF,"PtAPSym: %g + %g + %g + %g + %g = %g\n",t11-t0,t1-t11,t2-t1,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
438   }
439   PetscFunctionReturn(0);
440 }
441 
442 #undef __FUNCT__
443 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ_new"
444 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_new(Mat A,Mat P,Mat C)
445 {
446   PetscErrorCode    ierr;
447   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
448   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
449   Mat_PtAPMPI       *ptap = c->ptap;
450   Mat               AP_loc,C_loc,C_oth;
451   PetscInt          i,rstart,rend,cm,ncols,row;
452   PetscMPIInt       rank;
453   MPI_Comm          comm;
454   const PetscInt    *cols;
455   const PetscScalar *vals;
456   PetscLogDouble    t0,t1,t2,t3,t4,eR,eAP,eCseq,eCmpi;
457 
458   PetscFunctionBegin;
459   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
460   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
461 
462   ierr = MatZeroEntries(C);CHKERRQ(ierr);
463 
464   /* 1) get R = Pd^T,Ro = Po^T */
465   ierr = PetscTime(&t0);CHKERRQ(ierr);
466   if (ptap->reuse == MAT_REUSE_MATRIX) {
467     ierr = MatTranspose_SeqAIJ(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
468     ierr = MatTranspose_SeqAIJ(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
469   }
470   ierr = PetscTime(&t1);CHKERRQ(ierr);
471   eR = t1 - t0;
472 
473   /* 2) get AP_loc */
474   AP_loc = ptap->AP_loc;
475   Mat_SeqAIJ    *ap=(Mat_SeqAIJ*)AP_loc->data;
476 
477   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
478   /*-----------------------------------------------------*/
479   if (ptap->reuse == MAT_REUSE_MATRIX) {
480     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
481     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
482     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
483   }
484 
485   /* 2-2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
486   /*--------------------------------------------------------------*/
487   /* get data from symbolic products */
488   Mat_SeqAIJ  *p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
489   Mat_SeqAIJ  *p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
490   PetscInt    *api,*apj,am = A->rmap->n,j,col,apnz;
491   PetscScalar *apa = ptap->apa;
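  /* when the product is being reused, the values of AP_loc must be recomputed:
     AProw_nonscalable() accumulates the dense row A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth into apa,
     the entries are then gathered into AP_loc's value array at the column positions fixed by the
     symbolic phase (api,apj), and the touched positions of apa are reset to zero */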
492   if (ptap->reuse == MAT_REUSE_MATRIX) {
493     api = ap->i;
494     apj = ap->j;
495     for (i=0; i<am; i++) {
496       /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
497       AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
498       apnz = api[i+1] - api[i];
499       for (j=0; j<apnz; j++) {
500         col = apj[j+api[i]];
501         ap->a[j+ap->i[i]] = apa[col];
502         apa[col] = 0.0;
503       }
504     }
505   }
506 
507   ierr = PetscTime(&t2);CHKERRQ(ierr);
508   eAP = t2 - t1;
509 
510   /* 3) C_loc = R*AP_loc, Co = Ro*AP_loc */
511   if (ptap->reuse == MAT_REUSE_MATRIX) {
512     ierr = MatMatMult_SeqAIJ_SeqAIJ(ptap->Rd,AP_loc,MAT_REUSE_MATRIX,2.0,&ptap->C_loc);CHKERRQ(ierr);
513     ierr = MatMatMult_SeqAIJ_SeqAIJ(ptap->Ro,AP_loc,MAT_REUSE_MATRIX,2.0,&ptap->C_oth);CHKERRQ(ierr);
514   }
515   C_loc = ptap->C_loc;
516   C_oth = ptap->C_oth;
518   ierr = PetscTime(&t3);CHKERRQ(ierr);
519   eCseq = t3 - t2;
520 
521   /* add C_loc and C_oth to C */
522   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
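  /* rows of C_loc are the locally owned rows rstart..rend-1 of C and are added directly;
     rows of C_oth belong to other processes and their global row indices are given by p->garray[],
     so ADD_VALUES plus MatAssemblyBegin/End communicates and sums them onto the owning ranks */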
523 
524   /* C_loc -> C */
525   cm = C_loc->rmap->N;
526   Mat_SeqAIJ *c_seq;
527   c_seq = (Mat_SeqAIJ*)C_loc->data;
528   for (i=0; i<cm; i++) {
529     ncols = c_seq->i[i+1] - c_seq->i[i];
530     row = rstart + i;
531     cols = c_seq->j + c_seq->i[i];
532     vals = c_seq->a + c_seq->i[i];
533     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
534   }
535 
536   /* Co -> C, off-processor part */
538   cm = C_oth->rmap->N;
539   c_seq = (Mat_SeqAIJ*)C_oth->data;
540   for (i=0; i<cm; i++) {
541     ncols = c_seq->i[i+1] - c_seq->i[i];
542     row = p->garray[i];
543     cols = c_seq->j + c_seq->i[i];
544     vals = c_seq->a + c_seq->i[i];
546     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
547   }
548   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
549   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
550   ierr = PetscTime(&t4);CHKERRQ(ierr);
551   eCmpi = t4 - t3;
552 
553   if (rank==1) {
554     ierr = PetscPrintf(MPI_COMM_SELF," R %g, AP %g, Cseq %g, Cmpi %g = %g\n", eR,eAP,eCseq,eCmpi,eR+eAP+eCseq+eCmpi);CHKERRQ(ierr);
555   }
556   ptap->reuse = MAT_REUSE_MATRIX;
557   PetscFunctionReturn(0);
558 }
559 
560 #undef __FUNCT__
561 #define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
562 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
563 {
564   PetscErrorCode      ierr;
565   Mat                 Cmpi;
566   Mat_PtAPMPI         *ptap;
567   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
568   Mat_MPIAIJ          *a        =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
569   Mat_SeqAIJ          *ad       =(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
570   Mat_SeqAIJ          *p_loc,*p_oth;
571   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ;
572   PetscInt            *adi=ad->i,*aj,*aoi=ao->i,nnz;
573   PetscInt            *lnk,*owners_co,*coi,*coj,i,k,pnz,row;
574   PetscInt            am=A->rmap->n,pN=P->cmap->N,pm=P->rmap->n,pn=P->cmap->n;
575   PetscBT             lnkbt;
576   MPI_Comm            comm;
577   PetscMPIInt         size,rank,tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0;
578   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
579   PetscInt            len,proc,*dnz,*onz,*owners;
580   PetscInt            nzi,*pti,*ptj;
581   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
582   MPI_Request         *swaits,*rwaits;
583   MPI_Status          *sstatus,rstatus;
584   Mat_Merge_SeqsToMPI *merge;
585   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,pon,nspacedouble=0,j,ap_rmax=0;
586   PetscReal           afill=1.0,afill_tmp;
587   PetscInt            rmax;
588 
589   PetscFunctionBegin;
590   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
591 
592   /* check if matrix local sizes are compatible */
593   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) {
594     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
595   }
596   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) {
597     SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
598   }
599 
600   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
601   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
602 
603   /* create struct Mat_PtAPMPI and attach it to C later */
604   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
605   ierr        = PetscNew(&merge);CHKERRQ(ierr);
606   ptap->merge = merge;
607   ptap->reuse = MAT_INITIAL_MATRIX;
608 
609   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
610   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
611 
612   /* get P_loc by taking all local rows of P */
613   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
614 
615   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
616   p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
617   pi_loc = p_loc->i; pj_loc = p_loc->j;
618   pi_oth = p_oth->i; pj_oth = p_oth->j;
619 
620   /* (1) compute symbolic AP = A_loc*P = A_diag*P_loc + A_off*P_oth (api,apj) */
621   /*--------------------------------------------------------------------------*/
622   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
623   api[0] = 0;
624 
625   /* create and initialize a linked list */
626   ierr = PetscLLCondensedCreate(pN,pN,&lnk,&lnkbt);CHKERRQ(ierr);
627 
628   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) -OOM for ex56, np=8k on Intrepid! */
629   ierr = PetscFreeSpaceGet((PetscInt)(fill*(adi[am]+aoi[am]+pi_loc[pm])),&free_space);CHKERRQ(ierr);
630 
631   current_space = free_space;
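  /* rows of AP are accumulated into chunks of this free-space list; whenever a row does not fit into
     the current chunk, a new chunk of size apnz + total_array_size is requested (nspacedouble counts
     these growths), and PetscFreeSpaceContiguous() later copies everything into the contiguous apj */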
632 
633   for (i=0; i<am; i++) {
634     /* diagonal portion of A */
635     nzi = adi[i+1] - adi[i];
636     aj  = ad->j + adi[i];
637     for (j=0; j<nzi; j++) {
638       row  = aj[j];
639       pnz  = pi_loc[row+1] - pi_loc[row];
640       Jptr = pj_loc + pi_loc[row];
641       /* add non-zero cols of P into the sorted linked list lnk */
642       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
643     }
644     /* off-diagonal portion of A */
645     nzi = aoi[i+1] - aoi[i];
646     aj  = ao->j + aoi[i];
647     for (j=0; j<nzi; j++) {
648       row  = aj[j];
649       pnz  = pi_oth[row+1] - pi_oth[row];
650       Jptr = pj_oth + pi_oth[row];
651       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
652     }
653     apnz     = lnk[0];
654     api[i+1] = api[i] + apnz;
655     if (ap_rmax < apnz) ap_rmax = apnz;
656 
657     /* if free space is not available, double the total space in the list */
658     if (current_space->local_remaining<apnz) {
659       ierr = PetscFreeSpaceGet(apnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
660       nspacedouble++;
661     }
662 
663     /* Copy data into free space, then initialize lnk */
664     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
665 
666     current_space->array           += apnz;
667     current_space->local_used      += apnz;
668     current_space->local_remaining -= apnz;
669   }
670 
671   /* Allocate space for apj, initialize apj, and */
672   /* destroy list of free space and other temporary array(s) */
673   ierr      = PetscMalloc1(api[am]+1,&apj);CHKERRQ(ierr);
674   ierr      = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
675   afill_tmp = (PetscReal)api[am]/(adi[am]+aoi[am]+pi_loc[pm]+1);
676   if (afill_tmp > afill) afill = afill_tmp;
677 
678   /* (2) determine symbolic Co=(p->B)^T*AP - send to others (coi,coj)*/
679   /*-----------------------------------------------------------------*/
680   ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
681 
682   /* then, compute symbolic Co = (p->B)^T*AP */
683   pon    = (p->B)->cmap->n; /* total num of rows to be sent to other processors
684                          >= (num of nonzero rows of C_seq) - pn */
685   ierr   = PetscMalloc1(pon+1,&coi);CHKERRQ(ierr);
686   coi[0] = 0;
687 
688   /* set initial free space to be fill*(nnz(p->B) + nnz(AP)) */
689   nnz           = fill*(poti[pon] + api[am]);
690   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
691   current_space = free_space;
692 
693   for (i=0; i<pon; i++) {
694     pnz = poti[i+1] - poti[i];
695     ptJ = potj + poti[i];
696     for (j=0; j<pnz; j++) {
697       row  = ptJ[j]; /* row of AP == col of Pot */
698       apnz = api[row+1] - api[row];
699       Jptr = apj + api[row];
700       /* add non-zero cols of AP into the sorted linked list lnk */
701       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
702     }
703     nnz = lnk[0];
704 
705     /* If free space is not available, double the total space in the list */
706     if (current_space->local_remaining<nnz) {
707       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
708       nspacedouble++;
709     }
710 
711     /* Copy data into free space, and zero out denserows */
712     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
713 
714     current_space->array           += nnz;
715     current_space->local_used      += nnz;
716     current_space->local_remaining -= nnz;
717 
718     coi[i+1] = coi[i] + nnz;
719   }
720 
721   ierr      = PetscMalloc1(coi[pon],&coj);CHKERRQ(ierr);
722   ierr      = PetscFreeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
723   afill_tmp = (PetscReal)coi[pon]/(poti[pon] + api[am]+1);
724   if (afill_tmp > afill) afill = afill_tmp;
725   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);
726 
727   /* (3) send j-array (coj) of Co to other processors */
728   /*--------------------------------------------------*/
729   ierr = PetscCalloc1(size,&merge->len_s);CHKERRQ(ierr);
730   len_s        = merge->len_s;
731   merge->nsend = 0;
732 
733 
734   /* determine row ownership */
735   ierr = PetscLayoutCreate(comm,&merge->rowmap);CHKERRQ(ierr);
736   merge->rowmap->n  = pn;
737   merge->rowmap->bs = 1;
738 
739   ierr   = PetscLayoutSetUp(merge->rowmap);CHKERRQ(ierr);
740   owners = merge->rowmap->range;
741 
742   /* determine the number of messages to send, their lengths */
743   ierr = PetscMalloc2(size,&len_si,size,&sstatus);CHKERRQ(ierr);
744   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
745   ierr = PetscMalloc1(size+2,&owners_co);CHKERRQ(ierr);
746 
747   proc = 0;
748   for (i=0; i<pon; i++) {
749     while (prmap[i] >= owners[proc+1]) proc++;
750     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
751     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
752   }
753 
754   len          = 0; /* max length of buf_si[], see (4) */
755   owners_co[0] = 0;
756   for (proc=0; proc<size; proc++) {
757     owners_co[proc+1] = owners_co[proc] + len_si[proc];
758     if (len_s[proc]) {
759       merge->nsend++;
760       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
761       len         += len_si[proc];
762     }
763   }
764 
765   /* determine the number and length of messages to receive for coi and coj  */
766   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
767   ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);
768 
769   /* post the Irecv and Isend of coj */
770   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
771   ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
772   ierr = PetscMalloc1(merge->nsend+1,&swaits);CHKERRQ(ierr);
773   for (proc=0, k=0; proc<size; proc++) {
774     if (!len_s[proc]) continue;
775     i    = owners_co[proc];
776     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
777     k++;
778   }
779 
780   /* receives and sends of coj are complete */
781   for (i=0; i<merge->nrecv; i++) {
782     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
783   }
784   ierr = PetscFree(rwaits);CHKERRQ(ierr);
785   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
786 
787   /* (4) send and recv coi */
788   /*-----------------------*/
789   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
790   ierr   = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
791   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
792   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
793   for (proc=0,k=0; proc<size; proc++) {
794     if (!len_s[proc]) continue;
795     /* form outgoing message for i-structure:
796          buf_si[0]:                 nrows to be sent
797                [1:nrows]:           row index (local to the receiving process)
798                [nrows+1:2*nrows+1]: i-structure index
799     */
800     /*-------------------------------------------*/
801     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
802     buf_si_i    = buf_si + nrows+1;
803     buf_si[0]   = nrows;
804     buf_si_i[0] = 0;
805     nrows       = 0;
806     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
807       nzi = coi[i+1] - coi[i];
808       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
809       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
810       nrows++;
811     }
812     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
813     k++;
814     buf_si += len_si[proc];
815   }
816   i = merge->nrecv;
817   while (i--) {
818     ierr = MPI_Waitany(merge->nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
819   }
820   ierr = PetscFree(rwaits);CHKERRQ(ierr);
821   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,swaits,sstatus);CHKERRQ(ierr);}
822 
823   ierr = PetscFree2(len_si,sstatus);CHKERRQ(ierr);
824   ierr = PetscFree(len_ri);CHKERRQ(ierr);
825   ierr = PetscFree(swaits);CHKERRQ(ierr);
826   ierr = PetscFree(buf_s);CHKERRQ(ierr);
827 
828   /* (5) compute the local portion of C (mpi mat) */
829   /*----------------------------------------------*/
830   ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
831 
832   /* allocate pti array and free space for accumulating nonzero column info */
833   ierr   = PetscMalloc1(pn+1,&pti);CHKERRQ(ierr);
834   pti[0] = 0;
835 
836   /* set initial free space to be fill*(nnz(P) + nnz(AP)) */
837   nnz           = fill*(pi_loc[pm] + api[am]);
838   ierr          = PetscFreeSpaceGet(nnz,&free_space);CHKERRQ(ierr);
839   current_space = free_space;
840 
841   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
842   for (k=0; k<merge->nrecv; k++) {
843     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
844     nrows       = *buf_ri_k[k];
845     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th recved i-structure */
846     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
847   }
848   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
849   rmax = 0;
850   for (i=0; i<pn; i++) {
851     /* add pdt[i,:]*AP into lnk */
852     pnz = pdti[i+1] - pdti[i];
853     ptJ = pdtj + pdti[i];
854     for (j=0; j<pnz; j++) {
855       row  = ptJ[j];  /* row of AP == col of Pt */
856       apnz = api[row+1] - api[row];
857       Jptr = apj + api[row];
858       /* add non-zero cols of AP into the sorted linked list lnk */
859       ierr = PetscLLCondensedAddSorted(apnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
860     }
861 
862     /* add received col data into lnk */
863     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
864       if (i == *nextrow[k]) { /* i-th row */
865         nzi  = *(nextci[k]+1) - *nextci[k];
866         Jptr = buf_rj[k] + *nextci[k];
867         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
868         nextrow[k]++; nextci[k]++;
869       }
870     }
871     nnz = lnk[0];
872 
873     /* if free space is not available, make more free space */
874     if (current_space->local_remaining<nnz) {
875       ierr = PetscFreeSpaceGet(nnz+current_space->total_array_size,&current_space);CHKERRQ(ierr);
876       nspacedouble++;
877     }
878     /* copy data into free space, then initialize lnk */
879     ierr = PetscLLCondensedClean(pN,nnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
880     ierr = MatPreallocateSet(i+owners[rank],nnz,current_space->array,dnz,onz);CHKERRQ(ierr);
881 
882     current_space->array           += nnz;
883     current_space->local_used      += nnz;
884     current_space->local_remaining -= nnz;
885 
886     pti[i+1] = pti[i] + nnz;
887     if (nnz > rmax) rmax = nnz;
888   }
889   ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
890   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
891 
892   ierr      = PetscMalloc1(pti[pn]+1,&ptj);CHKERRQ(ierr);
893   ierr      = PetscFreeSpaceContiguous(&free_space,ptj);CHKERRQ(ierr);
894   afill_tmp = (PetscReal)pti[pn]/(pi_loc[pm] + api[am]+1);
895   if (afill_tmp > afill) afill = afill_tmp;
896   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
897 
898   /* (6) create symbolic parallel matrix Cmpi */
899   /*------------------------------------------*/
900   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
901   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
902   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
903   ierr = MatSetType(Cmpi,MATMPIAIJ);CHKERRQ(ierr);
904   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
905   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
906 
907   merge->bi        = pti;      /* Cseq->i */
908   merge->bj        = ptj;      /* Cseq->j */
909   merge->coi       = coi;      /* Co->i   */
910   merge->coj       = coj;      /* Co->j   */
911   merge->buf_ri    = buf_ri;
912   merge->buf_rj    = buf_rj;
913   merge->owners_co = owners_co;
914   merge->destroy   = Cmpi->ops->destroy;
915   merge->duplicate = Cmpi->ops->duplicate;
916 
917   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
918   Cmpi->assembled      = PETSC_FALSE;
919   Cmpi->ops->destroy   = MatDestroy_MPIAIJ_PtAP;
920   Cmpi->ops->duplicate = MatDuplicate_MPIAIJ_MatPtAP;
921 
922   /* attach the supporting struct to Cmpi for reuse */
923   c           = (Mat_MPIAIJ*)Cmpi->data;
924   c->ptap     = ptap;
925   ptap->api   = api;
926   ptap->apj   = apj;
927   ptap->rmax  = ap_rmax;
928   *C          = Cmpi;
929 
930   /* flag 'scalable' determines which implementations to be used:
931        0: do dense axpy in MatPtAPNumeric() - fast, but requires storage of a nonscalable dense array apa;
932        1: do sparse axpy in MatPtAPNumeric() - might be slower, but uses a sparse array apa */
933   /* set default scalable */
934   ptap->scalable = PETSC_FALSE;
935 
936   ierr = PetscOptionsGetBool(((PetscObject)Cmpi)->prefix,"-matptap_scalable",&ptap->scalable,NULL);CHKERRQ(ierr);
937   if (!ptap->scalable) {  /* Do dense axpy */
938     ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
939   } else {
940     ierr = PetscCalloc1(ap_rmax+1,&ptap->apa);CHKERRQ(ierr);
941   }
942 
943 #if defined(PETSC_USE_INFO)
944   if (pti[pn] != 0) {
945     ierr = PetscInfo3(Cmpi,"Reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)afill);CHKERRQ(ierr);
946     ierr = PetscInfo1(Cmpi,"Use MatPtAP(A,P,MatReuse,%g,&C) for best performance.\n",(double)afill);CHKERRQ(ierr);
947   } else {
948     ierr = PetscInfo(Cmpi,"Empty matrix product\n");CHKERRQ(ierr);
949   }
950 #endif
951   PetscFunctionReturn(0);
952 }
953 
954 #undef __FUNCT__
955 #define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
956 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
957 {
958   PetscErrorCode      ierr;
959   Mat_MPIAIJ          *a =(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
960   Mat_SeqAIJ          *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
961   Mat_SeqAIJ          *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
962   Mat_SeqAIJ          *p_loc,*p_oth;
963   Mat_PtAPMPI         *ptap;
964   Mat_Merge_SeqsToMPI *merge;
965   PetscInt            *adi=ad->i,*aoi=ao->i,*adj,*aoj,*apJ,nextp;
966   PetscInt            *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
967   PetscInt            i,j,k,anz,pnz,apnz,nextap,row,*cj;
968   MatScalar           *ada,*aoa,*apa,*pa,*ca,*pa_loc,*pa_oth,valtmp;
969   PetscInt            am  =A->rmap->n,cm=C->rmap->n,pon=(p->B)->cmap->n;
970   MPI_Comm            comm;
971   PetscMPIInt         size,rank,taga,*len_s;
972   PetscInt            *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
973   PetscInt            **buf_ri,**buf_rj;
974   PetscInt            cnz=0,*bj_i,*bi,*bj,bnz,nextcj;  /* bi,bj,ba: local array of C(mpi mat) */
975   MPI_Request         *s_waits,*r_waits;
976   MPI_Status          *status;
977   MatScalar           **abuf_r,*ba_i,*pA,*coa,*ba;
978   PetscInt            *api,*apj,*coi,*coj;
979   PetscInt            *poJ=po->j,*pdJ=pd->j,pcstart=P->cmap->rstart,pcend=P->cmap->rend;
980   PetscBool           scalable;
981 #if defined(PTAP_PROFILE)
982   PetscLogDouble t0,t1,t2,eP,t3,t4,et2_AP=0.0,ePtAP=0.0,t2_0,t2_1,t2_2;
983 #endif
984 
985   PetscFunctionBegin;
986   ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
987 #if defined(PTAP_PROFILE)
988   ierr = PetscTime(&t0);CHKERRQ(ierr);
989 #endif
990   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
991   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
992 
993   ptap = c->ptap;
994   if (!ptap) SETERRQ(PetscObjectComm((PetscObject)C),PETSC_ERR_ARG_INCOMP,"MatPtAP() has not been called to create matrix C yet, cannot use MAT_REUSE_MATRIX");
995   merge    = ptap->merge;
996   apa      = ptap->apa;
997   scalable = ptap->scalable;
998 
999   /* 1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
1000   /*-----------------------------------------------------*/
1001   if (ptap->reuse == MAT_INITIAL_MATRIX) {
1002     /* P_oth and P_loc are obtained in MatPtAPSymbolic(), skip calling MatGetBrowsOfAoCols() and MatMPIAIJGetLocalMat() */
1003     ptap->reuse = MAT_REUSE_MATRIX;
1004   } else { /* update numerical values of P_oth and P_loc */
1005     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
1006     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
1007   }
1008 #if defined(PTAP_PROFILE)
1009   ierr = PetscTime(&t1);CHKERRQ(ierr);
1010   eP = t1-t0;
1011 #endif
1018 
1019   /* 2) compute numeric C_seq = P_loc^T*A_loc*P - dominating part */
1020   /*--------------------------------------------------------------*/
1021   /* get data from symbolic products */
1022   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1023   p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1024   pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a;
1025   pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;
1026 
1027   coi  = merge->coi; coj = merge->coj;
1028   ierr = PetscCalloc1(coi[pon]+1,&coa);CHKERRQ(ierr);
1029 
1030   bi     = merge->bi; bj = merge->bj;
1031   owners = merge->rowmap->range;
1032   ierr   = PetscCalloc1(bi[cm]+1,&ba);CHKERRQ(ierr);  /* ba: Cseq->a */
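  /* coa holds the values of the off-process rows Co (pattern coi,coj) that are sent to their owners in step 3;
     ba holds the values of the locally owned rows of C_seq (pattern bi,bj) that are merged with the
     received contributions in step 4 */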
1033 
1034   api = ptap->api; apj = ptap->apj;
1035 
1036   if (!scalable) { /* Do dense axpy on apa (length of pN, stores A[i,:]*P) - nonscalable, but faster (could take 1/3 scalable time) */
1037     ierr = PetscInfo(C,"Using non-scalable dense axpy\n");CHKERRQ(ierr);
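    /* apa is a dense work array of length P->cmap->N: for each local row i of A the row A[i,:]*P is
       accumulated into apa by dense axpys, then scattered into the corresponding rows of Co and Cd using
       the column patterns computed in the symbolic phase, and finally only the apnz touched entries are zeroed */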
1038 #if 0
1039     /* ------ 10x slower -------------- */
1040     /*==================================*/
1041     Mat         R = ptap->R;
1042     Mat_SeqAIJ  *r = (Mat_SeqAIJ*)R->data;
1043     PetscInt    *ri=r->i,*rj=r->j,rnz,arow,l,prow,pcol,pN=P->cmap->N;
1044     PetscScalar *ra=r->a,tmp,cdense[pN];
1045 
1046     ierr = PetscMemzero(cdense,pN*sizeof(PetscScalar));CHKERRQ(ierr);
1047     for (i=0; i<cm; i++) { /* each row of C or R */
1048       rnz = ri[i+1] - ri[i];
1049 
1050       for (j=0; j<rnz; j++) { /* each nz of R */
1051         arow = rj[ri[i] + j];
1052 
1053         /* diagonal portion of A */
1054         anz  = ad->i[arow+1] - ad->i[arow];
1055         for (k=0; k<anz; k++) { /* each nz of Ad */
1056           tmp  = ra[ri[i] + j]*ad->a[ad->i[arow] + k];
1057           prow = ad->j[ad->i[arow] + k];
1058           pnz  = pi_loc[prow+1] - pi_loc[prow];
1059 
1060           for (l=0; l<pnz; l++) { /* each nz of P_loc */
1061             pcol = pj_loc[pi_loc[prow] + l];
1062             cdense[pcol] += tmp*pa_loc[pi_loc[prow] + l];
1063           }
1064         }
1065 
1066         /* off-diagonal portion of A */
1067         anz  = ao->i[arow+1] - ao->i[arow];
1068         for (k=0; k<anz; k++) { /* each nz of Ao */
1069           tmp  = ra[ri[i] + j]*ao->a[ao->i[arow] + k];
1070           prow = ao->j[ao->i[arow] + k];
1071           pnz  = pi_oth[prow+1] - pi_oth[prow];
1072 
1073           for (l=0; l<pnz; l++) { /* each nz of P_oth */
1074             pcol = pj_oth[pi_oth[prow] + l];
1075             cdense[pcol] += tmp*pa_oth[pi_oth[prow] + l];
1076           }
1077         }
1078 
1079       } //for (j=0; j<rnz; j++)
1080 
1081       /* copy cdense[] into ca; zero cdense[] */
1082       cnz = bi[i+1] - bi[i];
1083       cj  = bj + bi[i];
1084       ca  = ba + bi[i];
1085       for (j=0; j<cnz; j++) {
1086         ca[j] += cdense[cj[j]];
1087         cdense[cj[j]] = 0.0;
1088       }
1089 #if 0
1090       if (rank == 0) {
1091         printf("[%d] row %d: ",rank,i);
1092         for (j=0; j<pN; j++) printf(" %g,",cdense[j]);
1093         printf("\n");
1094       }
1095       for (j=0; j<pN; j++) cdense[j]=0.0; // zero cdnese[]
1096 #endif
1097     } //for (i=0; i<cm; i++) {
1098 #endif
1099 
1102     ierr = PetscTime(&t1);CHKERRQ(ierr);
1103     for (i=0; i<am; i++) {
1104 #if defined(PTAP_PROFILE)
1105       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1106 #endif
1107       /* 2-a) form i-th sparse row of A_loc*P = Ad*P_loc + Ao*P_oth */
1108       /*------------------------------------------------------------*/
1109       apJ = apj + api[i];
1110 
1111       /* diagonal portion of A */
1112       anz = adi[i+1] - adi[i];
1113       adj = ad->j + adi[i];
1114       ada = ad->a + adi[i];
1115       for (j=0; j<anz; j++) {
1116         row = adj[j];
1117         pnz = pi_loc[row+1] - pi_loc[row];
1118         pj  = pj_loc + pi_loc[row];
1119         pa  = pa_loc + pi_loc[row];
1120 
1121         /* perform dense axpy */
1122         valtmp = ada[j];
1123         for (k=0; k<pnz; k++) {
1124           apa[pj[k]] += valtmp*pa[k];
1125         }
1126         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1127       }
1128 
1129       /* off-diagonal portion of A */
1130       anz = aoi[i+1] - aoi[i];
1131       aoj = ao->j + aoi[i];
1132       aoa = ao->a + aoi[i];
1133       for (j=0; j<anz; j++) {
1134         row = aoj[j];
1135         pnz = pi_oth[row+1] - pi_oth[row];
1136         pj  = pj_oth + pi_oth[row];
1137         pa  = pa_oth + pi_oth[row];
1138 
1139         /* perform dense axpy */
1140         valtmp = aoa[j];
1141         for (k=0; k<pnz; k++) {
1142           apa[pj[k]] += valtmp*pa[k];
1143         }
1144         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1145       }
1146 #if defined(PTAP_PROFILE)
1147       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1148       et2_AP += t2_1 - t2_0;
1149 #endif
1150 
1151       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1152       /*--------------------------------------------------------------*/
1153       apnz = api[i+1] - api[i];
1154       /* put the value into Co=(p->B)^T*AP (off-diagonal part, send to others) */
1155       pnz = po->i[i+1] - po->i[i];
1156       poJ = po->j + po->i[i];
1157       pA  = po->a + po->i[i];
1158       for (j=0; j<pnz; j++) {
1159         row = poJ[j];
1160         cnz = coi[row+1] - coi[row];
1161         cj  = coj + coi[row];
1162         ca  = coa + coi[row];
1163         /* perform dense axpy */
1164         valtmp = pA[j];
1165         for (k=0; k<cnz; k++) {
1166           ca[k] += valtmp*apa[cj[k]];
1167         }
1168         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1169       }
1171       /* put the value into Cd (diagonal part) */
1172       pnz = pd->i[i+1] - pd->i[i];
1173       pdJ = pd->j + pd->i[i];
1174       pA  = pd->a + pd->i[i];
1175       for (j=0; j<pnz; j++) {
1176         row = pdJ[j];
1177         cnz = bi[row+1] - bi[row];
1178         cj  = bj + bi[row];
1179         ca  = ba + bi[row];
1180         /* perform dense axpy */
1181         valtmp = pA[j];
1182         for (k=0; k<cnz; k++) {
1183           ca[k] += valtmp*apa[cj[k]];
1184         }
1185         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1186       }
1188       /* zero the current row of A*P */
1189       for (k=0; k<apnz; k++) apa[apJ[k]] = 0.0;
1190 #if defined(PTAP_PROFILE)
1191       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1192       ePtAP += t2_2 - t2_1;
1193 #endif
1194     }
1195 
1205   } else { /* Do sparse axpy on apa (length of ap_rmax, stores A[i,:]*P) - scalable, but slower */
1206     ierr = PetscInfo(C,"Using scalable sparse axpy\n");CHKERRQ(ierr);
1207     /*-----------------------------------------------------------------------------------------*/
1208     pA=pa_loc;
1209     for (i=0; i<am; i++) {
1210 #if defined(PTAP_PROFILE)
1211       ierr = PetscTime(&t2_0);CHKERRQ(ierr);
1212 #endif
1213       /* form i-th sparse row of A*P */
1214       apnz = api[i+1] - api[i];
1215       apJ  = apj + api[i];
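      /* here apa is a compressed work array of length apnz, indexed by position within apJ;
         the two-pointer scans below rely on apJ and the P row indices pj[] both being sorted,
         with each pj[] a subset of apJ by construction of the symbolic product */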
1216       /* diagonal portion of A */
1217       anz = adi[i+1] - adi[i];
1218       adj = ad->j + adi[i];
1219       ada = ad->a + adi[i];
1220       for (j=0; j<anz; j++) {
1221         row    = adj[j];
1222         pnz    = pi_loc[row+1] - pi_loc[row];
1223         pj     = pj_loc + pi_loc[row];
1224         pa     = pa_loc + pi_loc[row];
1225         valtmp = ada[j];
1226         nextp  = 0;
1227         for (k=0; nextp<pnz; k++) {
1228           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1229             apa[k] += valtmp*pa[nextp++];
1230           }
1231         }
1232         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1233       }
1234       /* off-diagonal portion of A */
1235       anz = aoi[i+1] - aoi[i];
1236       aoj = ao->j + aoi[i];
1237       aoa = ao->a + aoi[i];
1238       for (j=0; j<anz; j++) {
1239         row    = aoj[j];
1240         pnz    = pi_oth[row+1] - pi_oth[row];
1241         pj     = pj_oth + pi_oth[row];
1242         pa     = pa_oth + pi_oth[row];
1243         valtmp = aoa[j];
1244         nextp  = 0;
1245         for (k=0; nextp<pnz; k++) {
1246           if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
1247             apa[k] += valtmp*pa[nextp++];
1248           }
1249         }
1250         ierr = PetscLogFlops(2.0*pnz);CHKERRQ(ierr);
1251       }
1252 #if defined(PTAP_PROFILE)
1253       ierr    = PetscTime(&t2_1);CHKERRQ(ierr);
1254       et2_AP += t2_1 - t2_0;
1255 #endif
1256 
1257       /* 2-b) Compute Cseq = P_loc[i,:]^T*AP[i,:] using outer product */
1258       /*--------------------------------------------------------------*/
1259       pnz = pi_loc[i+1] - pi_loc[i];
1260       pJ  = pj_loc + pi_loc[i];
1261       for (j=0; j<pnz; j++) {
1262         nextap = 0;
1263         row    = pJ[j]; /* global index */
1264         if (row < pcstart || row >=pcend) { /* put the value into Co */
1265           row = *poJ;
1266           cj  = coj + coi[row];
1267           ca  = coa + coi[row]; poJ++;
1268         } else {                            /* put the value into Cd */
1269           row = *pdJ;
1270           cj  = bj + bi[row];
1271           ca  = ba + bi[row]; pdJ++;
1272         }
1273         valtmp = pA[j];
1274         for (k=0; nextap<apnz; k++) {
1275           if (cj[k]==apJ[nextap]) ca[k] += valtmp*apa[nextap++];
1276         }
1277         ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
1278       }
1279       pA += pnz;
1280       /* zero the current row info for A*P */
1281       ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
1282 #if defined(PTAP_PROFILE)
1283       ierr      = PetscTime(&t2_2);CHKERRQ(ierr);
1284       ePtAP += t2_2 - t2_1;
1285 #endif
1286     }
1287   }
1288 #if defined(PTAP_PROFILE)
1289   ierr = PetscTime(&t2);CHKERRQ(ierr);
1290 #endif
1291 
1292   /* 3) send and recv matrix values coa */
1293   /*------------------------------------*/
1294   buf_ri = merge->buf_ri;
1295   buf_rj = merge->buf_rj;
1296   len_s  = merge->len_s;
1297   ierr   = PetscCommGetNewTag(comm,&taga);CHKERRQ(ierr);
1298   ierr   = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);
1299 
1300   ierr = PetscMalloc2(merge->nsend+1,&s_waits,size,&status);CHKERRQ(ierr);
1301   for (proc=0,k=0; proc<size; proc++) {
1302     if (!len_s[proc]) continue;
1303     i    = merge->owners_co[proc];
1304     ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
1305     k++;
1306   }
1307   if (merge->nrecv) {ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);}
1308   if (merge->nsend) {ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);}
1309 
1310   ierr = PetscFree2(s_waits,status);CHKERRQ(ierr);
1311   ierr = PetscFree(r_waits);CHKERRQ(ierr);
1312   ierr = PetscFree(coa);CHKERRQ(ierr);
1313 #if defined(PTAP_PROFILE)
1314   ierr = PetscTime(&t3);CHKERRQ(ierr);
1315 #endif
1316 
1317   /* 4) insert local Cseq and received values into Cmpi */
1318   /*------------------------------------------------------*/
1319   ierr = PetscMalloc3(merge->nrecv,&buf_ri_k,merge->nrecv,&nextrow,merge->nrecv,&nextci);CHKERRQ(ierr);
1320   for (k=0; k<merge->nrecv; k++) {
1321     buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
1322     nrows       = *(buf_ri_k[k]);
1323     nextrow[k]  = buf_ri_k[k]+1;  /* next row number of k-th recved i-structure */
1324     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
1325   }
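  /* for each locally owned row, merge the received contributions into ba_i by walking the sorted
     local column list bj_i and each sorted received column list cj in tandem (the received columns
     are a subset of bj_i), then insert the completed row into the MPI matrix C */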
1326 
1327   for (i=0; i<cm; i++) {
1328     row  = owners[rank] + i; /* global row index of C_seq */
1329     bj_i = bj + bi[i];  /* col indices of the i-th row of C */
1330     ba_i = ba + bi[i];
1331     bnz  = bi[i+1] - bi[i];
1332     /* add received vals into ba */
1333     for (k=0; k<merge->nrecv; k++) { /* k-th received message */
1334       /* i-th row */
1335       if (i == *nextrow[k]) {
1336         cnz    = *(nextci[k]+1) - *nextci[k];
1337         cj     = buf_rj[k] + *(nextci[k]);
1338         ca     = abuf_r[k] + *(nextci[k]);
1339         nextcj = 0;
1340         for (j=0; nextcj<cnz; j++) {
1341           if (bj_i[j] == cj[nextcj]) { /* bcol == ccol */
1342             ba_i[j] += ca[nextcj++];
1343           }
1344         }
1345         nextrow[k]++; nextci[k]++;
1346         ierr = PetscLogFlops(2.0*cnz);CHKERRQ(ierr);
1347       }
1348     }
1349     ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
1350   }
1351   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1352   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1353 
1354   ierr = PetscFree(ba);CHKERRQ(ierr);
1355   ierr = PetscFree(abuf_r[0]);CHKERRQ(ierr);
1356   ierr = PetscFree(abuf_r);CHKERRQ(ierr);
1357   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
1358 #if defined(PTAP_PROFILE)
1359   ierr = PetscTime(&t4);CHKERRQ(ierr);
1360   if (rank==1) {
1361     ierr = PetscPrintf(MPI_COMM_SELF,"  [%d] PtAPNum %g/P + %g/PtAP( %g/A*P + %g/Pt*AP ) + %g/comm + %g/Cloc = %g\n\n",rank,eP,t2-t1,et2_AP,ePtAP,t3-t2,t4-t3,t4-t0);CHKERRQ(ierr);
1362   }
1363 #endif
1364   PetscFunctionReturn(0);
1365 }
1366