xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision a9899c979f2dda621d6accdc0838ebb8823aa852)
1 
2 /*
3   Defines projective product routines where A is an MPIAIJ matrix
4           C = P^T * A * P
5 */
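/*
  A minimal usage sketch from application code (illustration only; matrix setup omitted,
  and the fill estimate 2.0 is just an example value):

      Mat A,P,C;
      ...
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   first call: symbolic and numeric product
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     later calls reuse the symbolic data kept in C

  The implementation used below can be selected at runtime with
  -matptap_via <scalable,nonscalable> (plus "hypre" when PETSc is configured with hypre).
*/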
6 
7 #include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
8 #include <../src/mat/utils/freespace.h>
9 #include <../src/mat/impls/aij/mpi/mpiaij.h>
10 #include <petscbt.h>
11 #include <petsctime.h>
12 
13 /* #define PTAP_PROFILE */
14 
15 PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
16 {
17   PetscErrorCode    ierr;
18   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
19   Mat_PtAPMPI       *ptap=a->ptap;
20   PetscBool         iascii;
21   PetscViewerFormat format;
22 
23   PetscFunctionBegin;
24   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
25   if (iascii) {
26     ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
27     if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
28       if (ptap->algType == 0) {
29         ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
30       } else if (ptap->algType == 1) {
31         ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
32       }
33     }
34   }
35   ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
36   PetscFunctionReturn(0);
37 }
38 
39 PetscErrorCode MatDestroy_MPIAIJ_PtAPMPI_private(Mat_PtAPMPI *ptap)
40 {
41   PetscErrorCode      ierr;
42   Mat_Merge_SeqsToMPI *merge=ptap->merge;
43 
44   PetscFunctionBegin;
45   ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
46   ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
47   ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
48   ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
49   ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
50   ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
51   ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
52   if (ptap->AP_loc) { /* used by alg_rap */
53     Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
54     ierr = PetscFree(ap->i);CHKERRQ(ierr);
55     ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
56     ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
57   } else { /* used by alg_ptap */
58     ierr = PetscFree(ptap->api);CHKERRQ(ierr);
59     ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
60   }
61   ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
62   ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
63   if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}
64 
65   if (merge) { /* used by alg_ptap */
66     ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
67     ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
68     ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
69     ierr = PetscFree(merge->bi);CHKERRQ(ierr);
70     ierr = PetscFree(merge->bj);CHKERRQ(ierr);
71     ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
72     ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
73     ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
74     ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
75     ierr = PetscFree(merge->coi);CHKERRQ(ierr);
76     ierr = PetscFree(merge->coj);CHKERRQ(ierr);
77     ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
78     ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
79     ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
80   }
81   PetscFunctionReturn(0);
82 }
83 
84 PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
85 {
86   PetscErrorCode ierr;
87   Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
88   Mat_PtAPMPI    *ptap=a->ptap;
89 
90   PetscFunctionBegin;
91   if (ptap) {
92     if (A->reuse) {
93       ierr = MatDestroy_MPIAIJ_PtAPMPI_private(a->ptap);CHKERRQ(ierr);
94     }
95     ierr = ptap->destroy(A);CHKERRQ(ierr);
96     ierr = PetscFree(ptap);CHKERRQ(ierr);
97   }
98   PetscFunctionReturn(0);
99 }
100 
101 PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
102 {
103   PetscErrorCode ierr;
104   PetscBool      flg;
105   MPI_Comm       comm;
106 #if !defined(PETSC_HAVE_HYPRE)
107   const char          *algTypes[2] = {"scalable","nonscalable"};
108   PetscInt            nalg=2;
109 #else
110   const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
111   PetscInt            nalg=3;
112 #endif
113   PetscInt            pN=P->cmap->N,alg=1; /* set default algorithm */
114 
115   PetscFunctionBegin;
116   /* check if matrix local sizes are compatible */
117   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
118   if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
119   if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);
120 
121   if (scall == MAT_INITIAL_MATRIX) {
122     /* pick an algorithm */
123     ierr = PetscObjectOptionsBegin((PetscObject)A);CHKERRQ(ierr);
124     PetscOptionsObject->alreadyprinted = PETSC_FALSE; /* a hack to ensure the option shows in '-help' */
125     ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
126     ierr = PetscOptionsEnd();CHKERRQ(ierr);
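    /* e.g., -matptap_via scalable forces the memory-scalable variant; -matptap_via nonscalable selects the faster default (see algTypes above) */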
127 
128     if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
129       MatInfo     Ainfo,Pinfo;
130       PetscInt    nz_local;
131       PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;
132 
133       ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
134       ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
135       nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);
136 
137       if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
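      /* e.g., with fill = 2.0 and nz_local = 1e5, a global column count pN above 2e5 marks this process as preferring the scalable variant */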
138       ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);
139 
140       if (alg_scalable) {
141         alg = 0; /* the scalable algorithm can be about 50% slower than the nonscalable algorithm */
142       }
143     }
144 
145     switch (alg) {
146     case 1:
147       /* do R=P^T locally, then C=R*A*P */
148       ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
149       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
150       ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
151       break;
152 #if defined(PETSC_HAVE_HYPRE)
153     case 2:
154       /* Use boomerAMGBuildCoarseOperator */
155       ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
156       PetscFunctionReturn(0);
157       break;
158 #endif
159     default:
160       /* do R=P^T locally, then C=R*A*P */
161       ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
162       ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
163       ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
164       break;
165     }
166   }
167   ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
168   ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
169   ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
170   PetscFunctionReturn(0);
171 }
172 
173 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
174 {
175   PetscErrorCode    ierr;
176   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
177   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
178   Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
179   Mat_PtAPMPI       *ptap = c->ptap;
180   Mat               AP_loc,C_loc,C_oth;
181   PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
182   PetscScalar       *apa;
183   const PetscInt    *cols;
184   const PetscScalar *vals;
185 
186   PetscFunctionBegin;
187   ierr = MatZeroEntries(C);CHKERRQ(ierr);
188 
189   /* 1) get Rd = Pd^T, Ro = Po^T */
190   if (ptap->reuse == MAT_REUSE_MATRIX) {
191     ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
192     ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
193   }
194 
195   /* 2) get AP_loc */
196   AP_loc = ptap->AP_loc;
197   ap = (Mat_SeqAIJ*)AP_loc->data;
198 
199   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
200   /*-----------------------------------------------------*/
201   if (ptap->reuse == MAT_REUSE_MATRIX) {
202     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
203     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
204     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
205   }
206 
207   /* 2-2) compute numeric A_loc*P - dominating part */
208   /* ---------------------------------------------- */
209   /* get data from symbolic products */
210   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
211   if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
212   else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");
213 
214   api   = ap->i;
215   apj   = ap->j;
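  /* apa points into AP_loc's value array for row i; AProw_scalable() accumulates A[i,:]*P there directly, so no dense length-pN work array is needed */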
216   for (i=0; i<am; i++) {
217     /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
218     apnz = api[i+1] - api[i];
219     apa = ap->a + api[i];
220     ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
221     AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
222     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
223   }
224 
225   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
226   ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
227   ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
228 
229   C_loc = ptap->C_loc;
230   C_oth = ptap->C_oth;
231 
232   /* add C_loc and C_oth to C */
233   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
234 
235   /* C_loc -> C */
236   cm    = C_loc->rmap->N;
237   c_seq = (Mat_SeqAIJ*)C_loc->data;
238   cols = c_seq->j;
239   vals = c_seq->a;
240 
241   /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
242   /* when there are no off-processor parts.  */
243   /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
244   /* is no longer valid. In that case the more general MatSetValues_MPIAIJ() must be used, which looks up each column index in */
245   /* a table and performs additional bookkeeping. */
246   if (C->assembled) {
247     C->was_assembled = PETSC_TRUE;
248     C->assembled     = PETSC_FALSE;
249   }
250   if (C->was_assembled) {
251     for (i=0; i<cm; i++) {
252       ncols = c_seq->i[i+1] - c_seq->i[i];
253       row = rstart + i;
254       ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
255       cols += ncols; vals += ncols;
256     }
257   } else {
258     ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
259   }
260 
261   /* Co -> C, off-processor part */
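  /* each local row i of C_oth contributes to global row p->garray[i] of C */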
262   cm = C_oth->rmap->N;
263   c_seq = (Mat_SeqAIJ*)C_oth->data;
264   cols = c_seq->j;
265   vals = c_seq->a;
266   for (i=0; i<cm; i++) {
267     ncols = c_seq->i[i+1] - c_seq->i[i];
268     row = p->garray[i];
269     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
270     cols += ncols; vals += ncols;
271   }
272   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
273   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
274 
275   ptap->reuse = MAT_REUSE_MATRIX;
276   PetscFunctionReturn(0);
277 }
278 
279 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
280 {
281   PetscErrorCode      ierr;
282   Mat_PtAPMPI         *ptap;
283   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
284   MPI_Comm            comm;
285   PetscMPIInt         size,rank;
286   Mat                 Cmpi,P_loc,P_oth;
287   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
288   PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
289   PetscInt            *lnk,i,k,pnz,row,nsend;
290   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
291   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
292   PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
293   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
294   MPI_Request         *swaits,*rwaits;
295   MPI_Status          *sstatus,rstatus;
296   PetscLayout         rowmap;
297   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
298   PetscMPIInt         *len_r,*id_r;    /* array of length of comm->size, store send/recv matrix values */
299   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
300   Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
301   PetscScalar         *apv;
302   PetscTable          ta;
303   MatType             mtype;
304 #if defined(PETSC_USE_INFO)
305   PetscReal           apfill;
306 #endif
307 
308   PetscFunctionBegin;
309   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
310   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
311   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
312 
313   if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
314 
315   /* create symbolic parallel matrix Cmpi */
316   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
317   ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
318   ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
319 
320   /* create struct Mat_PtAPMPI and attach it to C later */
321   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
322   ptap->reuse = MAT_INITIAL_MATRIX;
323   ptap->algType = 0;
324 
325   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
326   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
327   /* get P_loc by taking all local rows of P */
328   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);
329 
330   ptap->P_loc = P_loc;
331   ptap->P_oth = P_oth;
332 
333   /* (0) compute Rd = Pd^T, Ro = Po^T  */
334   /* --------------------------------- */
335   ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
336   ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
337 
338   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
339   /* ----------------------------------------------------------------- */
340   p_loc  = (Mat_SeqAIJ*)P_loc->data;
341   if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;
342 
343   /* create and initialize a linked list */
344   ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* used to compute AP_loc and Cmpi */
345   MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
346   MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
347   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
348 
349   ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
350 
351   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
352   if (ao) {
353     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
354   } else {
355     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
356   }
357   current_space = free_space;
358   nspacedouble  = 0;
359 
360   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
361   api[0] = 0;
362   for (i=0; i<am; i++) {
363     /* diagonal portion: Ad[i,:]*P */
364     ai = ad->i; pi = p_loc->i;
365     nzi = ai[i+1] - ai[i];
366     aj  = ad->j + ai[i];
367     for (j=0; j<nzi; j++) {
368       row  = aj[j];
369       pnz  = pi[row+1] - pi[row];
370       Jptr = p_loc->j + pi[row];
371       /* add non-zero cols of P into the sorted linked list lnk */
372       ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
373     }
374     /* off-diagonal portion: Ao[i,:]*P */
375     if (ao) {
376       ai = ao->i; pi = p_oth->i;
377       nzi = ai[i+1] - ai[i];
378       aj  = ao->j + ai[i];
379       for (j=0; j<nzi; j++) {
380         row  = aj[j];
381         pnz  = pi[row+1] - pi[row];
382         Jptr = p_oth->j + pi[row];
383         ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
384       }
385     }
386     apnz     = lnk[0];
387     api[i+1] = api[i] + apnz;
388 
389     /* if free space is not available, double the total space in the list */
390     if (current_space->local_remaining<apnz) {
391       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
392       nspacedouble++;
393     }
394 
395     /* Copy data into free space, then initialize lnk */
396     ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);
397 
398     current_space->array           += apnz;
399     current_space->local_used      += apnz;
400     current_space->local_remaining -= apnz;
401   }
402   /* Allocate space for apj and apv, initialize apj, and */
403   /* destroy list of free space and other temporary array(s) */
404   ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
405   ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
406   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
407 
408   /* Create AP_loc for reuse */
409   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
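  /* AP_loc does not take ownership of api/apj/apv; they are freed explicitly in MatDestroy_MPIAIJ_PtAPMPI_private() */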
410 
411 #if defined(PETSC_USE_INFO)
412   if (ao) {
413     apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
414   } else {
415     apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
416   }
417   ptap->AP_loc->info.mallocs           = nspacedouble;
418   ptap->AP_loc->info.fill_ratio_given  = fill;
419   ptap->AP_loc->info.fill_ratio_needed = apfill;
420 
421   if (api[am]) {
422     ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
423     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
424   } else {
425     ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
426   }
427 #endif
428 
429   /* (2-1) compute symbolic Co = Ro*AP_loc  */
430   /* ------------------------------------ */
431   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
432 
433   /* (3) send coj of C_oth to other processors  */
434   /* ------------------------------------------ */
435   /* determine row ownership */
436   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
437   rowmap->n  = pn;
438   rowmap->bs = 1;
439   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
440   owners = rowmap->range;
441 
442   /* determine the number of messages to send, their lengths */
443   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
444   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
445   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
446 
447   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
448   coi   = c_oth->i; coj = c_oth->j;
449   con   = ptap->C_oth->rmap->n;
450   proc  = 0;
451   for (i=0; i<con; i++) {
452     while (prmap[i] >= owners[proc+1]) proc++;
453     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
454     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
455   }
456 
457   len          = 0; /* max length of buf_si[], see (4) */
458   owners_co[0] = 0;
459   nsend        = 0;
460   for (proc=0; proc<size; proc++) {
461     owners_co[proc+1] = owners_co[proc] + len_si[proc];
462     if (len_s[proc]) {
463       nsend++;
464       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
465       len         += len_si[proc];
466     }
467   }
468 
469   /* determine the number and length of messages to receive for coi and coj  */
470   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
471   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
472 
473   /* post the Irecv and Isend of coj */
474   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
475   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
476   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
477   for (proc=0, k=0; proc<size; proc++) {
478     if (!len_s[proc]) continue;
479     i    = owners_co[proc];
480     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
481     k++;
482   }
483 
484   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
485   /* ---------------------------------------- */
486   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
487   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
488 
489   /* receives coj are complete */
490   for (i=0; i<nrecv; i++) {
491     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
492   }
493   ierr = PetscFree(rwaits);CHKERRQ(ierr);
494   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
495 
496   /* add received column indices into ta to update Crmax */
497   for (k=0; k<nrecv; k++) {/* k-th received message */
498     Jptr = buf_rj[k];
499     for (j=0; j<len_r[k]; j++) {
500       ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
501     }
502   }
503   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
504   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
505 
506   /* (4) send and recv coi */
507   /*-----------------------*/
508   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
509   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
510   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
511   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
512   for (proc=0,k=0; proc<size; proc++) {
513     if (!len_s[proc]) continue;
514     /* form outgoing message for i-structure:
515          buf_si[0]:                 nrows to be sent
516                [1:nrows]:           row index (global)
517                [nrows+1:2*nrows+1]: i-structure index
518     */
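    /* e.g. (illustration only): two rows with local indices 4 and 7 holding 3 and 5 nonzeros give buf_si = {2, 4, 7, 0, 3, 8} */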
519     /*-------------------------------------------*/
520     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
521     buf_si_i    = buf_si + nrows+1;
522     buf_si[0]   = nrows;
523     buf_si_i[0] = 0;
524     nrows       = 0;
525     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
526       nzi = coi[i+1] - coi[i];
527       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
528       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
529       nrows++;
530     }
531     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
532     k++;
533     buf_si += len_si[proc];
534   }
535   for (i=0; i<nrecv; i++) {
536     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
537   }
538   ierr = PetscFree(rwaits);CHKERRQ(ierr);
539   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
540 
541   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
542   ierr = PetscFree(len_ri);CHKERRQ(ierr);
543   ierr = PetscFree(swaits);CHKERRQ(ierr);
544   ierr = PetscFree(buf_s);CHKERRQ(ierr);
545 
546   /* (5) compute the local portion of Cmpi      */
547   /* ------------------------------------------ */
548   /* set initial free space to Crmax, sufficient to hold the nonzeros of each row of Cmpi */
549   ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
550   current_space = free_space;
551 
552   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
553   for (k=0; k<nrecv; k++) {
554     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
555     nrows       = *buf_ri_k[k];
556     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
557     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of the k-th received i-structure */
558   }
559 
560   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
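  /* dnz[]/onz[] will hold, for each of the pn locally owned rows of Cmpi, the number of nonzeros falling in the diagonal and off-diagonal blocks */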
561   ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
562   for (i=0; i<pn; i++) {
563     /* add C_loc into Cmpi */
564     nzi  = c_loc->i[i+1] - c_loc->i[i];
565     Jptr = c_loc->j + c_loc->i[i];
566     ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
567 
568     /* add received col data into lnk */
569     for (k=0; k<nrecv; k++) { /* k-th received message */
570       if (i == *nextrow[k]) { /* i-th row */
571         nzi  = *(nextci[k]+1) - *nextci[k];
572         Jptr = buf_rj[k] + *nextci[k];
573         ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
574         nextrow[k]++; nextci[k]++;
575       }
576     }
577     nzi = lnk[0];
578 
579     /* copy data into free space, then initialize lnk */
580     ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
581     ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
582   }
583   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
584   ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
585   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
586 
587   /* local sizes and preallocation */
588   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
589   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
590   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
591   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
592 
593   /* members in merge */
594   ierr = PetscFree(id_r);CHKERRQ(ierr);
595   ierr = PetscFree(len_r);CHKERRQ(ierr);
596   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
597   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
598   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
599   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
600   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
601 
602   /* attach the supporting struct to Cmpi for reuse */
603   c = (Mat_MPIAIJ*)Cmpi->data;
604   c->ptap         = ptap;
605   ptap->duplicate = Cmpi->ops->duplicate;
606   ptap->destroy   = Cmpi->ops->destroy;
607   ptap->view      = Cmpi->ops->view;
608 
609   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
610   Cmpi->assembled        = PETSC_FALSE;
611   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
612   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
613   Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
614   *C                     = Cmpi;
615   PetscFunctionReturn(0);
616 }
617 
618 PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
619 {
620   PetscErrorCode      ierr;
621   Mat_PtAPMPI         *ptap;
622   Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
623   MPI_Comm            comm;
624   PetscMPIInt         size,rank;
625   Mat                 Cmpi;
626   PetscFreeSpaceList  free_space=NULL,current_space=NULL;
627   PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
628   PetscInt            *lnk,i,k,pnz,row,nsend;
629   PetscBT             lnkbt;
630   PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
631   PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
632   PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
633   PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
634   MPI_Request         *swaits,*rwaits;
635   MPI_Status          *sstatus,rstatus;
636   PetscLayout         rowmap;
637   PetscInt            *owners_co,*coi,*coj;    /* i and j array of (p->B)^T*A*P - used in the communication */
638   PetscMPIInt         *len_r,*id_r;    /* array of length of comm->size, store send/recv matrix values */
639   PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
640   Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
641   PetscScalar         *apv;
642   PetscTable          ta;
643   MatType             mtype;
644 #if defined(PETSC_USE_INFO)
645   PetscReal           apfill;
646 #endif
647 
648   PetscFunctionBegin;
649   ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
650   ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
651   ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
652 
653   if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;
654 
655   /* create symbolic parallel matrix Cmpi */
656   ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
657   ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
658   ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);
659 
660   ierr = PetscOptionsGetBool(NULL,NULL,"-matptap_reuse",&Cmpi->reuse,NULL);CHKERRQ(ierr);
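  /* if -matptap_reuse is set, the supporting ptap data is kept after the first numeric product so repeated MatPtAPNumeric_MPIAIJ_MPIAIJ() calls are possible; otherwise it is freed there */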
661 
662   /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
663   Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;
664 
665   /* create struct Mat_PtAPMPI and attach it to C later */
666   ierr        = PetscNew(&ptap);CHKERRQ(ierr);
667   ptap->reuse = MAT_INITIAL_MATRIX;
668   ptap->algType = 1;
669 
670   /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
671   ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
672   /* get P_loc by taking all local rows of P */
673   ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
674 
675   /* (0) compute Rd = Pd^T, Ro = Po^T  */
676   /* --------------------------------- */
677   ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
678   ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);
679 
680   /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
681   /* ----------------------------------------------------------------- */
682   p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
683   if (ptap->P_oth) p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;
684 
685   /* create and initialize a linked list */
686   ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* used to compute AP_loc and Cmpi */
687   MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
688   MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
689   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */
690   /* printf("[%d] est %d, Crmax %d; pN %d\n",rank,5*(p_loc->rmax+p_oth->rmax + (PetscInt)(1.e-2*pN)),Crmax,pN); */
691 
692   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
693 
694   /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
695   if (ao) {
696     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
697   } else {
698     ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
699   }
700   current_space = free_space;
701   nspacedouble  = 0;
702 
703   ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
704   api[0] = 0;
705   for (i=0; i<am; i++) {
706     /* diagonal portion: Ad[i,:]*P */
707     ai = ad->i; pi = p_loc->i;
708     nzi = ai[i+1] - ai[i];
709     aj  = ad->j + ai[i];
710     for (j=0; j<nzi; j++) {
711       row  = aj[j];
712       pnz  = pi[row+1] - pi[row];
713       Jptr = p_loc->j + pi[row];
714       /* add non-zero cols of P into the sorted linked list lnk */
715       ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
716     }
717     /* off-diagonal portion: Ao[i,:]*P */
718     if (ao) {
719       ai = ao->i; pi = p_oth->i;
720       nzi = ai[i+1] - ai[i];
721       aj  = ao->j + ai[i];
722       for (j=0; j<nzi; j++) {
723         row  = aj[j];
724         pnz  = pi[row+1] - pi[row];
725         Jptr = p_oth->j + pi[row];
726         ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
727       }
728     }
729     apnz     = lnk[0];
730     api[i+1] = api[i] + apnz;
731     if (ap_rmax < apnz) ap_rmax = apnz;
732 
733     /* if free space is not available, double the total space in the list */
734     if (current_space->local_remaining<apnz) {
735       ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
736       nspacedouble++;
737     }
738 
739     /* Copy data into free space, then initialize lnk */
740     ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
741 
742     current_space->array           += apnz;
743     current_space->local_used      += apnz;
744     current_space->local_remaining -= apnz;
745   }
746   /* Allocate space for apj and apv, initialize apj, and */
747   /* destroy list of free space and other temporary array(s) */
748   ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
749   ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
750   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
751 
752   /* Create AP_loc for reuse */
753   ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);
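  /* AP_loc does not take ownership of api/apj/apv; they are freed explicitly in MatDestroy_MPIAIJ_PtAPMPI_private() */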
754 
755 #if defined(PETSC_USE_INFO)
756   if (ao) {
757     apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
758   } else {
759     apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
760   }
761   ptap->AP_loc->info.mallocs           = nspacedouble;
762   ptap->AP_loc->info.fill_ratio_given  = fill;
763   ptap->AP_loc->info.fill_ratio_needed = apfill;
764 
765   if (api[am]) {
766     ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
767     ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,P,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
768   } else {
769     ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty \n");CHKERRQ(ierr);
770   }
771 #endif
772 
773   /* (2-1) compute symbolic Co = Ro*AP_loc  */
774   /* ------------------------------------ */
775   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);
776 
777   /* (3) send coj of C_oth to other processors  */
778   /* ------------------------------------------ */
779   /* determine row ownership */
780   ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
781   rowmap->n  = pn;
782   rowmap->bs = 1;
783   ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
784   owners = rowmap->range;
785 
786   /* determine the number of messages to send, their lengths */
787   ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
788   ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
789   ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
790 
791   c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
792   coi   = c_oth->i; coj = c_oth->j;
793   con   = ptap->C_oth->rmap->n;
794   proc  = 0;
795   for (i=0; i<con; i++) {
796     while (prmap[i] >= owners[proc+1]) proc++;
797     len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
798     len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
799   }
800 
801   len          = 0; /* max length of buf_si[], see (4) */
802   owners_co[0] = 0;
803   nsend        = 0;
804   for (proc=0; proc<size; proc++) {
805     owners_co[proc+1] = owners_co[proc] + len_si[proc];
806     if (len_s[proc]) {
807       nsend++;
808       len_si[proc] = 2*(len_si[proc] + 1); /* length of buf_si to be sent to [proc] */
809       len         += len_si[proc];
810     }
811   }
812 
813   /* determine the number and length of messages to receive for coi and coj  */
814   ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
815   ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);
816 
817   /* post the Irecv and Isend of coj */
818   ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
819   ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
820   ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
821   for (proc=0, k=0; proc<size; proc++) {
822     if (!len_s[proc]) continue;
823     i    = owners_co[proc];
824     ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
825     k++;
826   }
827 
828   /* (2-2) compute symbolic C_loc = Rd*AP_loc */
829   /* ---------------------------------------- */
830   ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
831   c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;
832 
833   /* receives coj are complete */
834   for (i=0; i<nrecv; i++) {
835     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
836   }
837   ierr = PetscFree(rwaits);CHKERRQ(ierr);
838   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
839 
840   /* add received column indices into ta to update Crmax */
841   for (k=0; k<nrecv; k++) {/* k-th received message */
842     Jptr = buf_rj[k];
843     for (j=0; j<len_r[k]; j++) {
844       ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
845     }
846   }
847   ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
848   ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);
849 
850   /* (4) send and recv coi */
851   /*-----------------------*/
852   ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
853   ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
854   ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
855   buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
856   for (proc=0,k=0; proc<size; proc++) {
857     if (!len_s[proc]) continue;
858     /* form outgoing message for i-structure:
859          buf_si[0]:                 nrows to be sent
860                [1:nrows]:           row index (global)
861                [nrows+1:2*nrows+1]: i-structure index
862     */
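    /* e.g. (illustration only): two rows with local indices 4 and 7 holding 3 and 5 nonzeros give buf_si = {2, 4, 7, 0, 3, 8} */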
863     /*-------------------------------------------*/
864     nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
865     buf_si_i    = buf_si + nrows+1;
866     buf_si[0]   = nrows;
867     buf_si_i[0] = 0;
868     nrows       = 0;
869     for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
870       nzi = coi[i+1] - coi[i];
871       buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
872       buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
873       nrows++;
874     }
875     ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
876     k++;
877     buf_si += len_si[proc];
878   }
879   for (i=0; i<nrecv; i++) {
880     ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
881   }
882   ierr = PetscFree(rwaits);CHKERRQ(ierr);
883   if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}
884 
885   ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
886   ierr = PetscFree(len_ri);CHKERRQ(ierr);
887   ierr = PetscFree(swaits);CHKERRQ(ierr);
888   ierr = PetscFree(buf_s);CHKERRQ(ierr);
889 
890   /* (5) compute the local portion of Cmpi      */
891   /* ------------------------------------------ */
892   /* set initial free space to Crmax, sufficient to hold the nonzeros of each row of Cmpi */
893   ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
894   current_space = free_space;
895 
896   ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
897   for (k=0; k<nrecv; k++) {
898     buf_ri_k[k] = buf_ri[k]; /* beginning of the k-th received i-structure */
899     nrows       = *buf_ri_k[k];
900     nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of the k-th received i-structure */
901     nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure entry of the k-th received i-structure */
902   }
903 
904   ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
905   ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
906   for (i=0; i<pn; i++) {
907     /* add C_loc into Cmpi */
908     nzi  = c_loc->i[i+1] - c_loc->i[i];
909     Jptr = c_loc->j + c_loc->i[i];
910     ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
911 
912     /* add received col data into lnk */
913     for (k=0; k<nrecv; k++) { /* k-th received message */
914       if (i == *nextrow[k]) { /* i-th row */
915         nzi  = *(nextci[k]+1) - *nextci[k];
916         Jptr = buf_rj[k] + *nextci[k];
917         ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
918         nextrow[k]++; nextci[k]++;
919       }
920     }
921     nzi = lnk[0];
922 
923     /* copy data into free space, then initialize lnk */
924     ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
925     ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
926   }
927   ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
928   ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
929   ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);
930 
931   /* local sizes and preallocation */
932   ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
933   ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
934   ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
935   ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
936 
937   /* members in merge */
938   ierr = PetscFree(id_r);CHKERRQ(ierr);
939   ierr = PetscFree(len_r);CHKERRQ(ierr);
940   ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
941   ierr = PetscFree(buf_ri);CHKERRQ(ierr);
942   ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
943   ierr = PetscFree(buf_rj);CHKERRQ(ierr);
944   ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);
945 
946   /* attach the supporting struct to Cmpi for reuse */
947   c = (Mat_MPIAIJ*)Cmpi->data;
948   c->ptap         = ptap;
949   ptap->duplicate = Cmpi->ops->duplicate;
950   ptap->destroy   = Cmpi->ops->destroy;
951   ptap->view      = Cmpi->ops->view;
952   ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);
953 
954   /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
955   Cmpi->assembled        = PETSC_FALSE;
956   Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
957   Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
958   *C                     = Cmpi;
959   PetscFunctionReturn(0);
960 }
961 
962 PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
963 {
964   PetscErrorCode    ierr;
965   Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
966   Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
967   Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
968   Mat_PtAPMPI       *ptap = c->ptap;
969   Mat               AP_loc,C_loc,C_oth;
970   PetscInt          i,rstart,rend,cm,ncols,row;
971   PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
972   PetscScalar       *apa;
973   const PetscInt    *cols;
974   const PetscScalar *vals;
975 
976   PetscFunctionBegin;
977   if (!C->reuse && ptap->reuse == MAT_REUSE_MATRIX) {
978     MPI_Comm comm;
979     ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
980     SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Call MatSetOption(C,MAT_REUSE,PETSC_TRUE) or '-matptap_reuse'");
981   }
982 
983   ierr = MatZeroEntries(C);CHKERRQ(ierr);
984   /* 1) get Rd = Pd^T, Ro = Po^T */
985   if (ptap->reuse == MAT_REUSE_MATRIX) {
986     ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
987     ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
988   }
989 
990   /* 2) get AP_loc */
991   AP_loc = ptap->AP_loc;
992   ap = (Mat_SeqAIJ*)AP_loc->data;
993 
994   /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
995   /*-----------------------------------------------------*/
996   if (ptap->reuse == MAT_REUSE_MATRIX) {
997     /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
998     ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
999     ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
1000   }
1001 
1002   /* 2-2) compute numeric A_loc*P - dominating part */
1003   /* ---------------------------------------------- */
1004   /* get data from symbolic products */
1005   p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
1006   if (ptap->P_oth) {
1007     p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
1008   }
1009   apa   = ptap->apa;
1010   api   = ap->i;
1011   apj   = ap->j;
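  /* apa is a dense work array of length pN (allocated in the symbolic phase); each row of A*P is accumulated into it, gathered into AP_loc, and then zeroed again */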
1012   for (i=0; i<am; i++) {
1013     /* AP[i,:] = A[i,:]*P = Ad*P_loc + Ao*P_oth */
1014     AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
1015     apnz = api[i+1] - api[i];
1016     for (j=0; j<apnz; j++) {
1017       col = apj[j+api[i]];
1018       ap->a[j+ap->i[i]] = apa[col];
1019       apa[col] = 0.0;
1020     }
1021     ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
1022   }
1023 
1024   /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
1025   ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
1026   ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
1027   C_loc = ptap->C_loc;
1028   C_oth = ptap->C_oth;
1029 
1030   /* add C_loc and C_oth to C */
1031   ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);
1032 
1033   /* C_loc -> C */
1034   cm    = C_loc->rmap->N;
1035   c_seq = (Mat_SeqAIJ*)C_loc->data;
1036   cols = c_seq->j;
1037   vals = c_seq->a;
1038 
1039 
1040   /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE and */
1041   /* when there are no off-processor parts.  */
1042   /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
1043   /* is no longer valid. In that case the more general MatSetValues_MPIAIJ() must be used, which looks up each column index in */
1044   /* a table and performs additional bookkeeping. */
1045   if (C->assembled) {
1046     C->was_assembled = PETSC_TRUE;
1047     C->assembled     = PETSC_FALSE;
1048   }
1049   if (C->was_assembled) {
1050     for (i=0; i<cm; i++) {
1051       ncols = c_seq->i[i+1] - c_seq->i[i];
1052       row = rstart + i;
1053       ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
1054       cols += ncols; vals += ncols;
1055     }
1056   } else {
1057     ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
1058   }
1059 
1060   /* Co -> C, off-processor part */
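  /* each local row i of C_oth contributes to global row p->garray[i] of C */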
1061   cm = C_oth->rmap->N;
1062   c_seq = (Mat_SeqAIJ*)C_oth->data;
1063   cols = c_seq->j;
1064   vals = c_seq->a;
1065   for (i=0; i<cm; i++) {
1066     ncols = c_seq->i[i+1] - c_seq->i[i];
1067     row = p->garray[i];
1068     ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
1069     cols += ncols; vals += ncols;
1070   }
1071 
1072   ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1073   ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
1074 
1075   ptap->reuse = MAT_REUSE_MATRIX;
1076 
1077   if (!C->reuse) {
1078     /* Free supporting data structure ptap */
1079     ierr = MatDestroy_MPIAIJ_PtAPMPI_private(ptap);CHKERRQ(ierr);
1080   }
1081   PetscFunctionReturn(0);
1082 }
1083