xref: /petsc/src/mat/impls/aij/mpi/mpiptap.c (revision 715a534632cc6029c3649e8dc2fa74cc65e81325)

/*
  Defines projective product routines where A is an MPIAIJ matrix:
          C = P^T * A * P
*/
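
/*
  Usage sketch (illustrative, not part of this file; assumes A and P are assembled
  MPIAIJ matrices with compatible layouts):

    Mat C;
    ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);   // symbolic + numeric product
    // ... modify numerical values of A and/or P, keeping their nonzero patterns ...
    ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C);CHKERRQ(ierr);     // numeric phase only, reuses saved ptap data
    ierr = MatDestroy(&C);CHKERRQ(ierr);
*/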

#include <../src/mat/impls/aij/seq/aij.h>   /*I "petscmat.h" I*/
#include <../src/mat/utils/freespace.h>
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petscbt.h>
#include <petsctime.h>

/* #define PTAP_PROFILE */

PetscErrorCode MatView_MPIAIJ_PtAP(Mat A,PetscViewer viewer)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI         *ptap=a->ap;
  PetscBool         iascii;
  PetscViewerFormat format;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerGetFormat(viewer,&format);CHKERRQ(ierr);
    if (format == PETSC_VIEWER_ASCII_INFO || format == PETSC_VIEWER_ASCII_INFO_DETAIL) {
      if (ptap->algType == 0) {
        ierr = PetscViewerASCIIPrintf(viewer,"using scalable MatPtAP() implementation\n");CHKERRQ(ierr);
      } else if (ptap->algType == 1) {
        ierr = PetscViewerASCIIPrintf(viewer,"using nonscalable MatPtAP() implementation\n");CHKERRQ(ierr);
      }
    }
  }
  ierr = (ptap->view)(A,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatFreeIntermediateDataStructures_MPIAIJ_AP(Mat A)
{
  PetscErrorCode      ierr;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data;
  Mat_APMPI           *ptap=a->ap;
  Mat_Merge_SeqsToMPI *merge;

  PetscFunctionBegin;
  if (!ptap) PetscFunctionReturn(0);

  ierr = PetscFree2(ptap->startsj_s,ptap->startsj_r);CHKERRQ(ierr);
  ierr = PetscFree(ptap->bufa);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->P_oth);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->A_loc);CHKERRQ(ierr); /* used by MatTransposeMatMult() */
  ierr = MatDestroy(&ptap->Rd);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->Ro);CHKERRQ(ierr);
  if (ptap->AP_loc) { /* used by alg_rap */
    Mat_SeqAIJ *ap = (Mat_SeqAIJ*)(ptap->AP_loc)->data;
    ierr = PetscFree(ap->i);CHKERRQ(ierr);
    ierr = PetscFree2(ap->j,ap->a);CHKERRQ(ierr);
    ierr = MatDestroy(&ptap->AP_loc);CHKERRQ(ierr);
  } else { /* used by alg_ptap */
    ierr = PetscFree(ptap->api);CHKERRQ(ierr);
    ierr = PetscFree(ptap->apj);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&ptap->C_loc);CHKERRQ(ierr);
  ierr = MatDestroy(&ptap->C_oth);CHKERRQ(ierr);
  if (ptap->apa) {ierr = PetscFree(ptap->apa);CHKERRQ(ierr);}

  ierr = MatDestroy(&ptap->Pt);CHKERRQ(ierr);

  merge=ptap->merge;
  if (merge) { /* used by alg_ptap */
    ierr = PetscFree(merge->id_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_s);CHKERRQ(ierr);
    ierr = PetscFree(merge->len_r);CHKERRQ(ierr);
    ierr = PetscFree(merge->bi);CHKERRQ(ierr);
    ierr = PetscFree(merge->bj);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_ri);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj[0]);CHKERRQ(ierr);
    ierr = PetscFree(merge->buf_rj);CHKERRQ(ierr);
    ierr = PetscFree(merge->coi);CHKERRQ(ierr);
    ierr = PetscFree(merge->coj);CHKERRQ(ierr);
    ierr = PetscFree(merge->owners_co);CHKERRQ(ierr);
    ierr = PetscLayoutDestroy(&merge->rowmap);CHKERRQ(ierr);
    ierr = PetscFree(ptap->merge);CHKERRQ(ierr);
  }

  A->ops->destroy = ptap->destroy;
  ierr = PetscFree(a->ap);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
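
/*
  A sketch of the memory/reuse tradeoff (assumes C was created by MatPtAP() with
  MAT_INITIAL_MATRIX):

    ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,2.0,&C);CHKERRQ(ierr);
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
    // C remains valid for MatMult() etc., and roughly the memory of another copy of C
    // is reclaimed; but a later MatPtAP(A,P,MAT_REUSE_MATRIX,2.0,&C) now errors out,
    // since the numeric routines below require the saved ptap struct.
*/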

PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = (*A->ops->freeintermediatedatastructures)(A);CHKERRQ(ierr);
  ierr = (*A->ops->destroy)(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  MPI_Comm       comm;
#if !defined(PETSC_HAVE_HYPRE)
  const char          *algTypes[2] = {"scalable","nonscalable"};
  PetscInt            nalg=2;
#else
  const char          *algTypes[3] = {"scalable","nonscalable","hypre"};
  PetscInt            nalg=3;
#endif
  PetscInt            pN=P->cmap->N,alg=1; /* set nonscalable algorithm as default */

  PetscFunctionBegin;
  /* check if matrix local sizes are compatible */
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  if (A->rmap->rstart != P->rmap->rstart || A->rmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Arow (%D, %D) != Prow (%D,%D)",A->rmap->rstart,A->rmap->rend,P->rmap->rstart,P->rmap->rend);
  if (A->cmap->rstart != P->rmap->rstart || A->cmap->rend != P->rmap->rend) SETERRQ4(comm,PETSC_ERR_ARG_SIZ,"Matrix local dimensions are incompatible, Acol (%D, %D) != Prow (%D,%D)",A->cmap->rstart,A->cmap->rend,P->rmap->rstart,P->rmap->rend);

  if (scall == MAT_INITIAL_MATRIX) {
    /* pick an algorithm */
    ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)A),((PetscObject)A)->prefix,"MatPtAP","Mat");CHKERRQ(ierr);
    ierr = PetscOptionsEList("-matptap_via","Algorithmic approach","MatPtAP",algTypes,nalg,algTypes[alg],&alg,&flg);CHKERRQ(ierr);
    ierr = PetscOptionsEnd();CHKERRQ(ierr);

    if (!flg && pN > 100000) { /* may switch to scalable algorithm as default */
      MatInfo     Ainfo,Pinfo;
      PetscInt    nz_local;
      PetscBool   alg_scalable_loc=PETSC_FALSE,alg_scalable;

      ierr = MatGetInfo(A,MAT_LOCAL,&Ainfo);CHKERRQ(ierr);
      ierr = MatGetInfo(P,MAT_LOCAL,&Pinfo);CHKERRQ(ierr);
      nz_local = (PetscInt)(Ainfo.nz_allocated + Pinfo.nz_allocated);

      if (pN > fill*nz_local) alg_scalable_loc = PETSC_TRUE;
      ierr = MPIU_Allreduce(&alg_scalable_loc,&alg_scalable,1,MPIU_BOOL,MPI_LOR,comm);CHKERRQ(ierr);

      if (alg_scalable) {
        alg = 0; /* the scalable algorithm can be ~50% slower than the nonscalable one */
      }
    }
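
    /* Worked example of the heuristic above (illustrative numbers): with fill=2.0 and
       nz_local = nz(A)+nz(P) = 40000 on some process, any pN > 80000 makes that process set
       alg_scalable_loc = PETSC_TRUE, and the MPI_LOR reduction then switches all processes
       to the scalable algorithm. */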

    switch (alg) {
    case 1:
      /* do R=P^T locally, then C=R*A*P -- nonscalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
#if defined(PETSC_HAVE_HYPRE)
    case 2:
      /* use hypre's BoomerAMGBuildCoarseOperator */
      ierr = MatPtAPSymbolic_AIJ_AIJ_wHYPRE(A,P,fill,C);CHKERRQ(ierr);
      PetscFunctionReturn(0);
      break;
#endif
    default:
      /* do R=P^T locally, then C=R*A*P -- scalable */
      ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(A,P,fill,C);CHKERRQ(ierr);
      ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
      break;
    }
  }
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = (*(*C)->ops->ptapnumeric)(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
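
/*
  Runtime selection of the implementation (a usage sketch of the options registered above):
    -matptap_via nonscalable   dense accumulator of length pN per process (the default)
    -matptap_via scalable      linked-list kernels whose memory tracks local nonzeros
    -matptap_via hypre         available only when PETSc is configured with hypre
    -mat_freeintermediatedatastructures   release the ptap struct after the numeric phase
*/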

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row,*api,*apj,am = A->rmap->n,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;
  PetscBool         freestruct;

  PetscFunctionBegin;
  if (!ptap) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);

  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  else SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONGSTATE,"ptap->P_oth is NULL. Cannot proceed!");

  api   = ap->i;
  apj   = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
    apnz = api[i+1] - api[i];
    apa = ap->a + api[i];
    ierr = PetscMemzero(apa,sizeof(PetscScalar)*apnz);CHKERRQ(ierr);
    AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa);
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }
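
  /* A sketch of what AProw_scalable(i,ad,ao,p_loc,p_oth,api,apj,apa) computes (the macro
     itself is defined in the MPIAIJ headers; this pseudocode is illustrative only):
       for each nonzero Ad[i,k]:  apa[...] += Ad[i,k]*P_loc[k,:]
       for each nonzero Ao[i,k]:  apa[...] += Ao[i,k]*P_oth[k,:]
     where the target positions are located inside the fixed pattern apj[api[i]..api[i+1]),
     i.e. a sparse axpy into the sparsity structure computed by the symbolic phase. */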

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = MatMatMultNumeric_SeqAIJ_SeqAIJ_Scalable(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);

  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE */
  /* and there are no off-processor parts. */
  /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
  /* no longer computes the correct column index. The more general MatSetValues_MPIAIJ() must be used instead, which looks the */
  /* column index up in a table, among other extra work. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row = rstart + i;
      ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatFreeIntermediateDataStructures","Mat");CHKERRQ(ierr);
  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  freestruct = PETSC_FALSE;
  ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr);
  if (freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ_scalable(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_APMPI           *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi,P_loc,P_oth;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
  MatType             mtype;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* create struct Mat_APMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 0;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&P_loc);CHKERRQ(ierr);

  ptap->P_loc = P_loc;
  ptap->P_oth = P_oth;

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc  = (Mat_SeqAIJ*)P_loc->data;
  if (P_oth) p_oth = (Mat_SeqAIJ*)P_oth->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted_Scalable(pnz,Jptr,lnk);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(apnz,current_space->array,lnk);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
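
  /* The condensed linked list accumulates the sorted union of the column indices of the
     P rows touched by row i of A: e.g. merging rows {0,3,7} and {3,5} gives {0,3,5,7},
     hence apnz = 4. The _Scalable variant sizes its workspace by Crmax (the merged-row
     bound computed above) rather than by the global column count pN, which is what makes
     this algorithm's memory footprint scale with local data. */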
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Scalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Scalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* ------------------------------------ */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* buf_si length: 1 (nrows) + nrows (row indices) + nrows+1 (i-structure) */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate_Scalable(Crmax,&lnk);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted_Scalable(nzi,Jptr,lnk);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean_Scalable(nzi,current_space->array,lnk);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
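
  /* At this point dnz[i]/onz[i] hold, for each locally owned row of Cmpi, the number of
     nonzeros whose global column lies inside/outside [owners[rank],owners[rank+1]); they
     feed MatMPIAIJSetPreallocation() below so assembly triggers no further mallocs. */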
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLCondensedDestroy_Scalable(lnk);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ap           = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ_scalable;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode      ierr;
  Mat_APMPI           *ptap;
  Mat_MPIAIJ          *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c;
  MPI_Comm            comm;
  PetscMPIInt         size,rank;
  Mat                 Cmpi;
  PetscFreeSpaceList  free_space=NULL,current_space=NULL;
  PetscInt            am=A->rmap->n,pm=P->rmap->n,pN=P->cmap->N,pn=P->cmap->n;
  PetscInt            *lnk,i,k,pnz,row,nsend;
  PetscBT             lnkbt;
  PetscMPIInt         tagi,tagj,*len_si,*len_s,*len_ri,icompleted=0,nrecv;
  PetscInt            **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt            len,proc,*dnz,*onz,*owners,nzi,nspacedouble;
  PetscInt            nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request         *swaits,*rwaits;
  MPI_Status          *sstatus,rstatus;
  PetscLayout         rowmap;
  PetscInt            *owners_co,*coi,*coj;    /* i and j arrays of (p->B)^T*A*P - used in the communication */
  PetscMPIInt         *len_r,*id_r;    /* arrays of length comm->size, storing send/recv matrix values */
  PetscInt            *api,*apj,*Jptr,apnz,*prmap=p->garray,con,j,ap_rmax=0,Crmax,*aj,*ai,*pi;
  Mat_SeqAIJ          *p_loc,*p_oth=NULL,*ad=(Mat_SeqAIJ*)(a->A)->data,*ao=NULL,*c_loc,*c_oth;
  PetscScalar         *apv;
  PetscTable          ta;
  MatType             mtype;
#if defined(PETSC_USE_INFO)
  PetscReal           apfill;
#endif

  PetscFunctionBegin;
  ierr = PetscObjectGetComm((PetscObject)A,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  if (size > 1) ao = (Mat_SeqAIJ*)(a->B)->data;

  /* create symbolic parallel matrix Cmpi */
  ierr = MatCreate(comm,&Cmpi);CHKERRQ(ierr);
  ierr = MatGetType(A,&mtype);CHKERRQ(ierr);
  ierr = MatSetType(Cmpi,mtype);CHKERRQ(ierr);

  /* Do dense axpy in MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  Cmpi->ops->ptapnumeric = MatPtAPNumeric_MPIAIJ_MPIAIJ;

  /* create struct Mat_APMPI and attach it to C later */
  ierr        = PetscNew(&ptap);CHKERRQ(ierr);
  ptap->reuse = MAT_INITIAL_MATRIX;
  ptap->algType = 1;

  /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
  ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_INITIAL_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
  /* get P_loc by taking all local rows of P */
  ierr = MatMPIAIJGetLocalMat(P,MAT_INITIAL_MATRIX,&ptap->P_loc);CHKERRQ(ierr);

  /* (0) compute Rd = Pd^T, Ro = Po^T  */
  /* --------------------------------- */
  ierr = MatTranspose(p->A,MAT_INITIAL_MATRIX,&ptap->Rd);CHKERRQ(ierr);
  ierr = MatTranspose(p->B,MAT_INITIAL_MATRIX,&ptap->Ro);CHKERRQ(ierr);

  /* (1) compute symbolic AP = A_loc*P = Ad*P_loc + Ao*P_oth (api,apj) */
  /* ----------------------------------------------------------------- */
  p_loc  = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) p_oth  = (Mat_SeqAIJ*)(ptap->P_oth)->data;

  /* create and initialize a linked list */
  ierr = PetscTableCreate(pn,pN,&ta);CHKERRQ(ierr); /* for computing AP_loc and Cmpi */
  MatRowMergeMax_SeqAIJ(p_loc,ptap->P_loc->rmap->N,ta);
  MatRowMergeMax_SeqAIJ(p_oth,ptap->P_oth->rmap->N,ta);
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr); /* Crmax = nnz(sum of Prows) */

  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);

  /* Initial FreeSpace size is fill*(nnz(A) + nnz(P)) */
  if (ao) {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],PetscIntSumTruncate(ao->i[am],p_loc->i[pm]))),&free_space);CHKERRQ(ierr);
  } else {
    ierr = PetscFreeSpaceGet(PetscRealIntMultTruncate(fill,PetscIntSumTruncate(ad->i[am],p_loc->i[pm])),&free_space);CHKERRQ(ierr);
  }
  current_space = free_space;
  nspacedouble  = 0;

  ierr   = PetscMalloc1(am+1,&api);CHKERRQ(ierr);
  api[0] = 0;
  for (i=0; i<am; i++) {
    /* diagonal portion: Ad[i,:]*P */
    ai = ad->i; pi = p_loc->i;
    nzi = ai[i+1] - ai[i];
    aj  = ad->j + ai[i];
    for (j=0; j<nzi; j++) {
      row  = aj[j];
      pnz  = pi[row+1] - pi[row];
      Jptr = p_loc->j + pi[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
    }
    /* off-diagonal portion: Ao[i,:]*P */
    if (ao) {
      ai = ao->i; pi = p_oth->i;
      nzi = ai[i+1] - ai[i];
      aj  = ao->j + ai[i];
      for (j=0; j<nzi; j++) {
        row  = aj[j];
        pnz  = pi[row+1] - pi[row];
        Jptr = p_oth->j + pi[row];
        ierr = PetscLLCondensedAddSorted(pnz,Jptr,lnk,lnkbt);CHKERRQ(ierr);
      }
    }
    apnz     = lnk[0];
    api[i+1] = api[i] + apnz;
    if (ap_rmax < apnz) ap_rmax = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = PetscFreeSpaceGet(PetscIntSumTruncate(apnz,current_space->total_array_size),&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }

    /* Copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,apnz,current_space->array,lnk,lnkbt);CHKERRQ(ierr);

    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
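
  /* Unlike the _Scalable variants used by the scalable symbolic routine above,
     PetscLLCondensedCreate/AddSorted/Clean rely on the PetscBT bitmap lnkbt of length pN
     (the global number of columns of P) for O(1) membership tests: faster per insertion,
     but with memory that grows with the global problem size - the reason this path is
     labelled "nonscalable". */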
  /* Allocate space for apj and apv, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr   = PetscMalloc2(api[am],&apj,api[am],&apv);CHKERRQ(ierr);
  ierr   = PetscFreeSpaceContiguous(&free_space,apj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* Create AP_loc for reuse */
  ierr = MatCreateSeqAIJWithArrays(PETSC_COMM_SELF,am,pN,api,apj,apv,&ptap->AP_loc);CHKERRQ(ierr);

#if defined(PETSC_USE_INFO)
  if (ao) {
    apfill = (PetscReal)api[am]/(ad->i[am]+ao->i[am]+p_loc->i[pm]+1);
  } else {
    apfill = (PetscReal)api[am]/(ad->i[am]+p_loc->i[pm]+1);
  }
  ptap->AP_loc->info.mallocs           = nspacedouble;
  ptap->AP_loc->info.fill_ratio_given  = fill;
  ptap->AP_loc->info.fill_ratio_needed = apfill;

  if (api[am]) {
    ierr = PetscInfo3(ptap->AP_loc,"Nonscalable algorithm, AP_loc reallocs %D; Fill ratio: given %g needed %g.\n",nspacedouble,(double)fill,(double)apfill);CHKERRQ(ierr);
    ierr = PetscInfo1(ptap->AP_loc,"Use MatPtAP(A,B,MatReuse,%g,&C) for best AP_loc performance.\n",(double)apfill);CHKERRQ(ierr);
  } else {
    ierr = PetscInfo(ptap->AP_loc,"Nonscalable algorithm, AP_loc is empty\n");CHKERRQ(ierr);
  }
#endif

  /* (2-1) compute symbolic Co = Ro*AP_loc  */
  /* ------------------------------------ */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Ro,ptap->AP_loc,fill,&ptap->C_oth);CHKERRQ(ierr);

  /* (3) send coj of C_oth to other processors  */
  /* ------------------------------------------ */
  /* determine row ownership */
  ierr = PetscLayoutCreate(comm,&rowmap);CHKERRQ(ierr);
  rowmap->n  = pn;
  rowmap->bs = 1;
  ierr   = PetscLayoutSetUp(rowmap);CHKERRQ(ierr);
  owners = rowmap->range;

  /* determine the number of messages to send, their lengths */
  ierr = PetscMalloc4(size,&len_s,size,&len_si,size,&sstatus,size+2,&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  c_oth = (Mat_SeqAIJ*)ptap->C_oth->data;
  coi   = c_oth->i; coj = c_oth->j;
  con   = ptap->C_oth->rmap->n;
  proc  = 0;
  for (i=0; i<con; i++) {
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;               /* num of rows in Co(=Pt*AP) to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i]; /* num of nonzeros in Co to be sent to [proc] */
  }

  len          = 0; /* max length of buf_si[], see (4) */
  owners_co[0] = 0;
  nsend        = 0;
  for (proc=0; proc<size; proc++) {
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_s[proc]) {
      nsend++;
      len_si[proc] = 2*(len_si[proc] + 1); /* buf_si length: 1 (nrows) + nrows (row indices) + nrows+1 (i-structure) */
      len         += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for coi and coj  */
  ierr = PetscGatherNumberOfMessages(comm,NULL,len_s,&nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,nsend,nrecv,len_s,len_si,&id_r,&len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of coj */
  ierr = PetscCommGetNewTag(comm,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,nrecv,id_r,len_r,&buf_rj,&rwaits);CHKERRQ(ierr);
  ierr = PetscMalloc1(nsend+1,&swaits);CHKERRQ(ierr);
  for (proc=0, k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    i    = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,swaits+k);CHKERRQ(ierr);
    k++;
  }

  /* (2-2) compute symbolic C_loc = Rd*AP_loc */
  /* ---------------------------------------- */
  ierr = MatMatMultSymbolic_SeqAIJ_SeqAIJ(ptap->Rd,ptap->AP_loc,fill,&ptap->C_loc);CHKERRQ(ierr);
  c_loc = (Mat_SeqAIJ*)ptap->C_loc->data;

  /* receives of coj are complete */
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  /* add received column indices into ta to update Crmax */
  for (k=0; k<nrecv; k++) {/* k-th received message */
    Jptr = buf_rj[k];
    for (j=0; j<len_r[k]; j++) {
      ierr = PetscTableAdd(ta,*(Jptr+j)+1,1,INSERT_VALUES);CHKERRQ(ierr);
    }
  }
  ierr = PetscTableGetCount(ta,&Crmax);CHKERRQ(ierr);
  ierr = PetscTableDestroy(&ta);CHKERRQ(ierr);

  /* (4) send and recv coi */
  /*-----------------------*/
  ierr   = PetscCommGetNewTag(comm,&tagi);CHKERRQ(ierr);
  ierr   = PetscPostIrecvInt(comm,tagi,nrecv,id_r,len_ri,&buf_ri,&rwaits);CHKERRQ(ierr);
  ierr   = PetscMalloc1(len+1,&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++) {
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (global)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
    nrows       = len_si[proc]/2 - 1; /* num of rows in Co to be sent to [proc] */
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows       = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++) {
      nzi = coi[i+1] - coi[i];
      buf_si_i[nrows+1] = buf_si_i[nrows] + nzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] -owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,swaits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }
  for (i=0; i<nrecv; i++) {
    ierr = MPI_Waitany(nrecv,rwaits,&icompleted,&rstatus);CHKERRQ(ierr);
  }
  ierr = PetscFree(rwaits);CHKERRQ(ierr);
  if (nsend) {ierr = MPI_Waitall(nsend,swaits,sstatus);CHKERRQ(ierr);}

  ierr = PetscFree4(len_s,len_si,sstatus,owners_co);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(swaits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);

  /* (5) compute the local portion of Cmpi      */
  /* ------------------------------------------ */
  /* set initial free space to be Crmax, sufficient for holding the nonzeros in each row of Cmpi */
  ierr          = PetscFreeSpaceGet(Crmax,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  ierr = PetscMalloc3(nrecv,&buf_ri_k,nrecv,&nextrow,nrecv,&nextci);CHKERRQ(ierr);
  for (k=0; k<nrecv; k++) {
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  ierr = PetscLLCondensedCreate(Crmax,pN,&lnk,&lnkbt);CHKERRQ(ierr);
  for (i=0; i<pn; i++) {
    /* add C_loc into Cmpi */
    nzi  = c_loc->i[i+1] - c_loc->i[i];
    Jptr = c_loc->j + c_loc->i[i];
    ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);

    /* add received col data into lnk */
    for (k=0; k<nrecv; k++) { /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        nzi  = *(nextci[k]+1) - *nextci[k];
        Jptr = buf_rj[k] + *nextci[k];
        ierr = PetscLLCondensedAddSorted(nzi,Jptr,lnk,lnkbt);CHKERRQ(ierr);
        nextrow[k]++; nextci[k]++;
      }
    }
    nzi = lnk[0];

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLCondensedClean(pN,nzi,current_space->array,lnk,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],nzi,current_space->array,dnz,onz);CHKERRQ(ierr);
  }
  ierr = PetscFree3(buf_ri_k,nextrow,nextci);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);
  ierr = PetscFreeSpaceDestroy(free_space);CHKERRQ(ierr);

  /* local sizes and preallocation */
  ierr = MatSetSizes(Cmpi,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE);CHKERRQ(ierr);
  ierr = MatSetBlockSizes(Cmpi,PetscAbs(P->cmap->bs),PetscAbs(P->cmap->bs));CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(Cmpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* members in merge */
  ierr = PetscFree(id_r);CHKERRQ(ierr);
  ierr = PetscFree(len_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj[0]);CHKERRQ(ierr);
  ierr = PetscFree(buf_rj);CHKERRQ(ierr);
  ierr = PetscLayoutDestroy(&rowmap);CHKERRQ(ierr);

  /* attach the supporting struct to Cmpi for reuse */
  c = (Mat_MPIAIJ*)Cmpi->data;
  c->ap           = ptap;
  ptap->duplicate = Cmpi->ops->duplicate;
  ptap->destroy   = Cmpi->ops->destroy;
  ptap->view      = Cmpi->ops->view;
  ierr = PetscCalloc1(pN,&ptap->apa);CHKERRQ(ierr);

  /* Cmpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  Cmpi->assembled        = PETSC_FALSE;
  Cmpi->ops->destroy     = MatDestroy_MPIAIJ_PtAP;
  Cmpi->ops->view        = MatView_MPIAIJ_PtAP;
  Cmpi->ops->freeintermediatedatastructures = MatFreeIntermediateDataStructures_MPIAIJ_AP;
  *C                     = Cmpi;
  PetscFunctionReturn(0);
}

PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode    ierr;
  Mat_MPIAIJ        *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data,*c=(Mat_MPIAIJ*)C->data;
  Mat_SeqAIJ        *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ        *ap,*p_loc,*p_oth=NULL,*c_seq;
  Mat_APMPI         *ptap = c->ap;
  Mat               AP_loc,C_loc,C_oth;
  PetscInt          i,rstart,rend,cm,ncols,row;
  PetscInt          *api,*apj,am = A->rmap->n,j,col,apnz;
  PetscScalar       *apa;
  const PetscInt    *cols;
  const PetscScalar *vals;
  PetscBool         freestruct;

  PetscFunctionBegin;
  if (!ptap) {
    MPI_Comm comm;
    ierr = PetscObjectGetComm((PetscObject)C,&comm);CHKERRQ(ierr);
    SETERRQ(comm,PETSC_ERR_ARG_WRONGSTATE,"PtAP cannot be reused. Do not call MatFreeIntermediateDataStructures() or use '-mat_freeintermediatedatastructures'");
  }

  ierr = MatZeroEntries(C);CHKERRQ(ierr);
  /* 1) get Rd = Pd^T, Ro = Po^T */
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    ierr = MatTranspose(p->A,MAT_REUSE_MATRIX,&ptap->Rd);CHKERRQ(ierr);
    ierr = MatTranspose(p->B,MAT_REUSE_MATRIX,&ptap->Ro);CHKERRQ(ierr);
  }

  /* 2) get AP_loc */
  AP_loc = ptap->AP_loc;
  ap = (Mat_SeqAIJ*)AP_loc->data;

  /* 2-1) get P_oth = ptap->P_oth  and P_loc = ptap->P_loc */
  /*-----------------------------------------------------*/
  if (ptap->reuse == MAT_REUSE_MATRIX) {
    /* P_oth and P_loc are obtained in MatPtAPSymbolic() when reuse == MAT_INITIAL_MATRIX */
    ierr = MatGetBrowsOfAoCols_MPIAIJ(A,P,MAT_REUSE_MATRIX,&ptap->startsj_s,&ptap->startsj_r,&ptap->bufa,&ptap->P_oth);CHKERRQ(ierr);
    ierr = MatMPIAIJGetLocalMat(P,MAT_REUSE_MATRIX,&ptap->P_loc);CHKERRQ(ierr);
  }

  /* 2-2) compute numeric A_loc*P - dominating part */
  /* ---------------------------------------------- */
  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ptap->P_loc)->data;
  if (ptap->P_oth) {
    p_oth = (Mat_SeqAIJ*)(ptap->P_oth)->data;
  }
  apa   = ptap->apa;
  api   = ap->i;
  apj   = ap->j;
  for (i=0; i<am; i++) {
    /* AP[i,:] = A[i,:]*P = Ad[i,:]*P_loc + Ao[i,:]*P_oth */
    AProw_nonscalable(i,ad,ao,p_loc,p_oth,apa);
    apnz = api[i+1] - api[i];
    for (j=0; j<apnz; j++) {
      col = apj[j+api[i]];
      ap->a[j+ap->i[i]] = apa[col];
      apa[col] = 0.0;
    }
    ierr = PetscLogFlops(2.0*apnz);CHKERRQ(ierr);
  }
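
  /* The loop above is a sparse gather/scatter through a dense work array: AProw_nonscalable()
     scatters the products A[i,:]*P into apa (length pN, allocated with PetscCalloc1() in the
     symbolic phase), then the j-loop gathers the entries at the known pattern apj back into
     ap->a and re-zeroes them so apa is clean for the next row. */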

  /* 3) C_loc = Rd*AP_loc, C_oth = Ro*AP_loc */
  ierr = ((ptap->C_loc)->ops->matmultnumeric)(ptap->Rd,AP_loc,ptap->C_loc);CHKERRQ(ierr);
  ierr = ((ptap->C_oth)->ops->matmultnumeric)(ptap->Ro,AP_loc,ptap->C_oth);CHKERRQ(ierr);
  C_loc = ptap->C_loc;
  C_oth = ptap->C_oth;

  /* add C_loc and C_oth to C */
  ierr = MatGetOwnershipRange(C,&rstart,&rend);CHKERRQ(ierr);

  /* C_loc -> C */
  cm    = C_loc->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_loc->data;
  cols = c_seq->j;
  vals = c_seq->a;

  /* The (fast) MatSetValues_MPIAIJ_CopyFromCSRFormat function can only be used when C->was_assembled is PETSC_FALSE */
  /* and there are no off-processor parts. */
  /* If was_assembled is true, the statement aj[rowstart_diag+dnz_row] = mat_j[col] - cstart; in MatSetValues_MPIAIJ_CopyFromCSRFormat */
  /* no longer computes the correct column index. The more general MatSetValues_MPIAIJ() must be used instead, which looks the */
  /* column index up in a table, among other extra work. */
  if (C->assembled) {
    C->was_assembled = PETSC_TRUE;
    C->assembled     = PETSC_FALSE;
  }
  if (C->was_assembled) {
    for (i=0; i<cm; i++) {
      ncols = c_seq->i[i+1] - c_seq->i[i];
      row = rstart + i;
      ierr = MatSetValues_MPIAIJ(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
      cols += ncols; vals += ncols;
    }
  } else {
    ierr = MatSetValues_MPIAIJ_CopyFromCSRFormat(C,c_seq->j,c_seq->i,c_seq->a);CHKERRQ(ierr);
  }

  /* C_oth -> C, off-processor part */
  cm = C_oth->rmap->N;
  c_seq = (Mat_SeqAIJ*)C_oth->data;
  cols = c_seq->j;
  vals = c_seq->a;
  for (i=0; i<cm; i++) {
    ncols = c_seq->i[i+1] - c_seq->i[i];
    row = p->garray[i];
    ierr = MatSetValues(C,1,&row,ncols,cols,vals,ADD_VALUES);CHKERRQ(ierr);
    cols += ncols; vals += ncols;
  }

  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ptap->reuse = MAT_REUSE_MATRIX;

  /* the supporting struct ptap consumes almost the same amount of memory as C=PtAP; release it if C will not be updated by A and P */
  ierr = PetscOptionsBegin(PetscObjectComm((PetscObject)C),((PetscObject)C)->prefix,"MatFreeIntermediateDataStructures","Mat");CHKERRQ(ierr);
  freestruct = PETSC_FALSE;
  ierr = PetscOptionsBool("-mat_freeintermediatedatastructures","Free intermediate data structures", "MatFreeIntermediateDataStructures",freestruct, &freestruct, NULL);CHKERRQ(ierr);
  if (freestruct) {
    ierr = MatFreeIntermediateDataStructures(C);CHKERRQ(ierr);
  }
  ierr = PetscOptionsEnd();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}