
/*
  Defines projective product routines where A is an MPIAIJ matrix:
          C = P^T * A * P
*/
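
/*
   Usage sketch (illustrative only, not part of this file): these routines are
   normally reached through the generic MatPtAP() interface, e.g.

      Mat A,P,C;
      ...create and assemble A (MPIAIJ) and P (MPIAIJ)...
      ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,1.0,&C);CHKERRQ(ierr);
      ...change numerical values of A, keeping the same nonzero pattern...
      ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,1.0,&C);CHKERRQ(ierr);
      ierr = MatDestroy(C);CHKERRQ(ierr);

   MAT_INITIAL_MATRIX triggers the symbolic phase below and attaches the
   supporting data to P; MAT_REUSE_MATRIX reuses that data and recomputes
   only the numerical values.
*/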

#include "src/mat/impls/aij/seq/aij.h"   /*I "petscmat.h" I*/
#include "src/mat/utils/freespace.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"

EXTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
EXTERN PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat);
#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *mult;
  PetscObjectContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"MatPtAPMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
    ierr = MatDestroy(mult->B_loc);CHKERRQ(ierr);
    ierr = MatDestroy(mult->B_oth);CHKERRQ(ierr);
    if (mult->abi)     {ierr = PetscFree(mult->abi);CHKERRQ(ierr);}
    if (mult->abj)     {ierr = PetscFree(mult->abj);CHKERRQ(ierr);}
    if (mult->startsj) {ierr = PetscFree(mult->startsj);CHKERRQ(ierr);}
    if (mult->bufa)    {ierr = PetscFree(mult->bufa);CHKERRQ(ierr);}
    ierr = PetscObjectContainerDestroy(container);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject)A,"MatPtAPMPI",0);CHKERRQ(ierr);
    /* free the supporting struct only when it was actually attached */
    ierr = PetscFree(mult);CHKERRQ(ierr);
  }

  ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
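
/*
   Note: MatPtAP_MPIAIJ_MPIAIJ() below installs this routine as the destroy
   function of P, so the struct attached to P under "MatPtAPMPI" is released
   together with P; the call to MatDestroy_MPIAIJ() then performs the usual
   MPIAIJ cleanup.
*/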

#undef __FUNCT__
#define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *mult;
  PetscObjectContainer container;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
    ierr = PetscNew(Mat_MatMatMultMPI,&mult);CHKERRQ(ierr);
    mult->B_loc=PETSC_NULL; mult->B_oth=PETSC_NULL;
    mult->abi=PETSC_NULL; mult->abj=PETSC_NULL;
    mult->abnz_max = 0; /* symbolic A*P is not done yet */

    /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
    ierr = MatGetBrowsOfAoCols(A,P,scall,&mult->startsj,&mult->bufa,&mult->B_oth);CHKERRQ(ierr);

    /* get P_loc by taking all local rows of P */
    ierr = MatGetLocalMat(P,scall,&mult->B_loc);CHKERRQ(ierr);

    /* attach the supporting struct to P for reuse */
    P->ops->destroy = MatDestroy_MPIAIJ_PtAP;
    ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
    ierr = PetscObjectContainerSetPointer(container,mult);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject)P,"MatPtAPMPI",(PetscObject)container);CHKERRQ(ierr);

    /* now, compute symbolic local P^T*A*P */
    ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr); /* numeric product is computed as well */
    ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
  } else if (scall == MAT_REUSE_MATRIX){
    ierr = PetscObjectQuery((PetscObject)P,"MatPtAPMPI",(PetscObject *)&container);CHKERRQ(ierr);
    if (container) {
      ierr = PetscObjectContainerGetPointer(container,(void **)&mult);CHKERRQ(ierr);
    } else {
      SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
    }

    /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
    ierr = MatGetBrowsOfAoCols(A,P,scall,&mult->startsj,&mult->bufa,&mult->B_oth);CHKERRQ(ierr);

    /* get P_loc by taking all local rows of P */
    ierr = MatGetLocalMat(P,scall,&mult->B_loc);CHKERRQ(ierr);

  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",scall);
  }
  /* now, compute numeric local P^T*A*P */
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

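/*
   Overview of the symbolic stage (a summary of the code below, not a
   specification): with P split into its diagonal block Pd = p->A and
   off-diagonal block Po = p->B, the product is accumulated as
       C = Pd^T*(A*P) + Po^T*(A*P).
   The Pd^T part (Cd) belongs to locally owned rows of C; the Po^T part (Co)
   contributes to rows owned by other processes, so its ij-structure is sent
   to the owners and merged there.
*/
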
#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  Mat                  P_loc,P_oth,B_mpi;
  Mat_MatMatMultMPI    *ap;
  PetscObjectContainer container;
  FreeSpaceList        free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ           *p_loc,*p_oth;
  PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pdti,*pdtj,*poti,*potj,*ptJ,*pjj;
  PetscInt             *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,nnz;
  PetscInt             nlnk,*lnk,*cdi,*cdj,*owners_co,*coi,*coj,*cji;
  PetscInt             am=A->m,pN=P->N,pn=P->n;
  PetscInt             i,j,k,pnz,row,cnz;
  PetscBT              lnkbt;
  MPI_Comm             comm=A->comm;
  PetscMPIInt          size,rank,tagi,tagj,*len_si,*len_s,*len_ri;
  PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt             len,proc,*dnz,*onz,*owners;
  PetscInt             anzi,*bi,*bj,bnz;
  PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request          *si_waits,*sj_waits,*ri_waits,*rj_waits;
  MPI_Status           *status;
  Mat_Merge_SeqsToMPI  *merge;
  PetscInt             *api,*apj,*apJ,apnz,*prmap=p->garray,pon;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)P,"MatPtAPMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void **)&ap);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
  }
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  /* get data from P_loc and P_oth */
  P_loc=ap->B_loc; P_oth=ap->B_oth;
  p_loc=(Mat_SeqAIJ*)P_loc->data; p_oth=(Mat_SeqAIJ*)P_oth->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pi_oth=p_oth->i; pj_oth=p_oth->j;

  /* first, compute symbolic AP = A_loc*P */
  /*--------------------------------------*/
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&api);CHKERRQ(ierr);
  ap->abi = api;
  api[0]  = 0;

  /* create and initialize a linked list */
  nlnk = pN+1;
  ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
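
  /*
     The PetscLL* routines implement a sorted linked list over 0..pN-1 with a
     bit table (lnkbt) for O(1) membership tests; the usage pattern throughout
     this routine is roughly:
        PetscLLCreate(...);              -- once per symbolic phase
        for each row:
          PetscLLAdd(...);               -- merge column indices; nlnk = #new
          PetscLLClean(...);             -- drain sorted indices, reset list
        PetscLLDestroy(...);             -- once, at the end
  */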

  /* Initial FreeSpace size is fill*nnz(A) */
  ierr = GetMoreSpace((PetscInt)(fill*(adi[am]+aoi[am])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  for (i=0; i<am; i++) {
    apnz = 0;
    /* diagonal portion of A */
    anzi = adi[i+1] - adi[i];
    for (j=0; j<anzi; j++){
      row = *adj++;
      pnz = pi_loc[row+1] - pi_loc[row];
      pjj = pj_loc + pi_loc[row];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLAdd(pnz,pjj,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }
    /* off-diagonal portion of A */
    anzi = aoi[i+1] - aoi[i];
    for (j=0; j<anzi; j++){
      row = *aoj++;
      pnz = pi_oth[row+1] - pi_oth[row];
      pjj = pj_oth + pi_oth[row];
      ierr = PetscLLAdd(pnz,pjj,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      apnz += nlnk;
    }

    api[i+1] = api[i] + apnz;
    if (ap->abnz_max < apnz) ap->abnz_max = apnz;

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<apnz) {
      ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }

    /* copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,apnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    current_space->array           += apnz;
    current_space->local_used      += apnz;
    current_space->local_remaining -= apnz;
  }
  /* allocate space for apj, initialize apj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((api[am]+1)*sizeof(PetscInt),&ap->abj);CHKERRQ(ierr);
  apj  = ap->abj;
  ierr = MakeSpaceContiguous(&free_space,ap->abj);CHKERRQ(ierr);

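  /* at this point ap->abi/ap->abj hold the ij-structure of AP in CSR form:
     row i of AP has column indices apj[api[i]] .. apj[api[i+1]-1] */
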
  /* get ij structure of (p->A)^T and (p->B)^T */
  /*-------------------------------------------*/
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);
  ierr = MatGetSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* then, compute symbolic Co_seq = (p->B)^T*AP */
  /* allocate coi array, arrays for fill computation and */
  /* free space for accumulating nonzero column info */
  pon = (p->B)->n; /* total num of rows to be sent to other processors
                      >= (num of nonzero rows of C_seq) - pn */
  ierr = PetscMalloc((pon+1)*sizeof(PetscInt),&coi);CHKERRQ(ierr);
  coi[0] = 0;

  /* set initial free space to be 3*pon*max( nnz(AP) per row) */
  nnz           = 3*pon*ap->abnz_max + 1;
  ierr          = GetMoreSpace(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* determine symbolic Co=(p->B)^T*AP - send to others */
  /*----------------------------------------------------*/
  for (i=0; i<pon; i++) {
    cnz = 0;
    pnz = poti[i+1] - poti[i];
    j   = pnz;
    ptJ = potj + poti[i+1];
    while (j){ /* assume cols are almost in increasing order; starting from the end saves computation */
      j--; ptJ--;
      row  = *ptJ; /* row of AP == col of Pot */
      apnz = api[row+1] - api[row];
      apJ  = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLAdd(apnz,apJ,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      cnz += nlnk;
    }

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<cnz) {
      ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }

    /* copy data into free space, and zero out dense rows */
    ierr = PetscLLClean(pN,pN,cnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    current_space->array           += cnz;
    current_space->local_used      += cnz;
    current_space->local_remaining -= cnz;
    coi[i+1] = coi[i] + cnz;
  }
  ierr = PetscMalloc((coi[pon]+1)*sizeof(PetscInt),&coj);CHKERRQ(ierr);
  ierr = MakeSpaceContiguous(&free_space,coj);CHKERRQ(ierr);
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->B,&poti,&potj);CHKERRQ(ierr);

  /* send j structure of Co to other processors */
  /*--------------------------------------------*/
  /* determine row ownership */
  ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);
  ierr = PetscMapCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  ierr = PetscMapSetLocalSize(merge->rowmap,pn);CHKERRQ(ierr);
  ierr = PetscMapSetType(merge->rowmap,MAP_MPI);CHKERRQ(ierr);
  ierr = PetscMapGetGlobalRange(merge->rowmap,&owners);CHKERRQ(ierr);

  /* determine the number of messages to send, their lengths */
  /*---------------------------------------------------------*/
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
  ierr = PetscMemzero(len_si,size*sizeof(PetscMPIInt));CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
  len_s = merge->len_s;
  merge->nsend = 0;

  ierr = PetscMalloc((size+2)*sizeof(PetscInt),&owners_co);CHKERRQ(ierr);
  ierr = PetscMemzero(len_s,size*sizeof(PetscMPIInt));CHKERRQ(ierr);

  proc = 0;
  for (i=0; i<pon; i++){
    while (prmap[i] >= owners[proc+1]) proc++;
    len_si[proc]++;  /* num of rows in Co to be sent to [proc] */
    len_s[proc] += coi[i+1] - coi[i];
  }

  len = 0;  /* max length of buf_si[] */
  owners_co[0] = 0;
  for (proc=0; proc<size; proc++){
    owners_co[proc+1] = owners_co[proc] + len_si[proc];
    if (len_si[proc]){
      merge->nsend++;
      len_si[proc] = 2*(len_si[proc] + 1);
      len += len_si[proc];
    }
  }
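  /* each i-structure message thus carries 2*(nrows+1) integers: one count,
     nrows local row indices, and nrows+1 row offsets (see the packing loop
     further below) */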

  /* determine the number and length of messages to receive for ij-structure */
  /*-------------------------------------------------------------------------*/
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv and Isend of j-structure */
  /*------------------------------------------*/
  ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);CHKERRQ(ierr);

  ierr = PetscMalloc((2*merge->nsend+1)*sizeof(MPI_Request),&si_waits);CHKERRQ(ierr);
  sj_waits = si_waits + merge->nsend;

  for (proc=0, k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    i = owners_co[proc];
    ierr = MPI_Isend(coj+coi[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);CHKERRQ(ierr);
    k++;
  }

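  /* the j-structure sends above are left in flight while the local Cd
     structure is computed below; the matching MPI_Waitall()s come after it */
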
  /* determine symbolic Cd = (p->A)^T*AP - stays local */
  /*---------------------------------------------------*/
  /* allocate cdi array, arrays for fill computation and */
  /* free space for accumulating nonzero column info */
  ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&cdi);CHKERRQ(ierr);
  cdi[0] = 0;

  /* set initial free space to be 3*pn*max( nnz(AP) per row) */
  nnz           = 3*pn*ap->abnz_max + 1;
  ierr          = GetMoreSpace(nnz,&free_space);CHKERRQ(ierr);
  current_space = free_space;

  for (i=0; i<pn; i++) {
    cnz = 0;
    pnz = pdti[i+1] - pdti[i];
    j   = pnz;
    ptJ = pdtj + pdti[i+1];
    while (j){ /* assume cols are almost in increasing order; starting from the end saves computation */
      j--; ptJ--;
      row  = *ptJ; /* row of AP == col of Pt */
      apnz = api[row+1] - api[row];
      apJ  = apj + api[row];
      /* add non-zero cols of AP into the sorted linked list lnk */
      ierr = PetscLLAdd(apnz,apJ,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      cnz += nlnk;
    }

    /* if free space is not available, double the total space in the list */
    if (current_space->local_remaining<cnz) {
      ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }

    /* copy data into free space, and zero out dense rows */
    ierr = PetscLLClean(pN,pN,cnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    current_space->array           += cnz;
    current_space->local_used      += cnz;
    current_space->local_remaining -= cnz;
    cdi[i+1] = cdi[i] + cnz;
  }
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(p->A,&pdti,&pdtj);CHKERRQ(ierr);

  /* allocate space for cdj, initialize cdj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((cdi[pn]+1)*sizeof(PetscInt),&cdj);CHKERRQ(ierr);
  ierr = MakeSpaceContiguous(&free_space,cdj);CHKERRQ(ierr);

  /* receives and sends of j-structure are complete */
  /*------------------------------------------------*/
  ierr = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nrecv,rj_waits,status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nsend,sj_waits,status);CHKERRQ(ierr);

  /* send and recv i-structure */
  /*---------------------------*/
  ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&tagi);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);CHKERRQ(ierr);

  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of the k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (local to the receiving processor)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
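    /* illustrative example (not from a real run): sending rows 3 and 7 of Co
       with 2 and 5 nonzeros respectively packs as
         buf_si = [2, 3,7, 0,2,7]  i.e. nrows=2, rows {3,7}, offsets {0,2,7} */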
    nrows       = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows = 0;
    for (i=owners_co[proc]; i<owners_co[proc+1]; i++){
      anzi = coi[i+1] - coi[i];
      if (!anzi) SETERRQ1(1,"i=%d, anzi = 0",i);
      buf_si_i[nrows+1] = buf_si_i[nrows] + anzi;  /* i-structure */
      buf_si[nrows+1]   = prmap[i] - owners[proc]; /* local row index */
      nrows++;
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }

  ierr = MPI_Waitall(merge->nrecv,ri_waits,status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nsend,si_waits,status);CHKERRQ(ierr);

  ierr = PetscLogInfo((PetscObject)A,"MatPtAPSymbolic_MPIAIJ_MPIAIJ: nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++){
    ierr = PetscLogInfo((PetscObject)A,"MatPtAPSymbolic_MPIAIJ_MPIAIJ:   recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(rj_waits);CHKERRQ(ierr);
  ierr = PetscFree(si_waits);CHKERRQ(ierr);
  ierr = PetscFree(ri_waits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);
  ierr = PetscFree(status);CHKERRQ(ierr);

  /* compute the local portion of C (mpi mat) */
  /*------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* initial FreeSpace size is 2*(num of local nnz(C_seq)) */
  ierr = GetMoreSpace((PetscInt)(2*cdi[pn]+1),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* determine symbolic info for each local row */
  ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt*),&buf_ri_k);CHKERRQ(ierr);
  nextrow = buf_ri_k + merge->nrecv;
  nextci  = nextrow + merge->nrecv;
  for (k=0; k<merge->nrecv; k++){
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  len = 0;
  for (i=0; i<pn; i++) {
    bnz = 0;
    /* add local non-zero cols of this proc's C_seq into lnk */
    anzi = cdi[i+1] - cdi[i];
    cji  = cdj + cdi[i];
    ierr = PetscLLAdd(anzi,cji,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
    bnz += nlnk;
    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++){ /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        anzi = *(nextci[k]+1) - *nextci[k];
        cji  = buf_rj[k] + *nextci[k];
        ierr = PetscLLAdd(anzi,cji,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
        bnz += nlnk;
        nextrow[k]++; nextci[k]++;
      }
    }
    if (len < bnz) len = bnz;  /* =max(bnz) */

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<bnz) {
      ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,bnz,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],bnz,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += bnz;
    current_space->local_used      += bnz;
    current_space->local_remaining -= bnz;

    bi[i+1] = bi[i] + bnz;
  }
  ierr = PetscFree(cdi);CHKERRQ(ierr);
  ierr = PetscFree(cdj);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);

  ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
  ierr = MakeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* create symbolic parallel matrix B_mpi */
  /*---------------------------------------*/
  ierr = MatCreate(comm,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE,&B_mpi);CHKERRQ(ierr);
  ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);

  /* B_mpi is not ready for use - assembly will be done by MatPtAPNumeric() */
  B_mpi->assembled    = PETSC_FALSE;
  B_mpi->ops->destroy = MatDestroy_MPIAIJ_SeqsToMPI;
  merge->bi           = bi;
  merge->bj           = bj;
  merge->coi          = coi;
  merge->coj          = coj;
  merge->buf_ri       = buf_ri;
  merge->buf_rj       = buf_rj;
  merge->owners_co    = owners_co;

  /* attach the supporting struct to B_mpi for reuse */
  ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetPointer(container,merge);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
  *C = B_mpi;

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode       ierr;
  Mat_Merge_SeqsToMPI  *merge;
  Mat_MatMatMultMPI    *ap;
  PetscObjectContainer cont_merge,cont_ptap;
  Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data,*p=(Mat_MPIAIJ*)P->data;
  Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ           *pd=(Mat_SeqAIJ*)(p->A)->data,*po=(Mat_SeqAIJ*)(p->B)->data;
  Mat_SeqAIJ           *p_loc,*p_oth;
  PetscInt             *adi=ad->i,*aoi=ao->i,*adj=ad->j,*aoj=ao->j,*apJ,nextp,flops=0;
  PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pj;
  PetscInt             i,j,k,anz,pnz,apnz,nextap,row,*cj;
  MatScalar            *ada=ad->a,*aoa=ao->a,*apa,*pa,*ca,*pa_loc,*pa_oth;
  PetscInt             am=A->m,cm=C->m,pon=(p->B)->n;
  MPI_Comm             comm=C->comm;
  PetscMPIInt          size,rank,taga,*len_s;
  PetscInt             *owners,proc,nrows,**buf_ri_k,**nextrow,**nextci;
  PetscInt             **buf_ri,**buf_rj;
  PetscInt             cnz=0,*bj_i,*bi,*bj,bnz,nextcj; /* bi,bj,ba: local array of C(mpi mat) */
  MPI_Request          *s_waits,*r_waits;
  MPI_Status           *status;
  MatScalar            **abuf_r,*ba_i,*pA,*coa,*ba;
  PetscInt             *api,*apj,*coi,*coj;
  PetscInt             *poJ=po->j,*pdJ=pd->j,pcstart=p->cstart,pcend=p->cend;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = PetscObjectQuery((PetscObject)C,"MatMergeSeqsToMPI",(PetscObject *)&cont_merge);CHKERRQ(ierr);
  if (cont_merge) {
    ierr = PetscObjectContainerGetPointer(cont_merge,(void **)&merge);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix C does not possess an object container");
  }
  ierr = PetscObjectQuery((PetscObject)P,"MatPtAPMPI",(PetscObject *)&cont_ptap);CHKERRQ(ierr);
  if (cont_ptap) {
    ierr = PetscObjectContainerGetPointer(cont_ptap,(void **)&ap);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
  }

  /* get data from symbolic products */
  p_loc = (Mat_SeqAIJ*)(ap->B_loc)->data;
  p_oth = (Mat_SeqAIJ*)(ap->B_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a; pA=pa_loc;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  coi = merge->coi; coj = merge->coj;
  ierr = PetscMalloc((coi[pon]+1)*sizeof(MatScalar),&coa);CHKERRQ(ierr);
  ierr = PetscMemzero(coa,coi[pon]*sizeof(MatScalar));CHKERRQ(ierr);

  bi = merge->bi; bj = merge->bj;
  ierr = PetscMapGetGlobalRange(merge->rowmap,&owners);CHKERRQ(ierr);
  ierr = PetscMalloc((bi[cm]+1)*sizeof(MatScalar),&ba);CHKERRQ(ierr);
  ierr = PetscMemzero(ba,bi[cm]*sizeof(MatScalar));CHKERRQ(ierr);

  /* get data from symbolic A*P */
  ierr = PetscMalloc((ap->abnz_max+1)*sizeof(MatScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,ap->abnz_max*sizeof(MatScalar));CHKERRQ(ierr);
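  /* apa is a per-row scratch accumulator: apa[k] will hold the value of the
     k-th nonzero of row i of A*P (column apJ[k]); it is zeroed again after
     each row is consumed below */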

  /* compute numeric C_seq = P_loc^T*A_loc*P */
  api = ap->abi; apj = ap->abj;
  for (i=0; i<am; i++) {
    /* form i-th sparse row of A*P */
    apnz = api[i+1] - api[i];
    apJ  = apj + api[i];
    /* diagonal portion of A */
    anz  = adi[i+1] - adi[i];
    for (j=0; j<anz; j++) {
      row = *adj++;
      pnz = pi_loc[row+1] - pi_loc[row];
      pj  = pj_loc + pi_loc[row];
      pa  = pa_loc + pi_loc[row];
      nextp = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
          apa[k] += (*ada)*pa[nextp++];
        }
      }
      flops += 2*pnz;
      ada++;
    }
    /* off-diagonal portion of A */
    anz  = aoi[i+1] - aoi[i];
    for (j=0; j<anz; j++) {
      row = *aoj++;
      pnz = pi_oth[row+1] - pi_oth[row];
      pj  = pj_oth + pi_oth[row];
      pa  = pa_oth + pi_oth[row];
      nextp = 0;
      for (k=0; nextp<pnz; k++) {
        if (apJ[k] == pj[nextp]) { /* col of AP == col of P */
          apa[k] += (*aoa)*pa[nextp++];
        }
      }
      flops += 2*pnz;
      aoa++;
    }

    /* compute P_loc[i,:]^T*AP[i,:] using outer product */
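    /* i.e. C += P_loc[i,:]^T * AP[i,:]; summed over all local rows i this
       gives C = P^T*(A*P). Entries whose global row falls outside
       [pcstart,pcend) are accumulated into Co (sent to the owning process
       later); the rest go into the local part Cd */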
    pnz = pi_loc[i+1] - pi_loc[i];
    for (j=0; j<pnz; j++) {
      nextap = 0;
      row    = *pJ++; /* global index */
      if (row < pcstart || row >= pcend) { /* put the value into Co */
        cj = coj + coi[*poJ];
        ca = coa + coi[*poJ++];
      } else {                             /* put the value into Cd */
        cj = bj + bi[*pdJ];
        ca = ba + bi[*pdJ++];
      }
      for (k=0; nextap<apnz; k++) {
        if (cj[k] == apJ[nextap]) ca[k] += (*pA)*apa[nextap++];
      }
      flops += 2*apnz;
      pA++;
    }

    /* zero the current row info for A*P */
    ierr = PetscMemzero(apa,apnz*sizeof(MatScalar));CHKERRQ(ierr);
  }
  ierr = PetscFree(apa);CHKERRQ(ierr);

  /* send and recv matrix values */
  /*-----------------------------*/
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;
  len_s  = merge->len_s;
  ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&taga);CHKERRQ(ierr);
  ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&s_waits);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    i = merge->owners_co[proc];
    ierr = MPI_Isend(coa+coi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }
  ierr = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(status);CHKERRQ(ierr);

  ierr = PetscFree(s_waits);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);
  ierr = PetscFree(coa);CHKERRQ(ierr);

  /* insert local and received values into C */
  /*-----------------------------------------*/
  ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt*),&buf_ri_k);CHKERRQ(ierr);
  nextrow = buf_ri_k + merge->nrecv;
  nextci  = nextrow + merge->nrecv;

  for (k=0; k<merge->nrecv; k++){
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th recved i-structure */
    nrows       = *(buf_ri_k[k]);
    nextrow[k]  = buf_ri_k[k] + 1;           /* next row number of k-th recved i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th recved i-structure */
  }

  for (i=0; i<cm; i++) {
    row  = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj + bi[i];       /* col indices of the i-th row of C */
    ba_i = ba + bi[i];
    bnz  = bi[i+1] - bi[i];
    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++){ /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cnz = *(nextci[k]+1) - *nextci[k];
        cj  = buf_rj[k] + *(nextci[k]);
        ca  = abuf_r[k] + *(nextci[k]);
        nextcj = 0;
        for (j=0; nextcj<cnz; j++){
          if (bj_i[j] == cj[nextcj]){ /* bcol == ccol */
            ba_i[j] += ca[nextcj++];
          }
        }
        nextrow[k]++; nextci[k]++;
        flops += 2*cnz; /* count adds only for rows actually received */
      }
    }
    ierr = MatSetValues(C,1,&row,bnz,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatSetOption(C,MAT_COLUMNS_SORTED);CHKERRQ(ierr); /* TODO: does this cause a delay? */
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = PetscFree(ba);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
  ierr = PetscLogFlops(flops);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}