/*
  petsc/src/mat/impls/aij/mpi/mpiptap.c

  Defines projective product routines where A is an MPIAIJ matrix:
          C = P^T * A * P
*/
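
/*
   Overview of the algorithm implemented below:
     1) For MAT_INITIAL_MATRIX, gather the rows of P corresponding to the nonzero
        off-processor columns of the local part of A (P_oth) and the locally owned rows
        of P (P_loc), and stash them, together with the symbolic A*P pattern, in a
        container attached to P so that MAT_REUSE_MATRIX calls can skip the symbolic work.
     2) Each process forms its sequential product C_seq = P_loc^T * A_loc * (P_loc,P_oth).
     3) The per-process C_seq pieces are merged, by rows, into the parallel MPIAIJ matrix C.

   A minimal usage sketch (assuming the usual MatPtAP() interface dispatches here for two
   MPIAIJ matrices; the names A, P, C and the fill value are placeholders):

     Mat       A,P,C;
     PetscReal fill = 1.0;
     ierr = MatPtAP(A,P,MAT_INITIAL_MATRIX,fill,&C);CHKERRQ(ierr);
     ... change the numerical values of A (same nonzero pattern) ...
     ierr = MatPtAP(A,P,MAT_REUSE_MATRIX,fill,&C);CHKERRQ(ierr);
     ierr = MatDestroy(C);CHKERRQ(ierr);
*/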

#include "src/mat/impls/aij/seq/aij.h"   /*I "petscmat.h" I*/
#include "src/mat/utils/freespace.h"
#include "src/mat/impls/aij/mpi/mpiaij.h"
#include "petscbt.h"

EXTERN PetscErrorCode MatDestroy_MPIAIJ(Mat);
EXTERN PetscErrorCode MatDestroy_MPIAIJ_SeqsToMPI(Mat); /* used below as the destroy routine of the merged parallel matrix */

#undef __FUNCT__
#define __FUNCT__ "MatDestroy_MPIAIJ_PtAP"
PetscErrorCode MatDestroy_MPIAIJ_PtAP(Mat A)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *ptap;
  PetscObjectContainer container;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)A,"MatPtAPMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void **)&ptap);CHKERRQ(ierr);
    ierr = MatDestroy(ptap->B_loc);CHKERRQ(ierr);
    ierr = MatDestroy(ptap->B_oth);CHKERRQ(ierr);
    ierr = ISDestroy(ptap->isrowb);CHKERRQ(ierr);
    ierr = ISDestroy(ptap->iscolb);CHKERRQ(ierr);
    if (ptap->abi) {ierr = PetscFree(ptap->abi);CHKERRQ(ierr);}
    if (ptap->abj) {ierr = PetscFree(ptap->abj);CHKERRQ(ierr);}

    ierr = PetscObjectContainerDestroy(container);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject)A,"MatPtAPMPI",0);CHKERRQ(ierr);
    ierr = PetscFree(ptap);CHKERRQ(ierr); /* free ptap only when it was actually retrieved from the container */
  }

  ierr = MatDestroy_MPIAIJ(A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

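/*
   MatPtAP_MPIAIJ_MPIAIJ - driver for C = P^T*A*P with both A and P of type MPIAIJ.
   MAT_INITIAL_MATRIX: build P_loc/P_oth, attach the supporting struct to P, and run the
   symbolic product; MAT_REUSE_MATRIX: refresh P_loc/P_oth through the struct attached to P.
   The numeric product is computed at the end of both paths.
*/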
#undef __FUNCT__
#define __FUNCT__ "MatPtAP_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAP_MPIAIJ_MPIAIJ(Mat A,Mat P,MatReuse scall,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  Mat_MatMatMultMPI    *ptap;
  PetscObjectContainer container;

  PetscFunctionBegin;
  if (scall == MAT_INITIAL_MATRIX){
    ierr = PetscLogEventBegin(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
    ierr = PetscNew(Mat_MatMatMultMPI,&ptap);CHKERRQ(ierr);
    ptap->abi=PETSC_NULL; ptap->abj=PETSC_NULL;
    ptap->abnz_max = 0; /* symbolic A*P is not done yet */

    /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
    ierr = MatGetBrowsOfAoCols(A,P,scall,&ptap->isrowb,&ptap->iscolb,&ptap->brstart,&ptap->B_oth);CHKERRQ(ierr);

    /* get P_loc by taking all local rows of P */
    ierr = MatGetLocalMat(P,scall,&ptap->B_loc);CHKERRQ(ierr);

    /* attach the supporting struct to P for reuse */
    P->ops->destroy  = MatDestroy_MPIAIJ_PtAP;
    ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
    ierr = PetscObjectContainerSetPointer(container,ptap);CHKERRQ(ierr);
    ierr = PetscObjectCompose((PetscObject)P,"MatPtAPMPI",(PetscObject)container);CHKERRQ(ierr);

    /* now, compute symbolic local P^T*A*P */
    ierr = MatPtAPSymbolic_MPIAIJ_MPIAIJ(A,P,fill,C);CHKERRQ(ierr);/* the symbolic A*P structure needed by the numeric phase is computed and stashed as well */
    ierr = PetscLogEventEnd(MAT_PtAPSymbolic,A,P,0,0);CHKERRQ(ierr);
  } else if (scall == MAT_REUSE_MATRIX){
    ierr = PetscObjectQuery((PetscObject)P,"MatPtAPMPI",(PetscObject *)&container);CHKERRQ(ierr);
    if (container) {
      ierr = PetscObjectContainerGetPointer(container,(void **)&ptap);CHKERRQ(ierr);
    } else {
      SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
    }

    /* get P_oth by taking rows of P (= non-zero cols of local A) from other processors */
    ierr = MatGetBrowsOfAoCols(A,P,scall,&ptap->isrowb,&ptap->iscolb,&ptap->brstart,&ptap->B_oth);CHKERRQ(ierr);

    /* get P_loc by taking all local rows of P */
    ierr = MatGetLocalMat(P,scall,&ptap->B_loc);CHKERRQ(ierr);

  } else {
    SETERRQ1(PETSC_ERR_ARG_WRONG,"Invalid MatReuse %d",scall);
  }
  /* now, compute numeric local P^T*A*P */
  ierr = PetscLogEventBegin(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);
  ierr = MatPtAPNumeric_MPIAIJ_MPIAIJ(A,P,*C);CHKERRQ(ierr);
  ierr = PetscLogEventEnd(MAT_PtAPNumeric,A,P,0,0);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}

#undef __FUNCT__
#define __FUNCT__ "MatPtAPSymbolic_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPSymbolic_MPIAIJ_MPIAIJ(Mat A,Mat P,PetscReal fill,Mat *C)
{
  PetscErrorCode       ierr;
  Mat                  P_loc,P_oth;
  Mat_MatMatMultMPI    *ptap;
  PetscObjectContainer container;
  FreeSpaceList        free_space=PETSC_NULL,current_space=PETSC_NULL;
  Mat_MPIAIJ           *a=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ           *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ           *p_loc,*p_oth;
  PetscInt             *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pti,*ptj,*ptJ,*ajj,*pjj;
  PetscInt             *adi=ad->i,*adj=ad->j,*aoi=ao->i,*aoj=ao->j,nnz,cstart=a->cstart,cend=a->cend,col;
  PetscInt             pm=P->m,pn=P->n,nlnk,*lnk,*ci,*cj,*cji,*ptaj;
  PetscInt             aN=A->N,am=A->m,pN=P->N;
  PetscInt             i,j,k,ptnzi,arow,anzj,prow,pnzj,cnzi;
  PetscBT              lnkbt;
  PetscInt             prstart,prend,nprow_loc,nprow_oth;
  PetscInt             *ptadenserow_loc,*ptadenserow_oth,*ptasparserow_loc,*ptasparserow_oth;

  MPI_Comm             comm=A->comm;
  Mat                  B_mpi;
  PetscMPIInt          size,rank,tagi,tagj,*len_s,*len_si,*len_ri;
  PetscInt             **buf_rj,**buf_ri,**buf_ri_k;
  PetscInt             len,proc,*dnz,*onz,*owners;
  PetscInt             anzi,*bi,*bj,bnzi,nspacedouble=0;
  PetscInt             nrows,*buf_s,*buf_si,*buf_si_i,**nextrow,**nextci;
  MPI_Request          *si_waits,*sj_waits,*ri_waits,*rj_waits;
  MPI_Status           *status;
  Mat_Merge_SeqsToMPI  *merge;
  PetscInt             *apsymi,*apj,*apjdense,apnzj;

  PetscFunctionBegin;
  ierr = PetscObjectQuery((PetscObject)P,"MatPtAPMPI",(PetscObject *)&container);CHKERRQ(ierr);
  if (container) {
    ierr = PetscObjectContainerGetPointer(container,(void **)&ptap);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
  }
  /* compute symbolic C_seq = P_loc^T * A_loc * P */
  /*----------------------------------------------*/
  /* get data from P_loc and P_oth */
  P_loc=ptap->B_loc; P_oth=ptap->B_oth; prstart=ptap->brstart;
  p_loc=(Mat_SeqAIJ*)P_loc->data; p_oth=(Mat_SeqAIJ*)P_oth->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pi_oth=p_oth->i; pj_oth=p_oth->j;
  prend = prstart + pm;

  /* ---------- Compute symbolic A*P --------- */
  ierr = PetscMalloc((am+2)*sizeof(PetscInt),&apsymi);CHKERRQ(ierr);
  ptap->abi = apsymi;
  apsymi[0] = 0;
  ierr = PetscMalloc(pN*2*sizeof(PetscInt),&apj);CHKERRQ(ierr);
  ierr = PetscMemzero(apj,pN*2*sizeof(PetscInt));CHKERRQ(ierr);
  apjdense = apj + pN;
  /* Initial FreeSpace size is 2*nnz(A) */
  ierr = GetMoreSpace((PetscInt)(2*(adi[am]+aoi[am])),&free_space);CHKERRQ(ierr);
  current_space = free_space;

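  /*
     For each local row i of A, the column pattern of row i of A*P is the union of the
     column patterns of the rows of P picked out by the nonzeros of A(i,:): the diagonal
     part of A selects rows of P_loc, the off-diagonal part selects rows of P_oth.
     apjdense[] is a dense marker array over the pN global columns of P and apj[] collects
     the marked columns as a sparse (then sorted) index list.
  */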
  for (i=0;i<am;i++) {
    apnzj = 0;
    /* diagonal portion of A */
    anzi  = adi[i+1] - adi[i];
    for (j=0;j<anzi;j++) {
      prow = *adj;
      adj++;
      pnzj = pi_loc[prow+1] - pi_loc[prow];
      pjj  = pj_loc + pi_loc[prow];
      for (k=0;k<pnzj;k++) {
        if (!apjdense[pjj[k]]) {
          apjdense[pjj[k]] = -1;
          apj[apnzj++]     = pjj[k];
        }
      }
    }
    /* off-diagonal portion of A */
    anzi  = aoi[i+1] - aoi[i];
    for (j=0;j<anzi;j++) {
      col = a->garray[*aoj];
      if (col < cstart){
        prow = *aoj;
      } else if (col >= cend){
        prow = *aoj;
      } else {
        SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Off-diagonal portion of A has wrong column map");
      }
      aoj++;
      pnzj = pi_oth[prow+1] - pi_oth[prow];
      pjj  = pj_oth + pi_oth[prow];
      for (k=0;k<pnzj;k++) {
        if (!apjdense[pjj[k]]) {
          apjdense[pjj[k]] = -1;
          apj[apnzj++]     = pjj[k];
        }
      }
    }
    /* Sort the j index array for quick sparse axpy. */
    ierr = PetscSortInt(apnzj,apj);CHKERRQ(ierr);

    apsymi[i+1] = apsymi[i] + apnzj;
    if (ptap->abnz_max < apnzj) ptap->abnz_max = apnzj;

    /* If free space is not available, make more free space */
    /* Double the amount of total space in the list */
    if (current_space->local_remaining<apnzj) {
      ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }

    /* Copy the sorted column indices of this row into the free space */
    for (j=0; j<apnzj; j++) current_space->array[j] = apj[j];
    current_space->array           += apnzj;
    current_space->local_used      += apnzj;
    current_space->local_remaining -= apnzj;

    /* reset the dense markers for the next row */
    for (j=0;j<apnzj;j++) apjdense[apj[j]] = 0;
  }
  /* Allocate space for apsymj (ptap->abj), make the free-space list contiguous in it, */
  /* and destroy the list of free space and other temporary array(s) */
  ierr = PetscMalloc((apsymi[am]+1)*sizeof(PetscInt),&ptap->abj);CHKERRQ(ierr);
  ierr = MakeSpaceContiguous(&free_space,ptap->abj);CHKERRQ(ierr);
  ierr = PetscFree(apj);CHKERRQ(ierr);
  /* ---------- end of Compute symbolic A*P ---------- */
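  /*
     The A*P pattern just computed (row pointers ptap->abi, column indices ptap->abj, and the
     maximum row length ptap->abnz_max) is kept in ptap so the numeric routine can re-walk the
     same pattern and size its dense work array apa accordingly.
  */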

  /* get ij structure of P_loc^T */
  ierr = MatGetSymbolicTranspose_SeqAIJ(P_loc,&pti,&ptj);CHKERRQ(ierr);
  ptJ=ptj;

  /* Allocate ci array, arrays for fill computation and */
  /* free space for accumulating nonzero column info */
  ierr = PetscMalloc((pN+1)*sizeof(PetscInt),&ci);CHKERRQ(ierr);
  ci[0] = 0;

  ierr = PetscMalloc((4*aN+1)*sizeof(PetscInt),&ptadenserow_loc);CHKERRQ(ierr);
  ierr = PetscMemzero(ptadenserow_loc,(4*aN+1)*sizeof(PetscInt));CHKERRQ(ierr);
  ptasparserow_loc = ptadenserow_loc + aN;
  ptadenserow_oth  = ptasparserow_loc + aN;
  ptasparserow_oth = ptadenserow_oth + aN;

  /* create and initialize a linked list */
  nlnk = pN+1;
  ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  /* Set initial free space to be nnz(A) scaled by fill*P->N/P->M. */
  /* This should be reasonable if the sparsity of PtAP is similar to that of A. */
  nnz           = adi[am] + aoi[am];
  ierr          = GetMoreSpace((PetscInt)(fill*nnz*pN/aN+1),&free_space);CHKERRQ(ierr);
  current_space = free_space;

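  /*
     Row i of C corresponds to global column i of P.  The rows of P^T give, for each i, the
     set of rows arow of A with P(arow,i) != 0; each such row of A is expanded into its
     column set, kept separately for the diagonal part (rows of P_loc, shifted by prstart)
     and the off-diagonal part (rows of P_oth).  The union of the corresponding rows of
     P_loc/P_oth, accumulated in the sorted linked list lnk, is the column pattern of row i.
  */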
  /* determine symbolic info for each row of C: */
  adj = ad->j; aoj = ao->j;
  for (i=0;i<pN;i++) {
    ptnzi  = pti[i+1] - pti[i];
    nprow_loc = 0; nprow_oth = 0;
    /* i-th row of symbolic P_loc^T*A_loc: */
    for (j=0;j<ptnzi;j++) {
      arow = *ptJ++;
      /* diagonal portion of A */
      anzj = adi[arow+1] - adi[arow];
      ajj  = adj + adi[arow];
      for (k=0;k<anzj;k++) {
        col = ajj[k]+prstart;
        if (!ptadenserow_loc[col]) {
          ptadenserow_loc[col]    = -1;
          ptasparserow_loc[nprow_loc++] = col;
        }
      }
      /* off-diagonal portion of A */
      anzj = aoi[arow+1] - aoi[arow];
      ajj  = aoj + aoi[arow];
      for (k=0;k<anzj;k++) {
        col = a->garray[ajj[k]];  /* global col */
        if (col < cstart){
          col = ajj[k];
        } else if (col >= cend){
          col = ajj[k] + pm;
        } else {
          SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Off-diagonal portion of A has wrong column map");
        }
        if (!ptadenserow_oth[col]) {
          ptadenserow_oth[col]    = -1;
          ptasparserow_oth[nprow_oth++] = col;
        }
      }
    }

    /* using symbolic info of local PtA, determine symbolic info for row of C: */
    cnzi   = 0;
    /* rows of P_loc */
    ptaj = ptasparserow_loc;
    for (j=0; j<nprow_loc; j++) {
      prow = *ptaj++;
      prow -= prstart; /* shift back to a local row index of P_loc */
      pnzj = pi_loc[prow+1] - pi_loc[prow];
      pjj  = pj_loc + pi_loc[prow];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLAdd(pnzj,pjj,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      cnzi += nlnk;
    }
    /* rows of P_oth */
    ptaj = ptasparserow_oth;
    for (j=0; j<nprow_oth; j++) {
      prow = *ptaj++;
      if (prow >= prend) prow -= pm; /* shift back to a local row index of P_oth */
      pnzj = pi_oth[prow+1] - pi_oth[prow];
      pjj  = pj_oth + pi_oth[prow];
      /* add non-zero cols of P into the sorted linked list lnk */
      ierr = PetscLLAdd(pnzj,pjj,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
      cnzi += nlnk;
    }

    /* If free space is not available, make more free space */
    /* Double the amount of total space in the list */
    if (current_space->local_remaining<cnzi) {
      ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
    }

    /* Copy data into free space, and zero out denserows */
    ierr = PetscLLClean(pN,pN,cnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    current_space->array           += cnzi;
    current_space->local_used      += cnzi;
    current_space->local_remaining -= cnzi;

    for (j=0;j<nprow_loc; j++) {
      ptadenserow_loc[ptasparserow_loc[j]] = 0;
    }
    for (j=0;j<nprow_oth; j++) {
      ptadenserow_oth[ptasparserow_oth[j]] = 0;
    }

    /* Aside: Perhaps we should save the pta info for the numerical factorization. */
    /*        For now, we will recompute what is needed. */
    ci[i+1] = ci[i] + cnzi;
  }
  /* Clean up. */
  ierr = MatRestoreSymbolicTranspose_SeqAIJ(P_loc,&pti,&ptj);CHKERRQ(ierr);

  /* nnz is now stored in ci[pN], column indices are in the list of free space */
  /* Allocate space for cj, initialize cj, and */
  /* destroy list of free space and other temporary array(s) */
  ierr = PetscMalloc((ci[pN]+1)*sizeof(PetscInt),&cj);CHKERRQ(ierr);
  ierr = MakeSpaceContiguous(&free_space,cj);CHKERRQ(ierr);
  ierr = PetscFree(ptadenserow_loc);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* add C_seq into mpi C              */
  /*-----------------------------------*/
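  /*
     Each process owns pn rows of the final parallel matrix C (row ownership comes from
     merge->rowmap below).  Rows of this process's C_seq that belong to other processes are
     shipped out: the j-structure (column indices) and a compressed i-structure (row numbers
     plus row offsets) are sent separately, and the received pieces are later merged with the
     locally owned rows through the linked list to obtain the preallocation (dnz/onz) and the
     merged row structure bi/bj.
  */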
  free_space=PETSC_NULL; current_space=PETSC_NULL;

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = PetscNew(Mat_Merge_SeqsToMPI,&merge);CHKERRQ(ierr);

  /* determine row ownership */
  /*---------------------------------------------------------*/
  ierr = PetscMapCreate(comm,&merge->rowmap);CHKERRQ(ierr);
  ierr = PetscMapSetLocalSize(merge->rowmap,pn);CHKERRQ(ierr);
  ierr = PetscMapSetType(merge->rowmap,MAP_MPI);CHKERRQ(ierr);
  ierr = PetscMapGetGlobalRange(merge->rowmap,&owners);CHKERRQ(ierr);

  /* determine the number of messages to send and their lengths */
  /*-------------------------------------------------------------*/
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&len_si);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(PetscMPIInt),&merge->len_s);CHKERRQ(ierr);
  len_s = merge->len_s;
  len   = 0;  /* length of buf_si[] */
  merge->nsend = 0;
  for (proc=0; proc<size; proc++){
    len_si[proc] = 0;
    if (proc == rank){
      len_s[proc] = 0;
    } else {
      len_si[proc] = owners[proc+1] - owners[proc] + 1;
      len_s[proc] = ci[owners[proc+1]] - ci[owners[proc]]; /* number of nonzeros (col indices) to be sent to [proc] */
    }
    if (len_s[proc]) {
      merge->nsend++;
      nrows = 0;
      for (i=owners[proc]; i<owners[proc+1]; i++){
        if (ci[i+1] > ci[i]) nrows++;
      }
      len_si[proc] = 2*(nrows+1);
      len += len_si[proc];
    }
  }

  /* determine the number and length of messages to receive for ij-structure */
  /*-------------------------------------------------------------------------*/
  ierr = PetscGatherNumberOfMessages(comm,PETSC_NULL,len_s,&merge->nrecv);CHKERRQ(ierr);
  ierr = PetscGatherMessageLengths2(comm,merge->nsend,merge->nrecv,len_s,len_si,&merge->id_r,&merge->len_r,&len_ri);CHKERRQ(ierr);

  /* post the Irecv of j-structure */
  /*-------------------------------*/
  ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&tagj);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagj,merge->nrecv,merge->id_r,merge->len_r,&buf_rj,&rj_waits);CHKERRQ(ierr);

  /* post the Isend of j-structure */
  /*-------------------------------*/
  ierr = PetscMalloc((2*merge->nsend+1)*sizeof(MPI_Request),&si_waits);CHKERRQ(ierr);
  sj_waits = si_waits + merge->nsend;

  for (proc=0, k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    i = owners[proc];
    ierr = MPI_Isend(cj+ci[i],len_s[proc],MPIU_INT,proc,tagj,comm,sj_waits+k);CHKERRQ(ierr);
    k++;
  }

  /* receives and sends of j-structure are complete */
  /*------------------------------------------------*/
  ierr = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nrecv,rj_waits,status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nsend,sj_waits,status);CHKERRQ(ierr);

  /* send and recv i-structure */
  /*---------------------------*/
  ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&tagi);CHKERRQ(ierr);
  ierr = PetscPostIrecvInt(comm,tagi,merge->nrecv,merge->id_r,len_ri,&buf_ri,&ri_waits);CHKERRQ(ierr);

  ierr = PetscMalloc((len+1)*sizeof(PetscInt),&buf_s);CHKERRQ(ierr);
  buf_si = buf_s;  /* points to the beginning of k-th msg to be sent */
  for (proc=0,k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    /* form outgoing message for i-structure:
         buf_si[0]:                 nrows to be sent
               [1:nrows]:           row index (local to the destination process)
               [nrows+1:2*nrows+1]: i-structure index
    */
    /*-------------------------------------------*/
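    /*
       For example (hypothetical numbers): if only local rows 3 and 7 of the destination
       process are nonempty here, with 5 and 2 nonzeros respectively, then
         nrows  = 2
         buf_si = {2, 3, 7, 0, 5, 7}
       i.e. the count, the two local row numbers, and the cumulative i-structure offsets.
    */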
    nrows = len_si[proc]/2 - 1;
    buf_si_i    = buf_si + nrows+1;
    buf_si[0]   = nrows;
    buf_si_i[0] = 0;
    nrows = 0;
    for (i=owners[proc]; i<owners[proc+1]; i++){
      anzi = ci[i+1] - ci[i];
      if (anzi) {
        buf_si_i[nrows+1] = buf_si_i[nrows] + anzi; /* i-structure */
        buf_si[nrows+1] = i-owners[proc]; /* local row index */
        nrows++;
      }
    }
    ierr = MPI_Isend(buf_si,len_si[proc],MPIU_INT,proc,tagi,comm,si_waits+k);CHKERRQ(ierr);
    k++;
    buf_si += len_si[proc];
  }

  ierr = MPI_Waitall(merge->nrecv,ri_waits,status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nsend,si_waits,status);CHKERRQ(ierr);

  ierr = PetscLogInfo((PetscObject)A,"MatMerge_SeqsToMPI: nsend: %d, nrecv: %d\n",merge->nsend,merge->nrecv);CHKERRQ(ierr);
  for (i=0; i<merge->nrecv; i++){
    ierr = PetscLogInfo((PetscObject)A,"MatMerge_SeqsToMPI:   recv len_ri=%d, len_rj=%d from [%d]\n",len_ri[i],merge->len_r[i],merge->id_r[i]);CHKERRQ(ierr);
  }

  ierr = PetscFree(len_si);CHKERRQ(ierr);
  ierr = PetscFree(len_ri);CHKERRQ(ierr);
  ierr = PetscFree(rj_waits);CHKERRQ(ierr);
  ierr = PetscFree(si_waits);CHKERRQ(ierr);
  ierr = PetscFree(ri_waits);CHKERRQ(ierr);
  ierr = PetscFree(buf_s);CHKERRQ(ierr);
  ierr = PetscFree(status);CHKERRQ(ierr);

  /* compute a local seq matrix in each processor */
  /*----------------------------------------------*/
  /* allocate bi array and free space for accumulating nonzero column info */
  ierr = PetscMalloc((pn+1)*sizeof(PetscInt),&bi);CHKERRQ(ierr);
  bi[0] = 0;

  /* create and initialize a linked list */
  nlnk = pN+1;
  ierr = PetscLLCreate(pN,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);

  /* initial FreeSpace size is 2*(num of local nnz(C_seq)) */
  len  = ci[owners[rank+1]] - ci[owners[rank]];
  ierr = GetMoreSpace((PetscInt)(2*len+1),&free_space);CHKERRQ(ierr);
  current_space = free_space;

  /* determine symbolic info for each local row */
  ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
  nextrow = buf_ri_k + merge->nrecv;
  nextci  = nextrow + merge->nrecv;
  for (k=0; k<merge->nrecv; k++){
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows = *buf_ri_k[k];
    nextrow[k]  = buf_ri_k[k] + 1;  /* next row number of k-th received i-structure */
    nextci[k]   = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

  ierr = MatPreallocateInitialize(comm,pn,pn,dnz,onz);CHKERRQ(ierr);
  len = 0;
  for (i=0; i<pn; i++) {
    bnzi   = 0;
    /* add local non-zero cols of this proc's C_seq into lnk */
    arow   = owners[rank] + i;
    anzi   = ci[arow+1] - ci[arow];
    cji    = cj + ci[arow];
    ierr = PetscLLAdd(anzi,cji,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
    bnzi += nlnk;
    /* add received col data into lnk */
    for (k=0; k<merge->nrecv; k++){ /* k-th received message */
      if (i == *nextrow[k]) { /* i-th row */
        anzi = *(nextci[k]+1) - *nextci[k];
        cji  = buf_rj[k] + *nextci[k];
        ierr = PetscLLAdd(anzi,cji,pN,nlnk,lnk,lnkbt);CHKERRQ(ierr);
        bnzi += nlnk;
        nextrow[k]++; nextci[k]++;
      }
    }
    if (len < bnzi) len = bnzi;  /* len = max(bnzi) */

    /* if free space is not available, make more free space */
    if (current_space->local_remaining<bnzi) {
      ierr = GetMoreSpace(current_space->total_array_size,&current_space);CHKERRQ(ierr);
      nspacedouble++;
    }
    /* copy data into free space, then initialize lnk */
    ierr = PetscLLClean(pN,pN,bnzi,lnk,current_space->array,lnkbt);CHKERRQ(ierr);
    ierr = MatPreallocateSet(i+owners[rank],bnzi,current_space->array,dnz,onz);CHKERRQ(ierr);

    current_space->array           += bnzi;
    current_space->local_used      += bnzi;
    current_space->local_remaining -= bnzi;

    bi[i+1] = bi[i] + bnzi;
  }

  ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);

  ierr = PetscMalloc((bi[pn]+1)*sizeof(PetscInt),&bj);CHKERRQ(ierr);
  ierr = MakeSpaceContiguous(&free_space,bj);CHKERRQ(ierr);
  ierr = PetscLLDestroy(lnk,lnkbt);CHKERRQ(ierr);

  /* create symbolic parallel matrix B_mpi */
  /*---------------------------------------*/
  ierr = MatCreate(comm,pn,pn,PETSC_DETERMINE,PETSC_DETERMINE,&B_mpi);CHKERRQ(ierr);
  ierr = MatSetType(B_mpi,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(B_mpi,0,dnz,0,onz);CHKERRQ(ierr);
  ierr = MatPreallocateFinalize(dnz,onz);CHKERRQ(ierr);
  /* ierr = MatSetOption(B_mpi,MAT_COLUMNS_SORTED);CHKERRQ(ierr); -cause delay? */

  /* B_mpi is not ready for use - assembly will be done by MatPtAPNumeric_MPIAIJ_MPIAIJ() */
  B_mpi->assembled     = PETSC_FALSE;
  B_mpi->ops->destroy  = MatDestroy_MPIAIJ_SeqsToMPI;
  merge->bi            = bi;
  merge->bj            = bj;
  merge->ci            = ci;
  merge->cj            = cj;
  merge->buf_ri        = buf_ri;
  merge->buf_rj        = buf_rj;
  merge->C_seq         = PETSC_NULL;

  /* attach the supporting struct to B_mpi for reuse */
  ierr = PetscObjectContainerCreate(PETSC_COMM_SELF,&container);CHKERRQ(ierr);
  ierr = PetscObjectContainerSetPointer(container,merge);CHKERRQ(ierr);
  ierr = PetscObjectCompose((PetscObject)B_mpi,"MatMergeSeqsToMPI",(PetscObject)container);CHKERRQ(ierr);
  *C = B_mpi;

  PetscFunctionReturn(0);
}

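/*
   MatPtAPNumeric_MPIAIJ_MPIAIJ fills in the numerical values:
     1) re-walk the saved symbolic A*P pattern (ptap->abi/abj) to form each sparse row of A*P
        in the work array apa;
     2) scatter P_loc[i,:]^T * AP[i,:] into the sequential product C_seq (structure merge->ci/cj);
     3) ship the off-process rows of C_seq to their owners and merge local plus received values
        into each owned row of the parallel matrix C with MatSetValues().
*/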
#undef __FUNCT__
#define __FUNCT__ "MatPtAPNumeric_MPIAIJ_MPIAIJ"
PetscErrorCode MatPtAPNumeric_MPIAIJ_MPIAIJ(Mat A,Mat P,Mat C)
{
  PetscErrorCode       ierr;
  Mat_Merge_SeqsToMPI  *merge;
  Mat_MatMatMultMPI    *ptap;
  PetscObjectContainer cont_merge,cont_ptap;

  PetscInt       flops=0;
  Mat_MPIAIJ     *a=(Mat_MPIAIJ*)A->data;
  Mat_SeqAIJ     *ad=(Mat_SeqAIJ*)(a->A)->data,*ao=(Mat_SeqAIJ*)(a->B)->data;
  Mat_SeqAIJ     *p_loc,*p_oth;
  PetscInt       *adi=ad->i,*aoi=ao->i,*apj,cstart=a->cstart,cend=a->cend,col;
  PetscInt       *pi_loc,*pj_loc,*pi_oth,*pj_oth,*pJ,*pjj;
  PetscInt       i,j,k,anzi,pnzi,apnzj,nextap,pnzj,prow,crow;
  PetscInt       *cjj;
  MatScalar      *ada=ad->a,*aoa=ao->a,*apa,*paj,*cseqa,*caj;
  MatScalar      *pa_loc,*pA,*pa_oth;
  PetscInt       am=A->m,cN=C->N;
  PetscInt       nextp,*adj=ad->j,*aoj=ao->j;
  MPI_Comm             comm=C->comm;
  PetscMPIInt          size,rank,taga,*len_s;
  PetscInt             *owners;
  PetscInt             proc;
  PetscInt             **buf_ri,**buf_rj;
  PetscInt             cseqnzi,*bj_i,*bi,*bj,cseqrow,bnzi,nextcseqj; /* bi, bj, ba: for C (mpi mat) */
  PetscInt             nrows,**buf_ri_k,**nextrow,**nextcseqi;
  MPI_Request          *s_waits,*r_waits;
  MPI_Status           *status;
  MatScalar            **abuf_r,*ba_i;
  PetscInt             *cseqi,*cseqj;
  PetscInt             *cseqj_tmp;
  MatScalar            *cseqa_tmp;
  PetscInt             stages[2];
  PetscInt             *apsymi,*apsymj;

  PetscFunctionBegin;
  ierr = PetscLogStageRegister(&stages[0],"NumPtAP_local");CHKERRQ(ierr);
  ierr = PetscLogStageRegister(&stages[1],"NumPtAP_Comm");CHKERRQ(ierr);

  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

  ierr = PetscObjectQuery((PetscObject)C,"MatMergeSeqsToMPI",(PetscObject *)&cont_merge);CHKERRQ(ierr);
  if (cont_merge) {
    ierr = PetscObjectContainerGetPointer(cont_merge,(void **)&merge);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix C does not possess an object container");
  }
  ierr = PetscObjectQuery((PetscObject)P,"MatPtAPMPI",(PetscObject *)&cont_ptap);CHKERRQ(ierr);
  if (cont_ptap) {
    ierr = PetscObjectContainerGetPointer(cont_ptap,(void **)&ptap);CHKERRQ(ierr);
  } else {
    SETERRQ(PETSC_ERR_ARG_WRONGSTATE,"Matrix P does not possess an object container");
  }

  /* get data from symbolic products */
  p_loc=(Mat_SeqAIJ*)(ptap->B_loc)->data;
  p_oth=(Mat_SeqAIJ*)(ptap->B_oth)->data;
  pi_loc=p_loc->i; pj_loc=p_loc->j; pJ=pj_loc; pa_loc=p_loc->a; pA=pa_loc;
  pi_oth=p_oth->i; pj_oth=p_oth->j; pa_oth=p_oth->a;

  cseqi = merge->ci; cseqj=merge->cj;
  ierr  = PetscMalloc((cseqi[cN]+1)*sizeof(MatScalar),&cseqa);CHKERRQ(ierr);
  ierr  = PetscMemzero(cseqa,cseqi[cN]*sizeof(MatScalar));CHKERRQ(ierr);

  /* get data from symbolic A*P */
  ierr = PetscMalloc((ptap->abnz_max+1)*sizeof(MatScalar),&apa);CHKERRQ(ierr);
  ierr = PetscMemzero(apa,ptap->abnz_max*sizeof(MatScalar));CHKERRQ(ierr);

  /* compute numeric C_seq = P_loc^T*A_loc*P */
  ierr = PetscLogStagePush(stages[0]);CHKERRQ(ierr);
  apsymi = ptap->abi; apsymj = ptap->abj;
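  /*
     Row i of A*P is rebuilt with a sparse axpy: apj points at the (sorted) saved column
     pattern for this row and apa holds the matching values.  Because the pattern of each
     row of P is a subset of apj, a single forward scan with the two indices k (into apj)
     and nextp (into the P row) lines up the columns.
  */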
  for (i=0;i<am;i++) {
    /* form i-th sparse row of A*P */
    apnzj = apsymi[i+1] - apsymi[i];
    apj   = apsymj + apsymi[i];
    /* diagonal portion of A */
    anzi  = adi[i+1] - adi[i];
    for (j=0;j<anzi;j++) {
      prow = *adj;
      adj++;
      pnzj = pi_loc[prow+1] - pi_loc[prow];
      pjj  = pj_loc + pi_loc[prow];
      paj  = pa_loc + pi_loc[prow];
      nextp = 0;
      for (k=0; nextp<pnzj; k++) {
        if (apj[k] == pjj[nextp]) { /* col of AP == col of P */
          apa[k] += (*ada)*paj[nextp++];
        }
      }
      flops += 2*pnzj;
      ada++;
    }
    /* off-diagonal portion of A */
    anzi  = aoi[i+1] - aoi[i];
    for (j=0;j<anzi;j++) {
      col = a->garray[*aoj];
      if (col < cstart){
        prow = *aoj;
      } else if (col >= cend){
        prow = *aoj;
      } else {
        SETERRQ(PETSC_ERR_ARG_OUTOFRANGE,"Off-diagonal portion of A has wrong column map");
      }
      aoj++;
      pnzj = pi_oth[prow+1] - pi_oth[prow];
      pjj  = pj_oth + pi_oth[prow];
      paj  = pa_oth + pi_oth[prow];
      nextp = 0;
      for (k=0; nextp<pnzj; k++) {
        if (apj[k] == pjj[nextp]) { /* col of AP == col of P */
          apa[k] += (*aoa)*paj[nextp++];
        }
      }
      flops += 2*pnzj;
      aoa++;
    }

    /* Compute P_loc[i,:]^T*AP[i,:] using outer product */
    pnzi = pi_loc[i+1] - pi_loc[i];
    for (j=0;j<pnzi;j++) {
      nextap = 0;
      crow   = *pJ++;
      cjj    = cseqj + cseqi[crow];
      caj    = cseqa + cseqi[crow];
      /* Perform sparse axpy operation.  Note cjj includes apj. */
      for (k=0;nextap<apnzj;k++) {
        if (cjj[k]==apj[nextap]) caj[k] += (*pA)*apa[nextap++];
      }
      flops += 2*apnzj;
      pA++;
    }
    /* zero the current row info for A*P */
    ierr = PetscMemzero(apa,apnzj*sizeof(MatScalar));CHKERRQ(ierr);
  }

  ierr = PetscFree(apa);CHKERRQ(ierr);
  ierr = PetscLogStagePop();CHKERRQ(ierr);

  bi     = merge->bi;
  bj     = merge->bj;
  buf_ri = merge->buf_ri;
  buf_rj = merge->buf_rj;

  ierr   = PetscMalloc(size*sizeof(MPI_Status),&status);CHKERRQ(ierr);
  ierr   = PetscMapGetGlobalRange(merge->rowmap,&owners);CHKERRQ(ierr);

  /* send and recv matrix values */
  /*-----------------------------*/
  ierr = PetscLogStagePush(stages[1]);CHKERRQ(ierr);
  len_s = merge->len_s;
  ierr = PetscObjectGetNewTag((PetscObject)merge->rowmap,&taga);CHKERRQ(ierr);
  ierr = PetscPostIrecvScalar(comm,taga,merge->nrecv,merge->id_r,merge->len_r,&abuf_r,&r_waits);CHKERRQ(ierr);

  ierr = PetscMalloc((merge->nsend+1)*sizeof(MPI_Request),&s_waits);CHKERRQ(ierr);
  for (proc=0,k=0; proc<size; proc++){
    if (!len_s[proc]) continue;
    i = owners[proc];
    ierr = MPI_Isend(cseqa+cseqi[i],len_s[proc],MPIU_MATSCALAR,proc,taga,comm,s_waits+k);CHKERRQ(ierr);
    k++;
  }

  ierr = MPI_Waitall(merge->nrecv,r_waits,status);CHKERRQ(ierr);
  ierr = MPI_Waitall(merge->nsend,s_waits,status);CHKERRQ(ierr);
  ierr = PetscFree(status);CHKERRQ(ierr);

  ierr = PetscFree(s_waits);CHKERRQ(ierr);
  ierr = PetscFree(r_waits);CHKERRQ(ierr);

  /* insert mat values of mpimat */
  /*-----------------------------*/
  ierr = PetscMalloc(cN*sizeof(MatScalar),&ba_i);CHKERRQ(ierr);
  ierr = PetscMalloc((3*merge->nrecv+1)*sizeof(PetscInt**),&buf_ri_k);CHKERRQ(ierr);
  nextrow   = buf_ri_k + merge->nrecv;
  nextcseqi = nextrow + merge->nrecv;

  for (k=0; k<merge->nrecv; k++){
    buf_ri_k[k] = buf_ri[k]; /* beginning of k-th received i-structure */
    nrows = *(buf_ri_k[k]);
    nextrow[k]   = buf_ri_k[k]+1;  /* next row number of k-th received i-structure */
    nextcseqi[k] = buf_ri_k[k] + (nrows + 1); /* points to the next i-structure of k-th received i-structure */
  }

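  /* For each owned row i of C: clear ba_i, fold in this process's own C_seq values, then
     fold in the values received from the other processes (matched against the merged column
     pattern bj by the same forward-scan trick), and hand the finished row to MatSetValues(). */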
  /* set values of ba */
  for (i=0; i<C->m; i++) {
    cseqrow = owners[rank] + i; /* global row index of C_seq */
    bj_i = bj+bi[i];  /* col indices of the i-th row of C */
    bnzi = bi[i+1] - bi[i];
    ierr = PetscMemzero(ba_i,bnzi*sizeof(MatScalar));CHKERRQ(ierr);

    /* add local non-zero vals of this proc's C_seq into ba */
    cseqnzi   = cseqi[cseqrow+1] - cseqi[cseqrow];
    cseqj_tmp = cseqj + cseqi[cseqrow];
    cseqa_tmp = cseqa + cseqi[cseqrow];
    nextcseqj = 0;
    for (j=0; nextcseqj<cseqnzi; j++){
      if (*(bj_i + j) == cseqj_tmp[nextcseqj]){ /* bcol == cseqcol */
        ba_i[j] += cseqa_tmp[nextcseqj++];
      }
    }

    /* add received vals into ba */
    for (k=0; k<merge->nrecv; k++){ /* k-th received message */
      /* i-th row */
      if (i == *nextrow[k]) {
        cseqnzi   = *(nextcseqi[k]+1) - *nextcseqi[k];
        cseqj_tmp = buf_rj[k] + *(nextcseqi[k]);
        cseqa_tmp = abuf_r[k] + *(nextcseqi[k]);
        nextcseqj = 0;
        for (j=0; nextcseqj<cseqnzi; j++){
          if (*(bj_i + j) == cseqj_tmp[nextcseqj]){ /* bcol == cseqcol */
            ba_i[j] += cseqa_tmp[nextcseqj++];
          }
        }
        nextrow[k]++; nextcseqi[k]++;
      }
    }
    ierr = MatSetValues(C,1,&cseqrow,bnzi,bj_i,ba_i,INSERT_VALUES);CHKERRQ(ierr);
    flops += 2*bnzi;
  }
  ierr = MatAssemblyBegin(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(C,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscLogStagePop();CHKERRQ(ierr);

  ierr = PetscFree(cseqa);CHKERRQ(ierr);
  ierr = PetscFree(abuf_r);CHKERRQ(ierr);
  ierr = PetscFree(ba_i);CHKERRQ(ierr);
  ierr = PetscFree(buf_ri_k);CHKERRQ(ierr);
  ierr = PetscLogFlops(flops);CHKERRQ(ierr);

  PetscFunctionReturn(0);
}
793