#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>
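
/* SFALLGATHERV: a star forest whose communication pattern matches MPI_Allgatherv. Every process
   owns sf->nroots contiguous roots (distributed as described by the layout sf->map), and every
   process has sf->nleaves = (global number of roots) contiguous leaves mirroring all roots.
   Bcast (root-to-leaf) therefore maps to a single (i)allgatherv and Reduce (leaf-to-root) to a
   reduce-then-scatter, as implemented below.

   Illustrative usage sketch (assumptions: PetscSFSetGraphWithPattern() with
   PETSCSF_PATTERN_ALLGATHER is available to the caller, and error handling follows this file's
   CHKERRQ convention; comm, map, rootdata and leafdata are provided by the caller, where rootdata
   has sf->nroots entries per rank and leafdata has the global number of root entries on every rank):

     PetscSF sf;
     CHKERRQ(PetscSFCreate(comm,&sf));
     CHKERRQ(PetscSFSetGraphWithPattern(sf,map,PETSCSF_PATTERN_ALLGATHER));
     CHKERRQ(PetscSFSetUp(sf));
     CHKERRQ(PetscSFBcastBegin(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE));
     CHKERRQ(PetscSFBcastEnd(sf,MPIU_SCALAR,rootdata,leafdata,MPI_REPLACE));
     CHKERRQ(PetscSFDestroy(&sf));
*/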

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF,MPI_Datatype,PetscMemType,const void*,PetscMemType,void*,MPI_Op);

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
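/* Illustrative example (derived from the construction below): with 3 ranks and local root counts
   {2,1,3}, the layout ranges are {0,2,3,6}. Every rank then has 6 contiguous leaves, ilocal = NULL,
   and iremote = {(0,0),(0,1),(1,0),(2,0),(2,1),(2,2)}, i.e. leaf j points to root j-range[r] on
   the rank r that owns global index j. */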
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf,PetscInt *nroots,PetscInt *nleaves,const PetscInt **ilocal,const PetscSFNode **iremote)
{
  PetscInt       i,j,k;
  const PetscInt *range;
  PetscMPIInt    size;

  PetscFunctionBegin;
  CHKERRMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size));
  if (nroots)  *nroots  = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal)  *ilocal  = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      CHKERRQ(PetscLayoutGetRanges(sf->map,&range));
      CHKERRQ(PetscMalloc1(sf->nleaves,&sf->remote));
      sf->remote_alloc = sf->remote;
      for (i=0; i<size; i++) {
        for (j=range[i],k=0; j<range[i+1]; j++,k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(0);
}

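/* For the same example layout {0,2,3,6}, SetUp below produces recvcounts = {2,1,3} and
   displs = {0,2,3}; these per-rank counts/offsets are later passed to MPIU_Iallgatherv()
   and MPIU_Iscatterv(). */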
PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  PetscMPIInt        size;
  PetscInt           i;
  const PetscInt     *range;

  PetscFunctionBegin;
  CHKERRQ(PetscSFSetUp_Allgather(sf));
  CHKERRMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf),&size));
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    CHKERRQ(PetscMalloc1(size,&dat->recvcounts));
    CHKERRQ(PetscMalloc1(size,&dat->displs));
    CHKERRQ(PetscLayoutGetRanges(sf->map,&range));

    for (i=0; i<size; i++) {
      CHKERRQ(PetscMPIIntCast(range[i],&dat->displs[i]));
      CHKERRQ(PetscMPIIntCast(range[i+1]-range[i],&dat->recvcounts[i]));
    }
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
  PetscSFLink            link = dat->avail,next;

  PetscFunctionBegin;
  CHKERRQ(PetscFree(dat->iranks));
  CHKERRQ(PetscFree(dat->ioffset));
  CHKERRQ(PetscFree(dat->irootloc));
  CHKERRQ(PetscFree(dat->recvcounts));
  CHKERRQ(PetscFree(dat->displs));
  PetscCheck(!dat->inuse,PetscObjectComm((PetscObject)sf),PETSC_ERR_ARG_WRONGSTATE,"Outstanding operation has not been completed");
  for (; link; link=next) {next = link->next; CHKERRQ(PetscSFLinkDestroy(sf,link));}
  dat->avail = NULL;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  CHKERRQ(PetscSFReset_Allgatherv(sf));
  CHKERRQ(PetscFree(sf->data));
  PetscFunctionReturn(0);
}

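/* Root-to-leaf broadcast: each rank contributes its sf->nroots packed root entries and every rank
   receives the concatenation of all contributions into its leaf buffer, i.e. a single
   MPIU_Iallgatherv() with the recvcounts/displs computed in SetUp. */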
static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata,MPI_Op op)
{
  PetscSFLink            link;
  PetscMPIInt            sendcount;
  MPI_Comm               comm;
  void                   *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request            *req;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  CHKERRQ(PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_BCAST,&link));
  CHKERRQ(PetscSFLinkPackRootData(sf,link,PETSCSF_REMOTE,rootdata));
  CHKERRQ(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */));
  CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
  CHKERRQ(PetscMPIIntCast(sf->nroots,&sendcount));
  CHKERRQ(PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_ROOT2LEAF,&rootbuf,&leafbuf,&req,NULL));
  CHKERRQ(PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_ROOT2LEAF));
  CHKERRMPI(MPIU_Iallgatherv(rootbuf,sendcount,unit,leafbuf,dat->recvcounts,dat->displs,unit,comm,req));
  PetscFunctionReturn(0);
}

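/* Leaf-to-root reduction. Two cases:
   - op == MPI_REPLACE: every rank's leafdata is assumed identical, so each rank simply copies the
     slice [rstart, rstart+nroots) of its own leafdata into rootdata; no communication is needed.
   - otherwise: leaf contributions are combined with MPI_Reduce() onto rank 0 using the MPI builtin
     datatype, then the combined result is scattered back to the owning ranks with MPIU_Iscatterv()
     using the recvcounts/displs from SetUp. */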
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType leafmtype,const void *leafdata,PetscMemType rootmtype,void *rootdata,MPI_Op op)
{
  PetscSFLink            link;
  PetscSF_Allgatherv     *dat = (PetscSF_Allgatherv*)sf->data;
  PetscInt               rstart;
  PetscMPIInt            rank,count,recvcount;
  MPI_Comm               comm;
  void                   *rootbuf = NULL,*leafbuf = NULL;
  MPI_Request            *req;

  PetscFunctionBegin;
  CHKERRQ(PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_REDUCE,&link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce. Therefore copying from local leafdata is fine */
    CHKERRQ(PetscLayoutGetRange(sf->map,&rstart,NULL));
    CHKERRQ((*link->Memcpy)(link,rootmtype,rootdata,leafmtype,(const char*)leafdata+(size_t)rstart*link->unitbytes,(size_t)sf->nroots*link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) CHKERRQ((*link->SyncStream)(link));
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
    CHKERRMPI(MPI_Comm_rank(comm,&rank));
    CHKERRQ(PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata));
    CHKERRQ(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf,link,PETSC_TRUE/* device2host before sending */));
    CHKERRQ(PetscSFLinkGetMPIBuffersAndRequests(sf,link,PETSCSF_LEAF2ROOT,&rootbuf,&leafbuf,&req,NULL));
    CHKERRQ(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE],&recvcount));
    /* Allocate a separate leaf buffer on rank 0 */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      CHKERRQ(PetscSFMalloc(sf,link->leafmtype_mpi,sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes,(void**)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    /* In case we already copied leafdata from device to host (i.e., not using GPU-aware MPI), we need to adjust leafbuf on rank 0 */
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    CHKERRQ(PetscMPIIntCast(sf->nleaves*link->bs,&count));
    CHKERRQ(PetscSFLinkSyncStreamBeforeCallMPI(sf,link,PETSCSF_LEAF2ROOT));
    CHKERRMPI(MPI_Reduce(leafbuf,link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],count,link->basicunit,op,0,comm)); /* Must do the reduce with the MPI builtin datatype basicunit */
    CHKERRMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi],dat->recvcounts,dat->displs,unit,rootbuf,recvcount,unit,0,comm,req));
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,const void *leafdata,void *rootdata,MPI_Op op)
{
  PetscSFLink           link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case happens when op is MPI_REPLACE, using GPUs but no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v)
      we did a device to device copy and in effect finished the communication. But PetscSFLinkFinishCommunication()
      in PetscSFReduceEnd_Basic() sees that a rootbuf exists and therefore calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
      which does a host to device memory copy on rootbuf, wrongly overwriting the results. So in this case we do not
      defer to PetscSFReduceEnd_Basic(), and just reclaim the link.
     */
    CHKERRQ(PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link));
    CHKERRQ(PetscSFLinkReclaim(sf,&link));
  } else {
    CHKERRQ(PetscSFReduceEnd_Basic(sf,unit,leafdata,rootdata,op));
  }
  PetscFunctionReturn(0);
}

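/* Like Bcast, but only rank 0's leaves receive the root data (a gatherv to rank 0). It reuses
   PetscSFBcastBegin_Gatherv() and finishes the request here; FetchAndOp below reaches it through
   PetscSFBcastToZero_Private(). */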
static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,const void *rootdata,PetscMemType leafmtype,void *leafdata)
{
  PetscSFLink            link;
  PetscMPIInt            rank;

  PetscFunctionBegin;
  CHKERRQ(PetscSFBcastBegin_Gatherv(sf,unit,rootmtype,rootdata,leafmtype,leafdata,MPI_REPLACE));
  CHKERRQ(PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link));
  CHKERRQ(PetscSFLinkFinishCommunication(sf,link,PETSCSF_ROOT2LEAF));
  CHKERRMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf),&rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) {
    CHKERRQ((*link->Memcpy)(link,PETSC_MEMTYPE_DEVICE,leafdata,PETSC_MEMTYPE_HOST,link->leafbuf[PETSC_MEMTYPE_HOST],sf->leafbuflen[PETSCSF_REMOTE]*link->unitbytes));
  }
  CHKERRQ(PetscSFLinkReclaim(sf,&link));
  PetscFunctionReturn(0);
}

/* This routine is tricky (it is rarely used with this kind of graph, so we just provide a simple but non-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0,1,2 have a leaf with value 2,3,4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 access the root state in rank order. By definition of this routine, rank 0 sees 1
   in root, fetches it into its leafupdate, then updates root to 1 + 2 = 3; rank 1 sees 3 in root, fetches it into its leafupdate, then updates
   root to 3 + 3 = 6; rank 2 sees 6 in root, fetches it into its leafupdate, then updates root to 6 + 4 = 10.  At the end, leafupdate on ranks
   0,1,2 is 1,3,6 respectively, and root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       3         4

   Do MPI_Exscan on leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       2         5

   BcastAndOp from root to leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  3       3         6

   Copy root to leafupdate on rank-0
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Reduce from leaf to root,
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf,MPI_Datatype unit,PetscMemType rootmtype,void *rootdata,PetscMemType leafmtype,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscSFLink            link;
  MPI_Comm               comm;
  PetscMPIInt            count;

  PetscFunctionBegin;
  CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
  PetscCheckFalse(PetscMemTypeDevice(rootmtype) || PetscMemTypeDevice(leafmtype),comm,PETSC_ERR_SUP,"FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  CHKERRQ(PetscSFLinkCreate(sf,unit,rootmtype,rootdata,leafmtype,leafdata,op,PETSCSF_FETCH,&link));
  CHKERRQ(PetscSFLinkPackLeafData(sf,link,PETSCSF_REMOTE,leafdata)); /* Sync the device */
  CHKERRQ((*link->Memcpy)(link,leafmtype,leafupdate,leafmtype,leafdata,sf->nleaves*link->unitbytes));
  CHKERRQ(PetscSFLinkGetInUse(sf,unit,rootdata,leafdata,PETSC_OWN_POINTER,&link));

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size,rank,prev,next;
    CHKERRMPI(MPI_Comm_rank(comm,&rank));
    CHKERRMPI(MPI_Comm_size(comm,&size));
    prev = rank ?            rank-1 : MPI_PROC_NULL;
    next = (rank < size-1) ? rank+1 : MPI_PROC_NULL;
    CHKERRQ(PetscMPIIntCast(sf->nleaves,&count));
    CHKERRMPI(MPI_Sendrecv_replace(leafupdate,count,unit,next,link->tag,prev,link->tag,comm,MPI_STATUS_IGNORE));
  } else {
    CHKERRQ(PetscMPIIntCast(sf->nleaves*link->bs,&count));
    CHKERRMPI(MPI_Exscan(MPI_IN_PLACE,leafupdate,count,link->basicunit,op,comm));
  }
  CHKERRQ(PetscSFLinkReclaim(sf,&link));
  CHKERRQ(PetscSFBcastBegin(sf,unit,rootdata,leafupdate,op));
  CHKERRQ(PetscSFBcastEnd(sf,unit,rootdata,leafupdate,op));

  /* Bcast roots to rank 0's leafupdate */
  CHKERRQ(PetscSFBcastToZero_Private(sf,unit,rootdata,leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  CHKERRQ(PetscSFReduceBegin(sf,unit,leafdata,rootdata,op));
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf,MPI_Datatype unit,void *rootdata,const void *leafdata,void *leafupdate,MPI_Op op)
{
  PetscFunctionBegin;
  CHKERRQ(PetscSFReduceEnd(sf,unit,leafdata,rootdata,op));
  PetscFunctionReturn(0);
}

/* Get root ranks accessing my leaves */
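/* Continuing the example layout {0,2,3,6}: ranks = {0,1,2}, roffset = {0,2,3,6},
   rmine = {0,1,2,3,4,5} (identity, since leaves are contiguous) and rremote = {0,1,0,0,1,2},
   i.e. the local root index on the owning rank. */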
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf,PetscInt *nranks,const PetscMPIInt **ranks,const PetscInt **roffset,const PetscInt **rmine,const PetscInt **rremote)
{
  PetscInt       i,j,k,size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank!=0, sf->nranks=0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    CHKERRQ(PetscLayoutGetRanges(sf->map,&range));
    CHKERRQ(PetscMalloc4(size,&sf->ranks,size+1,&sf->roffset,sf->nleaves,&sf->rmine,sf->nleaves,&sf->rremote));
    for (i=0; i<size; i++) sf->ranks[i] = i;
    CHKERRQ(PetscArraycpy(sf->roffset,range,size+1));
    for (i=0; i<sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL even for contiguous leaves */
    for (i=0; i<size; i++) {
      for (j=range[i],k=0; j<range[i+1]; j++,k++) sf->rremote[j] = k;
    }
  }

  if (nranks)  *nranks  = sf->nranks;
  if (ranks)   *ranks   = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine)   *rmine   = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(0);
}

/* Get leaf ranks accessing my roots */
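/* With the layout above every rank reports niranks = size, iranks = {self, the other ranks in
   increasing order}, ioffset = {0, nroots, 2*nroots, ...} and irootloc repeating {0,...,nroots-1}
   once per rank, since every rank's leaves access all local roots. */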
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf,PetscInt *niranks,const PetscMPIInt **iranks,const PetscInt **ioffset,const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;
  MPI_Comm           comm;
  PetscMPIInt        size,rank;
  PetscInt           i,j;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  CHKERRQ(PetscObjectGetComm((PetscObject)sf,&comm));
  CHKERRMPI(MPI_Comm_size(comm,&size));
  CHKERRMPI(MPI_Comm_rank(comm,&rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that, but we must put self first and
     sort the other ranks. See comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
   */
  if (iranks) {
    if (!dat->iranks) {
      CHKERRQ(PetscMalloc1(size,&dat->iranks));
      dat->iranks[0] = rank;
      for (i=0,j=1; i<size; i++) {if (i == rank) continue; dat->iranks[j++] = i;}
    }
    *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNewLog */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      CHKERRQ(PetscMalloc1(size+1,&dat->ioffset));
      for (i=0; i<=size; i++) dat->ioffset[i] = i*sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      CHKERRQ(PetscMalloc1(sf->nleaves,&dat->irootloc));
      for (i=0; i<size; i++) {
        for (j=0; j<sf->nroots; j++) dat->irootloc[i*sf->nroots+j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(0);
}

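/* The local SF (on PETSC_COMM_SELF) keeps only the edges between this rank's own roots and leaves.
   With the example layout, rank 2 gets nroots = nleaves = 3, ilocal = {3,4,5} (its slice of the
   global leaf space), and each leaf connects to the local root with the same offset. */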
PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf,PetscSF *out)
{
  PetscInt       i,nroots,nleaves,rstart,*ilocal;
  PetscSFNode    *iremote;
  PetscSF        lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  CHKERRQ(PetscMalloc1(nleaves,&ilocal));
  CHKERRQ(PetscMalloc1(nleaves,&iremote));
  CHKERRQ(PetscLayoutGetRange(sf->map,&rstart,NULL));

  for (i=0; i<nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  CHKERRQ(PetscSFCreate(PETSC_COMM_SELF,&lsf));
  CHKERRQ(PetscSFSetGraph(lsf,nroots,nleaves,ilocal,PETSC_OWN_POINTER,iremote,PETSC_OWN_POINTER));
  CHKERRQ(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv*)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd        = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd       = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  CHKERRQ(PetscNewLog(sf,&dat));
  sf->data = (void*)dat;
  PetscFunctionReturn(0);
}