#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

PETSC_INTERN PetscErrorCode PetscSFBcastBegin_Gatherv(PetscSF, MPI_Datatype, PetscMemType, const void *, PetscMemType, void *, MPI_Op);

/* PetscSFGetGraph() is non-collective. An implementation must not make collective calls */
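/* The graph is built lazily: every process has all sf->nroots indices as leaves, and leaf j is connected
   to the root on the process that owns global index j in the layout sf->map, at local index j - range[owner]. */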
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote)
{
  PetscInt        i, j, k;
  const PetscInt *range;
  PetscMPIInt     size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (nroots) *nroots = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal) *ilocal = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      PetscCall(PetscLayoutGetRanges(sf->map, &range));
      PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
      sf->remote_alloc = sf->remote;
      for (i = 0; i < size; i++) {
        for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(0);
}

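/* Build the recvcounts[] and displs[] arrays (as PetscMPIInt) from the root layout; they are fed directly to
   MPIU_Iallgatherv()/MPIU_Iscatterv() in the Bcast/Reduce implementations below. */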
PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscMPIInt         size;
  PetscInt            i;
  const PetscInt     *range;

  PetscFunctionBegin;
  PetscCall(PetscSFSetUp_Allgather(sf));
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscCall(PetscMalloc1(size, &dat->recvcounts));
    PetscCall(PetscMalloc1(size, &dat->displs));
    PetscCall(PetscLayoutGetRanges(sf->map, &range));

    for (i = 0; i < size; i++) {
      PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
      PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
    }
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat  = (PetscSF_Allgatherv *)sf->data;
  PetscSFLink         link = dat->avail, next;

  PetscFunctionBegin;
  PetscCall(PetscFree(dat->iranks));
  PetscCall(PetscFree(dat->ioffset));
  PetscCall(PetscFree(dat->irootloc));
  PetscCall(PetscFree(dat->recvcounts));
  PetscCall(PetscFree(dat->displs));
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  for (; link; link = next) {
    next = link->next;
    PetscCall(PetscSFLinkDestroy(sf, link));
  }
  dat->avail = NULL;
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Allgatherv(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(0);
}

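/* Bcast roots to leaves: each process contributes its sf->nroots packed root entries and every process
   receives the concatenation of all roots, via a single MPIU_Iallgatherv() call. */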
static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscMPIInt         sendcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
  PetscFunctionReturn(0);
}

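/* Reduce leaves to roots. With MPI_REPLACE this degenerates to a local copy, since all processes are assumed to
   hold identical leafdata. Otherwise we MPI_Reduce() the leaf buffers onto rank 0 and then MPIU_Iscatterv() the
   reduced values back to the processes owning the corresponding roots. */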
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscInt            rstart;
  PetscMPIInt         rank, count, recvcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce, so copying from the local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
  } else {
    /* Reduce leafdata, then scatter to rootdata */
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
    /* Allocate a separate leaf buffer on rank 0 */
    if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
      PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
    }
    /* In case we have already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
    if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, PETSCSF_LEAF2ROOT));
    PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do the reduction with the MPI built-in datatype basicunit */
    PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op)
{
  PetscSFLink link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case arises when op is MPI_REPLACE on GPUs without GPU-aware MPI. In PetscSFReduceBegin_Allgather(v)
       we did a device-to-device copy that in effect finished the communication. But PetscSFLinkFinishCommunication()
       in PetscSFReduceEnd_Basic() sees a rootbuf and calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device copy on rootbuf and wrongly overwrites the result. So we do not defer to
       PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
     */
    PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
    PetscCall(PetscSFLinkReclaim(sf, &link));
  } else {
    PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
  }
  PetscFunctionReturn(0);
}

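/* Bcast roots to the leaves on rank 0 only: run the Gatherv flavor of BcastBegin with MPI_REPLACE and finish
   the communication here, so only rank 0's leafdata receives the roots. */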
static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink link;
  PetscMPIInt rank;

  PetscFunctionBegin;
  PetscCall(PetscSFBcastBegin_Gatherv(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(0);
}

/* This routine is tricky. It is rarely used with this kind of graph, so we provide a simple but non-optimal implementation.

   Suppose we have three ranks. Rank 0 has a root with value 1, and ranks 0,1,2 have a leaf with value 2,3,4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in root, fetches it into its leafupdate, then updates root to 1 + 2 = 3; rank 1 sees 3 in root, fetches it into its leafupdate, then updates
   root to 3 + 3 = 6; rank 2 sees 6 in root, fetches it into its leafupdate, then updates root to 6 + 4 = 10. At the end, leafupdate on ranks
   0,1,2 is 1,3,6 respectively, and root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       3         4

   Do MPI_Exscan on leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  2       2         5

   BcastAndOp from root to leafupdate,
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  3       3         6

   Copy root to leafupdate on rank-0
             rank-0   rank-1    rank-2
        Root     1
        Leaf     2       3         4
     Leafupdate  1       3         6

   Reduce from leaf to root,
             rank-0   rank-1    rank-2
        Root     10
        Leaf     2       3         4
     Leafupdate  1       3         6
*/
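/* A minimal usage sketch (hypothetical, for illustration only; sf is assumed to be an SFALLGATHERV whose
   graph matches the example above):

     PetscMPIInt rank;
     PetscScalar rootdata = 1.0, leafdata, leafupdate;
     PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
     leafdata = 2.0 + rank;
     PetscCall(PetscSFFetchAndOpBegin(sf, MPIU_SCALAR, &rootdata, &leafdata, &leafupdate, MPI_SUM));
     PetscCall(PetscSFFetchAndOpEnd(sf, MPIU_SCALAR, &rootdata, &leafdata, &leafupdate, MPI_SUM));

   Unlike a true atomic fetch-and-op, the implementation below fixes the visiting order with the MPI_Exscan(),
   so leafupdate receives exactly the rank-ordered partial results shown in the tables above. */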
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
  PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
  PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, sf->nleaves * link->unitbytes));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size, rank, prev, next;
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCallMPI(MPI_Comm_size(comm, &size));
    prev = rank ? rank - 1 : MPI_PROC_NULL;
    next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
    PetscCall(PetscMPIIntCast(sf->nleaves, &count));
    PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
  } else {
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
  PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));

  /* Bcast roots to rank 0's leafupdate */
  PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(0);
}

/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote)
{
  PetscInt        i, j, k, size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank != 0, sf->nranks = 0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscCall(PetscLayoutGetRanges(sf->map, &range));
    PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
    for (i = 0; i < size; i++) sf->ranks[i] = i;
    PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
    for (i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine is never NULL, even for contiguous leaves */
    for (i = 0; i < size; i++) {
      for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
    }
  }

  if (nranks) *nranks = sf->nranks;
  if (ranks) *ranks = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine) *rmine = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(0);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  MPI_Comm            comm;
  PetscMPIInt         size, rank;
  PetscInt            i, j;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that, but we must put self first and
     sort the other ranks. See the comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ for why.
   */
  if (iranks) {
    if (!dat->iranks) {
      PetscCall(PetscMalloc1(size, &dat->iranks));
      dat->iranks[0] = rank;
      for (i = 0, j = 1; i < size; i++) {
        if (i == rank) continue;
        dat->iranks[j++] = i;
      }
    }
    *iranks = dat->iranks; /* dat->iranks was initialized to NULL by PetscNew() */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
      for (i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
      for (i = 0; i < size; i++) {
        for (j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(0);
}

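/* The local SF maps each on-process root to itself: leaf indices keep the global SF's leaf numbering
   (offset by rstart), while the roots live at local indices 0..nroots-1 on PETSC_COMM_SELF. */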
PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out)
{
  PetscInt     i, nroots, nleaves, rstart, *ilocal;
  PetscSFNode *iremote;
  PetscSF      lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));

  for (i = 0; i < nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
  PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(0);
}

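/* BcastEnd can reuse the Basic implementation as is, but ReduceEnd needs the Allgatherv-specific variant to
   handle the MPI_REPLACE-on-device corner case described in PetscSFReduceEnd_Allgatherv(). */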
PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(0);
}