#include <../src/vec/is/sf/impls/basic/allgatherv/sfallgatherv.h>

/* PetscSFGetGraph is non-collective. An implementation should not have collective calls */
PETSC_INTERN PetscErrorCode PetscSFGetGraph_Allgatherv(PetscSF sf, PetscInt *nroots, PetscInt *nleaves, const PetscInt **ilocal, const PetscSFNode **iremote)
{
  PetscInt        j, k;
  const PetscInt *range;
  PetscMPIInt     size;

  PetscFunctionBegin;
  PetscCallMPI(MPI_Comm_size(PetscObjectComm((PetscObject)sf), &size));
  if (nroots) *nroots = sf->nroots;
  if (nleaves) *nleaves = sf->nleaves;
  if (ilocal) *ilocal = NULL; /* Contiguous leaves */
  if (iremote) {
    if (!sf->remote && sf->nleaves) { /* The && sf->nleaves makes sfgatherv able to inherit this routine */
      PetscCall(PetscLayoutGetRanges(sf->map, &range));
      PetscCall(PetscMalloc1(sf->nleaves, &sf->remote));
      sf->remote_alloc = sf->remote;
      for (PetscMPIInt i = 0; i < size; i++) {
        for (j = range[i], k = 0; j < range[i + 1]; j++, k++) {
          sf->remote[j].rank  = i;
          sf->remote[j].index = k;
        }
      }
    }
    *iremote = sf->remote;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFSetUp_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscMPIInt         size;
  PetscInt            i;
  const PetscInt     *range;
  MPI_Comm            comm;

  PetscFunctionBegin;
  PetscCall(PetscSFSetUp_Allgather(sf));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (sf->nleaves) { /* This if (sf->nleaves) test makes sfgatherv able to inherit this routine */
    PetscBool isallgatherv = PETSC_FALSE;

    PetscCall(PetscMalloc1(size, &dat->recvcounts));
    PetscCall(PetscMalloc1(size, &dat->displs));
    PetscCall(PetscLayoutGetRanges(sf->map, &range));

    for (i = 0; i < size; i++) {
      PetscCall(PetscMPIIntCast(range[i], &dat->displs[i]));
      PetscCall(PetscMPIIntCast(range[i + 1] - range[i], &dat->recvcounts[i]));
    }

    /* check if we actually have a one-to-all pattern */
    PetscCall(PetscObjectTypeCompare((PetscObject)sf, PETSCSFALLGATHERV, &isallgatherv));
    if (isallgatherv) {
      PetscMPIInt rank, nRanksWithZeroRoots;

      nRanksWithZeroRoots = (sf->nroots == 0) ? 1 : 0; /* I have no roots */
      PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &nRanksWithZeroRoots, 1, MPI_INT, MPI_SUM, comm));
      if (nRanksWithZeroRoots == size - 1) { /* Only one rank has roots, which indicates a bcast pattern */
        dat->bcast_pattern = PETSC_TRUE;
        PetscCallMPI(MPI_Comm_rank(comm, &rank));
        dat->bcast_root = sf->nroots > 0 ? rank : -1;
        PetscCallMPI(MPIU_Allreduce(MPI_IN_PLACE, &dat->bcast_root, 1, MPI_INT, MPI_MAX, comm));
      }
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

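/* Illustrative sketch (not part of this implementation): how an SF that reaches the setup above is
   typically obtained. The helper name ExampleCreateAllgathervSF is hypothetical; it assumes that
   PetscSFSetGraphWithPattern() with PETSCSF_PATTERN_ALLGATHER selects an allgather-family SF whose
   layout feeds the recvcounts/displs computed above. Giving every rank but one a zero local size is
   what triggers the bcast_pattern branch detected in PetscSFSetUp_Allgatherv(). */
#if 0 /* illustrative only, not compiled */
static PetscErrorCode ExampleCreateAllgathervSF(MPI_Comm comm, PetscInt nlocalroots, PetscSF *newsf)
{
  PetscSF     sf;
  PetscLayout map;

  PetscFunctionBegin;
  PetscCall(PetscLayoutCreate(comm, &map));
  PetscCall(PetscLayoutSetLocalSize(map, nlocalroots)); /* pass 0 on all but one rank to hit the bcast pattern */
  PetscCall(PetscLayoutSetUp(map));
  PetscCall(PetscSFCreate(comm, &sf));
  PetscCall(PetscSFSetGraphWithPattern(sf, map, PETSCSF_PATTERN_ALLGATHER)); /* every rank's leaves see all roots */
  PetscCall(PetscSFSetUp(sf));                                               /* dispatches to PetscSFSetUp_Allgatherv() for this type */
  PetscCall(PetscLayoutDestroy(&map));
  *newsf = sf;
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
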
PETSC_INTERN PetscErrorCode PetscSFReset_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat  = (PetscSF_Allgatherv *)sf->data;
  PetscSFLink         link = dat->avail, next;

  PetscFunctionBegin;
  PetscCall(PetscFree(dat->iranks));
  PetscCall(PetscFree(dat->ioffset));
  PetscCall(PetscFree(dat->irootloc));
  PetscCall(PetscFree(dat->recvcounts));
  PetscCall(PetscFree(dat->displs));
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  for (; link; link = next) {
    next = link->next;
    PetscCall(PetscSFLinkDestroy(sf, link));
  }
  dat->avail = NULL;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFDestroy_Allgatherv(PetscSF sf)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Allgatherv(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscMPIInt         sendcount, rank, nleaves;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req     = NULL;
  PetscSF_Allgatherv *dat     = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));

  if (dat->bcast_pattern && rank == dat->bcast_root) PetscCall((*link->Memcpy)(link, link->leafmtype_mpi, leafbuf, link->rootmtype_mpi, rootbuf, (size_t)sendcount * link->unitbytes));
  /* Ready the buffers for MPI */
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCall(PetscMPIIntCast(sf->nleaves, &nleaves));
  if (dat->bcast_pattern) PetscCallMPI(MPIU_Ibcast(leafbuf, nleaves, unit, dat->bcast_root, comm, req));
  else PetscCallMPI(MPIU_Iallgatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, comm, req));
  PetscFunctionReturn(PETSC_SUCCESS);
}

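/* A user-level view of what PetscSFBcastBegin_Allgatherv() implements: with MPI_REPLACE the roots of
   all ranks are concatenated into every rank's contiguous leaf array (the MPIU_Iallgatherv above), or
   broadcast from the single root owner when bcast_pattern is set. A minimal sketch; the helper name
   ExampleBcastAllgatherv is hypothetical and assumes leafdata has room for sf->nleaves entries. */
#if 0 /* illustrative only, not compiled */
/* With 2 ranks and 1 root per rank: rootdata = {10} on rank 0 and {20} on rank 1.
   After the bcast below every rank's leafdata is {10, 20}. */
static PetscErrorCode ExampleBcastAllgatherv(PetscSF sf, const PetscInt *rootdata, PetscInt *leafdata)
{
  PetscFunctionBegin;
  PetscCall(PetscSFBcastBegin(sf, MPIU_INT, rootdata, leafdata, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(sf, MPIU_INT, rootdata, leafdata, MPI_REPLACE));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
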
static PetscErrorCode PetscSFReduceBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType leafmtype, const void *leafdata, PetscMemType rootmtype, void *rootdata, MPI_Op op)
{
  PetscSFLink         link;
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  PetscInt            rstart;
  PetscMPIInt         rank, count, recvcount;
  MPI_Comm            comm;
  void               *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request        *req     = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_REDUCE, &link));
  if (op == MPI_REPLACE) {
    /* REPLACE is only meaningful when all processes have the same leafdata to reduce.
       Therefore copying from local leafdata is fine */
    PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));
    PetscCall((*link->Memcpy)(link, rootmtype, rootdata, leafmtype, (const char *)leafdata + (size_t)rstart * link->unitbytes, (size_t)sf->nroots * link->unitbytes));
    if (PetscMemTypeDevice(leafmtype) && PetscMemTypeHost(rootmtype)) PetscCall((*link->SyncStream)(link));
  } else {
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata));
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
    PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_LEAF2ROOT, &rootbuf, &leafbuf, &req, NULL));
    PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
    if (dat->bcast_pattern) {
      PetscInt     nleaves = sf->nleaves;
      PetscInt     nreal;
      PetscMPIInt  nleavesi;
      MPI_Datatype baseunit = unit;

      PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_REAL, &nreal));
      if (nreal > 0) {
        baseunit = MPIU_REAL;
        nleaves *= nreal;
#if PetscDefined(HAVE_COMPLEX)
      } else {
        PetscInt ncomplex;

        PetscCall(MPIPetsc_Type_compare_contig(unit, MPIU_COMPLEX, &ncomplex));
        if (ncomplex > 0) {
          baseunit = MPIU_COMPLEX;
          nleaves *= ncomplex;
        }
#endif
      }
      PetscCall(PetscMPIIntCast(nleaves, &nleavesi));
#if defined(PETSC_HAVE_OPENMPI) /* Workaround: cuda-aware Open MPI 4.1.3 does not support MPI_Ireduce() with device buffers */
      *req = MPI_REQUEST_NULL;  /* Set NULL so that we can safely MPI_Wait(req) */
      PetscCallMPI(MPIU_Reduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm));
#else
      PetscCallMPI(MPIU_Ireduce(leafbuf, rootbuf, nleavesi, baseunit, op, dat->bcast_root, comm, req));
#endif
    } else { /* Reduce leafdata, then scatter to rootdata */
      PetscCallMPI(MPI_Comm_rank(comm, &rank));
      PetscCall(PetscMPIIntCast(dat->rootbuflen[PETSCSF_REMOTE], &recvcount));
      /* Allocate a separate leaf buffer on rank 0 */
      if (rank == 0 && !link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]) {
        PetscCall(PetscSFMalloc(sf, link->leafmtype_mpi, sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes, (void **)&link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi]));
      }
      /* In case we already copied leafdata from device to host (i.e., no use_gpu_aware_mpi), we need to adjust leafbuf on rank 0 */
      if (rank == 0 && link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi] == leafbuf) leafbuf = MPI_IN_PLACE;
      PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
      PetscCallMPI(MPI_Reduce(leafbuf, link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], count, link->basicunit, op, 0, comm)); /* Must do reduce with MPI builtin datatype basicunit */
      PetscCallMPI(MPIU_Iscatterv(link->leafbuf_alloc[PETSCSF_REMOTE][link->leafmtype_mpi], dat->recvcounts, dat->displs, unit, rootbuf, recvcount, unit, 0, comm, req));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

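/* A user-level view of what PetscSFReduceBegin_Allgatherv() implements: every rank contributes its
   full leaf array and each root receives op applied across all ranks' copies of the matching leaf
   entry (a reduce to the root owner when bcast_pattern is set, otherwise reduce-then-scatter). A
   minimal sketch; the helper name ExampleReduceAllgatherv is hypothetical. */
#if 0 /* illustrative only, not compiled */
/* With 2 ranks and a single root on rank 0 (bcast pattern): leafdata = {2} on rank 0 and {3} on rank 1.
   After the reduce below rootdata on rank 0 is {5} = 2 + 3. */
static PetscErrorCode ExampleReduceAllgatherv(PetscSF sf, const PetscInt *leafdata, PetscInt *rootdata)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReduceBegin(sf, MPIU_INT, leafdata, rootdata, MPI_SUM));
  PetscCall(PetscSFReduceEnd(sf, MPIU_INT, leafdata, rootdata, MPI_SUM));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
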
PETSC_INTERN PetscErrorCode PetscSFReduceEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, const void *leafdata, void *rootdata, MPI_Op op)
{
  PetscSFLink link;

  PetscFunctionBegin;
  if (op == MPI_REPLACE) {
    /* A rare case happens when op is MPI_REPLACE, using GPUs but no GPU-aware MPI. In PetscSFReduceBegin_Allgather(v)
       we did a device-to-device copy and in effect finished the communication. But PetscSFLinkFinishCommunication()
       in PetscSFReduceEnd_Basic() sees that there is a rootbuf and calls PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(),
       which does a host-to-device memory copy on rootbuf, wrongly overwriting the results. So we do not reuse
       PetscSFReduceEnd_Basic() in this case, and just reclaim the link.
     */
    PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
    PetscCall(PetscSFLinkReclaim(sf, &link));
  } else {
    PetscCall(PetscSFReduceEnd_Basic(sf, unit, leafdata, rootdata, op));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFBcastToZero_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, const void *rootdata, PetscMemType leafmtype, void *leafdata)
{
  PetscSFLink         link;
  PetscMPIInt         rank;
  PetscMPIInt         sendcount;
  MPI_Comm            comm;
  PetscSF_Allgatherv *dat     = (PetscSF_Allgatherv *)sf->data;
  void               *rootbuf = NULL, *leafbuf = NULL; /* buffer seen by MPI */
  MPI_Request        *req     = NULL;

  PetscFunctionBegin;
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, MPI_REPLACE, PETSCSF_BCAST, &link));
  PetscCall(PetscSFLinkPackRootData(sf, link, PETSCSF_REMOTE, rootdata));
  PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCall(PetscMPIIntCast(sf->nroots, &sendcount));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, PETSCSF_ROOT2LEAF, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link));
  PetscCallMPI(MPIU_Igatherv(rootbuf, sendcount, unit, leafbuf, dat->recvcounts, dat->displs, unit, 0 /*rank 0*/, comm, req));

  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));
  PetscCall(PetscSFLinkFinishCommunication(sf, link, PETSCSF_ROOT2LEAF));
  PetscCallMPI(MPI_Comm_rank(PetscObjectComm((PetscObject)sf), &rank));
  if (rank == 0 && PetscMemTypeDevice(leafmtype) && !sf->use_gpu_aware_mpi) PetscCall((*link->Memcpy)(link, PETSC_MEMTYPE_DEVICE, leafdata, PETSC_MEMTYPE_HOST, link->leafbuf[PETSC_MEMTYPE_HOST], sf->leafbuflen[PETSCSF_REMOTE] * link->unitbytes));
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* This routine is very tricky (I believe it is rarely used with this kind of graph so we just provide a simple but not-optimal implementation).

   Suppose we have three ranks. Rank 0 has a root with value 1. Ranks 0,1,2 each have a leaf with value 2,3,4 respectively. The leaves are connected
   to the root on rank 0. Suppose op=MPI_SUM and ranks 0,1,2 get the root state in their rank order. By definition of this routine, rank 0 sees 1
   in the root, fetches it into its leafupdate, then updates the root to 1 + 2 = 3; rank 1 sees 3 in the root, fetches it into its leafupdate, then updates
   the root to 3 + 3 = 6; rank 2 sees 6 in the root, fetches it into its leafupdate, then updates the root to 6 + 4 = 10. At the end, leafupdate on ranks
   0,1,2 is 1,3,6 respectively, and the root is 10.

   We use a simpler implementation. From the same initial state, we copy leafdata to leafupdate
                 rank-0   rank-1   rank-2
     Root          1
     Leaf          2        3        4
     Leafupdate    2        3        4

   Do MPI_Exscan on leafupdate,
                 rank-0   rank-1   rank-2
     Root          1
     Leaf          2        3        4
     Leafupdate    2        2        5

   BcastAndOp from root to leafupdate,
                 rank-0   rank-1   rank-2
     Root          1
     Leaf          2        3        4
     Leafupdate    3        3        6

   Copy root to leafupdate on rank-0
                 rank-0   rank-1   rank-2
     Root          1
     Leaf          2        3        4
     Leafupdate    1        3        6

   Reduce from leaf to root,
                 rank-0   rank-1   rank-2
     Root         10
     Leaf          2        3        4
     Leafupdate    1        3        6
*/
PETSC_INTERN PetscErrorCode PetscSFFetchAndOpBegin_Allgatherv(PetscSF sf, MPI_Datatype unit, PetscMemType rootmtype, void *rootdata, PetscMemType leafmtype, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscSFLink link;
  MPI_Comm    comm;
  PetscMPIInt count;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCheck(!PetscMemTypeDevice(rootmtype) && !PetscMemTypeDevice(leafmtype), comm, PETSC_ERR_SUP, "FetchAndOp on device memory is not supported");
  /* Copy leafdata to leafupdate */
  PetscCall(PetscSFLinkCreate(sf, unit, rootmtype, rootdata, leafmtype, leafdata, op, PETSCSF_FETCH, &link));
  PetscCall(PetscSFLinkPackLeafData(sf, link, PETSCSF_REMOTE, leafdata)); /* Sync the device */
  PetscCall((*link->Memcpy)(link, leafmtype, leafupdate, leafmtype, leafdata, sf->nleaves * link->unitbytes));
  PetscCall(PetscSFLinkGetInUse(sf, unit, rootdata, leafdata, PETSC_OWN_POINTER, &link));

  /* Exscan on leafupdate and then BcastAndOp rootdata to leafupdate */
  if (op == MPI_REPLACE) {
    PetscMPIInt size, rank, prev, next;
    PetscCallMPI(MPI_Comm_rank(comm, &rank));
    PetscCallMPI(MPI_Comm_size(comm, &size));
    prev = rank ? rank - 1 : MPI_PROC_NULL;
    next = (rank < size - 1) ? rank + 1 : MPI_PROC_NULL;
    PetscCall(PetscMPIIntCast(sf->nleaves, &count));
    PetscCallMPI(MPI_Sendrecv_replace(leafupdate, count, unit, next, link->tag, prev, link->tag, comm, MPI_STATUS_IGNORE));
  } else {
    PetscCall(PetscMPIIntCast(sf->nleaves * link->bs, &count));
    PetscCallMPI(MPI_Exscan(MPI_IN_PLACE, leafupdate, count, link->basicunit, op, comm));
  }
  PetscCall(PetscSFLinkReclaim(sf, &link));
  PetscCall(PetscSFBcastBegin(sf, unit, rootdata, leafupdate, op));
  PetscCall(PetscSFBcastEnd(sf, unit, rootdata, leafupdate, op));

  /* Bcast roots to rank 0's leafupdate */
  PetscCall(PetscSFBcastToZero_Private(sf, unit, rootdata, leafupdate)); /* Using this line makes Allgather SFs able to inherit this routine */

  /* Reduce leafdata to rootdata */
  PetscCall(PetscSFReduceBegin(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFFetchAndOpEnd_Allgatherv(PetscSF sf, MPI_Datatype unit, void *rootdata, const void *leafdata, void *leafupdate, MPI_Op op)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReduceEnd(sf, unit, leafdata, rootdata, op));
  PetscFunctionReturn(PETSC_SUCCESS);
}

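/* The comment above PetscSFFetchAndOpBegin_Allgatherv() walks through a three-rank fetch-and-add;
   this is a sketch of the corresponding calls, assuming an SF with that graph (one root on rank 0,
   one leaf per rank). The helper name ExampleFetchAndAdd is hypothetical. Per the worked example,
   after the calls rootdata on rank 0 is {10} and leafupdate on ranks 0,1,2 is {1}, {3}, {6}. */
#if 0 /* illustrative only, not compiled */
static PetscErrorCode ExampleFetchAndAdd(PetscSF sf, PetscInt *rootdata, const PetscInt *leafdata, PetscInt *leafupdate)
{
  PetscFunctionBegin;
  PetscCall(PetscSFFetchAndOpBegin(sf, MPIU_INT, rootdata, leafdata, leafupdate, MPI_SUM));
  PetscCall(PetscSFFetchAndOpEnd(sf, MPIU_INT, rootdata, leafdata, leafupdate, MPI_SUM));
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif
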
/* Get root ranks accessing my leaves */
PETSC_INTERN PetscErrorCode PetscSFGetRootRanks_Allgatherv(PetscSF sf, PetscMPIInt *nranks, const PetscMPIInt **ranks, const PetscInt **roffset, const PetscInt **rmine, const PetscInt **rremote)
{
  PetscInt        j, k, size;
  const PetscInt *range;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  if (sf->nranks && !sf->ranks) { /* On rank!=0, sf->nranks=0. The sf->nranks test makes this routine also work for sfgatherv */
    size = sf->nranks;
    PetscCall(PetscLayoutGetRanges(sf->map, &range));
    PetscCall(PetscMalloc4(size, &sf->ranks, size + 1, &sf->roffset, sf->nleaves, &sf->rmine, sf->nleaves, &sf->rremote));
    for (PetscMPIInt i = 0; i < size; i++) sf->ranks[i] = i;
    PetscCall(PetscArraycpy(sf->roffset, range, size + 1));
    for (PetscInt i = 0; i < sf->nleaves; i++) sf->rmine[i] = i; /* rmine are never NULL even for contiguous leaves */
    for (PetscMPIInt i = 0; i < size; i++) {
      for (j = range[i], k = 0; j < range[i + 1]; j++, k++) sf->rremote[j] = k;
    }
  }

  if (nranks) *nranks = sf->nranks;
  if (ranks) *ranks = sf->ranks;
  if (roffset) *roffset = sf->roffset;
  if (rmine) *rmine = sf->rmine;
  if (rremote) *rremote = sf->rremote;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Get leaf ranks accessing my roots */
PETSC_INTERN PetscErrorCode PetscSFGetLeafRanks_Allgatherv(PetscSF sf, PetscMPIInt *niranks, const PetscMPIInt **iranks, const PetscInt **ioffset, const PetscInt **irootloc)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;
  MPI_Comm            comm;
  PetscMPIInt         size, rank;

  PetscFunctionBegin;
  /* Lazily construct these large arrays if users really need them for this type of SF. Very likely, they do not */
  PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  if (niranks) *niranks = size;

  /* PetscSF_Basic has distinguished incoming ranks. Here we do not need that. But we must put self as the first and
     sort other ranks. See comments in PetscSFSetUp_Basic about MatGetBrowsOfAoCols_MPIAIJ on why.
   */
  if (iranks) {
    if (!dat->iranks) {
      PetscCall(PetscMalloc1(size, &dat->iranks));
      dat->iranks[0] = rank;
      for (PetscMPIInt i = 0, j = 1; i < size; i++) {
        if (i == rank) continue;
        dat->iranks[j++] = i;
      }
    }
    *iranks = dat->iranks; /* dat->iranks was init'ed to NULL by PetscNew */
  }

  if (ioffset) {
    if (!dat->ioffset) {
      PetscCall(PetscMalloc1(size + 1, &dat->ioffset));
      for (PetscMPIInt i = 0; i <= size; i++) dat->ioffset[i] = i * sf->nroots;
    }
    *ioffset = dat->ioffset;
  }

  if (irootloc) {
    if (!dat->irootloc) {
      PetscCall(PetscMalloc1(sf->nleaves, &dat->irootloc));
      for (PetscMPIInt i = 0; i < size; i++) {
        for (PetscInt j = 0; j < sf->nroots; j++) dat->irootloc[i * sf->nroots + j] = j;
      }
    }
    *irootloc = dat->irootloc;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFCreateLocalSF_Allgatherv(PetscSF sf, PetscSF *out)
{
  PetscInt     i, nroots, nleaves, rstart, *ilocal;
  PetscSFNode *iremote;
  PetscSF      lsf;

  PetscFunctionBegin;
  nleaves = sf->nleaves ? sf->nroots : 0; /* sf->nleaves can be zero with SFGather(v) */
  nroots  = nleaves;
  PetscCall(PetscMalloc1(nleaves, &ilocal));
  PetscCall(PetscMalloc1(nleaves, &iremote));
  PetscCall(PetscLayoutGetRange(sf->map, &rstart, NULL));

  for (i = 0; i < nleaves; i++) {
    ilocal[i]        = rstart + i; /* lsf does not change leaf indices */
    iremote[i].rank  = 0;          /* rank in PETSC_COMM_SELF */
    iremote[i].index = i;          /* root index */
  }

  PetscCall(PetscSFCreate(PETSC_COMM_SELF, &lsf));
  PetscCall(PetscSFSetGraph(lsf, nroots, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER));
  PetscCall(PetscSFSetUp(lsf));
  *out = lsf;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Allgatherv(PetscSF sf)
{
  PetscSF_Allgatherv *dat = (PetscSF_Allgatherv *)sf->data;

  PetscFunctionBegin;
  sf->ops->BcastEnd  = PetscSFBcastEnd_Basic;
  sf->ops->ReduceEnd = PetscSFReduceEnd_Allgatherv;

  sf->ops->SetUp           = PetscSFSetUp_Allgatherv;
  sf->ops->Reset           = PetscSFReset_Allgatherv;
  sf->ops->Destroy         = PetscSFDestroy_Allgatherv;
  sf->ops->GetRootRanks    = PetscSFGetRootRanks_Allgatherv;
  sf->ops->GetLeafRanks    = PetscSFGetLeafRanks_Allgatherv;
  sf->ops->GetGraph        = PetscSFGetGraph_Allgatherv;
  sf->ops->BcastBegin      = PetscSFBcastBegin_Allgatherv;
  sf->ops->ReduceBegin     = PetscSFReduceBegin_Allgatherv;
  sf->ops->FetchAndOpBegin = PetscSFFetchAndOpBegin_Allgatherv;
  sf->ops->FetchAndOpEnd   = PetscSFFetchAndOpEnd_Allgatherv;
  sf->ops->CreateLocalSF   = PetscSFCreateLocalSF_Allgatherv;
  sf->ops->BcastToZero     = PetscSFBcastToZero_Allgatherv;

  sf->collective = PETSC_TRUE;

  PetscCall(PetscNew(&dat));
  dat->bcast_root = -1;
  sf->data        = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}