#include <../src/vec/is/sf/impls/basic/sfpack.h>
#include <../src/vec/is/sf/impls/basic/sfbasic.h>

/* Convenience local types */
#if defined(PETSC_HAVE_MPI_LARGE_COUNT) && defined(PETSC_USE_64BIT_INDICES)
typedef MPI_Count PetscSFCount;
typedef MPI_Aint  PetscSFAint;
#else
typedef PetscMPIInt PetscSFCount;
typedef PetscMPIInt PetscSFAint;
#endif
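
/* Note (a summary, not asserted by this file alone): with MPI large-count support and
   64-bit PetscInt, counts and displacements are carried as MPI_Count/MPI_Aint so a
   single neighbor can exchange more than 2^31-1 units; MPIU_Ineighbor_alltoallv used
   below is then expected to resolve to the matching large-count MPI binding.
   Otherwise plain MPI ints suffice. */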

typedef struct {
  SFBASICHEADER;
  MPI_Comm      comms[2];                /* Communicators with distributed graph topology, one per direction */
  PetscBool     initialized[2];          /* Are the two communicators initialized? */
  PetscSFCount *rootcounts, *leafcounts; /* counts for non-distinguished ranks */
  PetscSFAint  *rootdispls, *leafdispls; /* displs for non-distinguished ranks */
  PetscMPIInt  *rootweights, *leafweights;
  PetscInt      rootdegree, leafdegree;
} PetscSF_Neighbor;
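
/* Design notes (summarizing the code below): one distributed-graph communicator is
   cached per direction (root-to-leaf and leaf-to-root) and built lazily, since
   building it is expensive. Counts/displs/weights cover only the non-distinguished
   ranks; distinguished ranks (e.g., the rank itself) communicate through shared
   memory rather than MPI. */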

/*===================================================================================*/
/*              Internal utility routines                                            */
/*===================================================================================*/

static inline PetscErrorCode PetscLogMPIMessages(PetscInt nsend, PetscSFCount *sendcnts, MPI_Datatype sendtype, PetscInt nrecv, PetscSFCount *recvcnts, MPI_Datatype recvtype)
{
  PetscFunctionBegin;
  if (PetscDefined(USE_LOG)) {
    petsc_isend_ct += (PetscLogDouble)nsend;
    petsc_irecv_ct += (PetscLogDouble)nrecv;

    if (sendtype != MPI_DATATYPE_NULL) {
      PetscMPIInt i, typesize;
      PetscCallMPI(MPI_Type_size(sendtype, &typesize));
      for (i = 0; i < nsend; i++) petsc_isend_len += (PetscLogDouble)(sendcnts[i] * typesize);
    }

    if (recvtype != MPI_DATATYPE_NULL) {
      PetscMPIInt i, typesize;
      PetscCallMPI(MPI_Type_size(recvtype, &typesize));
      for (i = 0; i < nrecv; i++) petsc_irecv_len += (PetscLogDouble)(recvcnts[i] * typesize);
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

/* Get the communicator with distributed graph topology. It is expensive to build, so we build it on demand (instead of at PetscSFSetUp time) */
static PetscErrorCode PetscSFGetDistComm_Neighbor(PetscSF sf, PetscSFDirection direction, MPI_Comm *distcomm)
{
  PetscSF_Neighbor *dat = (PetscSF_Neighbor *)sf->data;

  PetscFunctionBegin;
  if (!dat->initialized[direction]) {
    PetscInt           nrootranks, ndrootranks, nleafranks, ndleafranks;
    PetscMPIInt        indegree, outdegree;
    const PetscMPIInt *rootranks, *leafranks, *sources, *destinations;
    MPI_Comm           comm, *mycomm = &dat->comms[direction];

    PetscCall(PetscSFGetRootInfo_Basic(sf, &nrootranks, &ndrootranks, &rootranks, NULL, NULL));       /* Which ranks will access my roots (I am a destination) */
    PetscCall(PetscSFGetLeafInfo_Basic(sf, &nleafranks, &ndleafranks, &leafranks, NULL, NULL, NULL)); /* Which ranks' roots my leaves will access (I am a source) */
    indegree     = nrootranks - ndrootranks;
    outdegree    = nleafranks - ndleafranks;
    sources      = rootranks + ndrootranks;
    destinations = leafranks + ndleafranks;
    PetscCall(PetscObjectGetComm((PetscObject)sf, &comm));
    if (direction == PETSCSF_LEAF2ROOT) {
      PetscCallMPI(MPI_Dist_graph_create_adjacent(comm, indegree, sources, dat->rootweights, outdegree, destinations, dat->leafweights, MPI_INFO_NULL, 1 /*reorder*/, mycomm));
    } else { /* PETSCSF_ROOT2LEAF, so reverse sources & destinations */
      PetscCallMPI(MPI_Dist_graph_create_adjacent(comm, outdegree, destinations, dat->leafweights, indegree, sources, dat->rootweights, MPI_INFO_NULL, 1 /*reorder*/, mycomm));
    }
    dat->initialized[direction] = PETSC_TRUE;
  }
  *distcomm = dat->comms[direction];
  PetscFunctionReturn(PETSC_SUCCESS);
}
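
/* A worked example of the adjacency above (hypothetical ranks, for illustration):
   suppose this rank's leaves reference roots on ranks {2,3}, and its roots are
   referenced by leaves on rank {5}. Then sources = {5} and destinations = {2,3}.
   In the PETSCSF_LEAF2ROOT communicator this rank receives from 5 and sends to 2
   and 3; for PETSCSF_ROOT2LEAF the same arrays are passed with the roles swapped,
   reversing every edge without recomputing the rank lists. */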

// Start MPI_Ineighbor_alltoallv (only used for inter-process communication)
static PetscErrorCode PetscSFLinkStartCommunication_Neighbor(PetscSF sf, PetscSFLink link, PetscSFDirection direction)
{
  PetscSF_Neighbor *dat      = (PetscSF_Neighbor *)sf->data;
  MPI_Comm          distcomm = MPI_COMM_NULL;
  void             *rootbuf = NULL, *leafbuf = NULL;
  MPI_Request      *req = NULL;

  PetscFunctionBegin;
  if (direction == PETSCSF_ROOT2LEAF) {
    PetscCall(PetscSFLinkCopyRootBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host before sending */));
  } else {
    PetscCall(PetscSFLinkCopyLeafBufferInCaseNotUseGpuAwareMPI(sf, link, PETSC_TRUE /* device2host */));
  }

  PetscCall(PetscSFGetDistComm_Neighbor(sf, direction, &distcomm));
  PetscCall(PetscSFLinkGetMPIBuffersAndRequests(sf, link, direction, &rootbuf, &leafbuf, &req, NULL));
  PetscCall(PetscSFLinkSyncStreamBeforeCallMPI(sf, link, direction));

  if (dat->rootdegree || dat->leafdegree) { // Open MPI 3.0 errored when rootdegree = leafdegree = 0, so we skip the call in that case
    if (direction == PETSCSF_ROOT2LEAF) {
      PetscCallMPI(MPIU_Ineighbor_alltoallv(rootbuf, dat->rootcounts, dat->rootdispls, link->unit, leafbuf, dat->leafcounts, dat->leafdispls, link->unit, distcomm, req));
      PetscCall(PetscLogMPIMessages(dat->rootdegree, dat->rootcounts, link->unit, dat->leafdegree, dat->leafcounts, link->unit));
    } else {
      PetscCallMPI(MPIU_Ineighbor_alltoallv(leafbuf, dat->leafcounts, dat->leafdispls, link->unit, rootbuf, dat->rootcounts, dat->rootdispls, link->unit, distcomm, req));
      PetscCall(PetscLogMPIMessages(dat->leafdegree, dat->leafcounts, link->unit, dat->rootdegree, dat->rootcounts, link->unit));
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
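
/* Note: a neighborhood collective consumes a single MPI_Request (stored in
   rootreqs[], see PetscSFSetUp_Neighbor below); waiting on it and unpacking the
   received buffers are inherited from the Basic implementation (e.g.,
   PetscSFBcastEnd_Basic), which this type reuses unchanged. */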

static PetscErrorCode PetscSFSetCommunicationOps_Neighbor(PetscSF sf, PetscSFLink link)
{
  PetscFunctionBegin;
  link->StartCommunication = PetscSFLinkStartCommunication_Neighbor;
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*===================================================================================*/
/*              Implementations of SF public APIs                                    */
/*===================================================================================*/
static PetscErrorCode PetscSFSetUp_Neighbor(PetscSF sf)
{
  PetscSF_Neighbor *dat = (PetscSF_Neighbor *)sf->data;
  PetscInt          i, j, nrootranks, ndrootranks, nleafranks, ndleafranks;
  const PetscInt   *rootoffset, *leafoffset;
  PetscMPIInt       m, n;

  PetscFunctionBegin;
  /* SFNeighbor inherits from Basic */
  PetscCall(PetscSFSetUp_Basic(sf));
  /* SFNeighbor specific */
  sf->persistent = PETSC_FALSE;
  PetscCall(PetscSFGetRootInfo_Basic(sf, &nrootranks, &ndrootranks, NULL, &rootoffset, NULL));
  PetscCall(PetscSFGetLeafInfo_Basic(sf, &nleafranks, &ndleafranks, NULL, &leafoffset, NULL, NULL));
  dat->rootdegree = m = (PetscMPIInt)(nrootranks - ndrootranks);
  dat->leafdegree = n = (PetscMPIInt)(nleafranks - ndleafranks);
  sf->nleafreqs       = 0;
  dat->nrootreqs      = 1; // a neighborhood collective needs only one MPI_Request; we just put it in rootreqs[]

  /* Only set up MPI displs/counts for non-distinguished ranks. Distinguished ranks use shared memory */
  PetscCall(PetscMalloc6(m, &dat->rootdispls, m, &dat->rootcounts, m, &dat->rootweights, n, &dat->leafdispls, n, &dat->leafcounts, n, &dat->leafweights));
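  /* A worked example (hypothetical numbers): with nrootranks = 3, ndrootranks = 1 and
     rootoffset = {0, 4, 9, 15}, the loops below give rootdispls = {0, 5} and
     rootcounts = {5, 6}; displacements are rebased so that offset 0 is the start of
     the first non-distinguished rank's data in the buffer. */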

#if defined(PETSC_HAVE_MPI_LARGE_COUNT) && defined(PETSC_USE_64BIT_INDICES)
  for (i = ndrootranks, j = 0; i < nrootranks; i++, j++) {
    dat->rootdispls[j]  = rootoffset[i] - rootoffset[ndrootranks];
    dat->rootcounts[j]  = rootoffset[i + 1] - rootoffset[i];
    dat->rootweights[j] = (PetscMPIInt)((PetscReal)dat->rootcounts[j] / (PetscReal)PETSC_MAX_INT * 2147483647); /* Scale to range of PetscMPIInt */
  }

  for (i = ndleafranks, j = 0; i < nleafranks; i++, j++) {
    dat->leafdispls[j]  = leafoffset[i] - leafoffset[ndleafranks];
    dat->leafcounts[j]  = leafoffset[i + 1] - leafoffset[i];
    dat->leafweights[j] = (PetscMPIInt)((PetscReal)dat->leafcounts[j] / (PetscReal)PETSC_MAX_INT * 2147483647);
  }
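
  /* Worked arithmetic for the weight scaling above (illustrative): with 64-bit
     indices PETSC_MAX_INT = 2^63 - 1, so weight = count / (2^63 - 1) * (2^31 - 1);
     e.g., a count of 2^62 maps to roughly 2^30, while modest counts round down to 0.
     The weights only serve as relative hints to MPI for graph reordering. */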
#else
  for (i = ndrootranks, j = 0; i < nrootranks; i++, j++) {
    PetscCall(PetscMPIIntCast(rootoffset[i] - rootoffset[ndrootranks], &m));
    dat->rootdispls[j] = m;
    PetscCall(PetscMPIIntCast(rootoffset[i + 1] - rootoffset[i], &n));
    dat->rootcounts[j]  = n;
    dat->rootweights[j] = n;
  }

  for (i = ndleafranks, j = 0; i < nleafranks; i++, j++) {
    PetscCall(PetscMPIIntCast(leafoffset[i] - leafoffset[ndleafranks], &m));
    dat->leafdispls[j] = m;
    PetscCall(PetscMPIIntCast(leafoffset[i + 1] - leafoffset[i], &n));
    dat->leafcounts[j]  = n;
    dat->leafweights[j] = n;
  }
#endif
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFReset_Neighbor(PetscSF sf)
{
  PetscInt          i;
  PetscSF_Neighbor *dat = (PetscSF_Neighbor *)sf->data;

  PetscFunctionBegin;
  PetscCheck(!dat->inuse, PetscObjectComm((PetscObject)sf), PETSC_ERR_ARG_WRONGSTATE, "Outstanding operation has not been completed");
  PetscCall(PetscFree6(dat->rootdispls, dat->rootcounts, dat->rootweights, dat->leafdispls, dat->leafcounts, dat->leafweights));
  for (i = 0; i < 2; i++) {
    if (dat->initialized[i]) {
      PetscCallMPI(MPI_Comm_free(&dat->comms[i]));
      dat->initialized[i] = PETSC_FALSE;
    }
  }
  PetscCall(PetscSFReset_Basic(sf)); /* Common part */
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode PetscSFDestroy_Neighbor(PetscSF sf)
{
  PetscFunctionBegin;
  PetscCall(PetscSFReset_Neighbor(sf));
  PetscCall(PetscFree(sf->data));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode PetscSFCreate_Neighbor(PetscSF sf)
{
  PetscSF_Neighbor *dat;

  PetscFunctionBegin;
  sf->ops->CreateEmbeddedRootSF = PetscSFCreateEmbeddedRootSF_Basic;
  sf->ops->BcastBegin           = PetscSFBcastBegin_Basic;
  sf->ops->BcastEnd             = PetscSFBcastEnd_Basic;
  sf->ops->ReduceBegin          = PetscSFReduceBegin_Basic;
  sf->ops->ReduceEnd            = PetscSFReduceEnd_Basic;
  sf->ops->FetchAndOpBegin      = PetscSFFetchAndOpBegin_Basic;
  sf->ops->FetchAndOpEnd        = PetscSFFetchAndOpEnd_Basic;
  sf->ops->GetLeafRanks         = PetscSFGetLeafRanks_Basic;
  sf->ops->View                 = PetscSFView_Basic;

  sf->ops->SetUp               = PetscSFSetUp_Neighbor;
  sf->ops->Reset               = PetscSFReset_Neighbor;
  sf->ops->Destroy             = PetscSFDestroy_Neighbor;
  sf->ops->SetCommunicationOps = PetscSFSetCommunicationOps_Neighbor;

  PetscCall(PetscNew(&dat));
  sf->data = (void *)dat;
  PetscFunctionReturn(PETSC_SUCCESS);
}
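
/* Usage sketch (for illustration; not shown in this file): this constructor backs the
   type name PETSCSFNEIGHBOR, so one would typically select it with
     PetscCall(PetscSFSetType(sf, PETSCSFNEIGHBOR));
   or the option -sf_type neighbor, after which PetscSFBcastBegin/End and friends
   drive the neighborhood collectives set up above. */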