xref: /petsc/src/sys/objects/tagm.c (revision bebe2cf65d55febe21a5af8db2bd2e168caaa2e7)
1 
2 /*
      Some PETSc utilities
4 */
5 #include <petsc/private/petscimpl.h>             /*I    "petscsys.h"   I*/
6 /* ---------------------------------------------------------------- */
7 /*
8    A simple way to manage tags inside a communicator.
9 
10    It uses the attributes to determine if a new communicator
11       is needed and to store the available tags.
12 
13 */
14 
15 
16 #undef __FUNCT__
17 #define __FUNCT__ "PetscObjectGetNewTag"
18 /*@C
19     PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
20     processors that share the object MUST call this routine EXACTLY the same
21     number of times.  This tag should only be used with the current objects
22     communicator; do NOT use it with any other MPI communicator.
23 
24     Collective on PetscObject
25 
26     Input Parameter:
27 .   obj - the PETSc object; this must be cast with a (PetscObject), for example,
28          PetscObjectGetNewTag((PetscObject)mat,&tag);
29 
30     Output Parameter:
31 .   tag - the new tag
32 
33     Level: developer
34 
35     Concepts: tag^getting
36     Concepts: message tag^getting
37     Concepts: MPI message tag^getting
38 
39 .seealso: PetscCommGetNewTag()
40 @*/
41 PetscErrorCode  PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
42 {
43   PetscErrorCode ierr;
44 
45   PetscFunctionBegin;
46   ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
47   PetscFunctionReturn(0);
48 }
49 
50 #undef __FUNCT__
51 #define __FUNCT__ "PetscCommGetNewTag"
52 /*@
53     PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
54     processors that share the communicator MUST call this routine EXACTLY the same
55     number of times.  This tag should only be used with the current objects
56     communicator; do NOT use it with any other MPI communicator.
57 
58     Collective on comm
59 
60     Input Parameter:
61 .   comm - the MPI communicator
62 
63     Output Parameter:
64 .   tag - the new tag
65 
66     Level: developer
67 
68     Concepts: tag^getting
69     Concepts: message tag^getting
70     Concepts: MPI message tag^getting
71 
72 .seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
73 @*/
74 PetscErrorCode  PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
75 {
76   PetscErrorCode   ierr;
77   PetscCommCounter *counter;
78   PetscMPIInt      *maxval,flg;
79 
80   PetscFunctionBegin;
81   PetscValidIntPointer(tag,2);
82 
83   ierr = MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
84   if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
85 
86   if (counter->tag < 1) {
87     ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
88     ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
89     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
90     counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
91   }
92 
93   *tag = counter->tag--;
94 #if defined(PETSC_USE_DEBUG)
95   /*
96      Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
97   */
98   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
99 #endif
100   PetscFunctionReturn(0);
101 }
102 
103 #undef __FUNCT__
104 #define __FUNCT__ "PetscCommDuplicate"
105 /*@C
106   PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.
107 
108   Collective on MPI_Comm
109 
110   Input Parameters:
111 . comm_in - Input communicator
112 
113   Output Parameters:
114 + comm_out - Output communicator.  May be comm_in.
115 - first_tag - Tag available that has not already been used with this communicator (you may
116               pass in NULL if you do not need a tag)
117 
118   PETSc communicators are just regular MPI communicators that keep track of which
119   tags have been used to prevent tag conflict. If you pass a non-PETSc communicator into
120   a PETSc creation routine it will attach a private communicator for use in the objects communications.
121   The internal MPI_Comm is used to perform all the MPI calls for PETSc, the outer MPI_Comm is a user
122   level MPI_Comm that may be performing communication for the user or other library and so IS NOT used by PETSc.
123 
124   Level: developer
125 
126   Concepts: communicator^duplicate
127 
128 .seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
129 @*/
PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  /* If comm_in already carries the PETSc tag counter it IS a PETSc inner comm and can be returned as-is */
  ierr = MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) {  /* this is NOT a PETSc comm */
    union {MPI_Comm comm; void *ptr;} ucomm;  /* union used to pass an MPI_Comm through the void* MPI attribute interface */
    /* check if this communicator has a PETSc communicator embedded in it */
    ierr = MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr);

      /* tags are issued downward from the MPI upper bound; refcount is bumped at the end of this routine */
      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      ierr = MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
      ierr = MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      /* and symmetrically record the outer (user) comm on the inner comm so PetscCommDestroy() can break the link */
      ucomm.comm = comm_in;
      ierr = MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      ierr = MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share objects created from that communicator.
  */
  ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
#endif

  if (counter->tag < 1) {
    /* tag space exhausted: restart 128 below the MPI upper bound */
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  PetscFunctionReturn(0);
}
192 
193 #undef __FUNCT__
194 #define __FUNCT__ "PetscCommDestroy"
195 /*@C
196    PetscCommDestroy - Frees communicator.  Use in conjunction with PetscCommDuplicate().
197 
198    Collective on MPI_Comm
199 
200    Input Parameter:
201 .  comm - the communicator to free
202 
203    Level: developer
204 
205    Concepts: communicator^destroy
206 
207 .seealso:   PetscCommDuplicate()
208 @*/
209 PetscErrorCode  PetscCommDestroy(MPI_Comm *comm)
210 {
211   PetscErrorCode   ierr;
212   PetscCommCounter *counter;
213   PetscMPIInt      flg;
214   MPI_Comm         icomm = *comm,ocomm;
215   union {MPI_Comm comm; void *ptr;} ucomm;
216 
217   PetscFunctionBegin;
218   if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
219   ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
220   if (!flg) { /* not a PETSc comm, check if it has an inner comm */
221     ierr = MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
222     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
223     icomm = ucomm.comm;
224     ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
225     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
226   }
227 
228   counter->refcount--;
229 
230   if (!counter->refcount) {
231     /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
232     ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
233     if (flg) {
234       ocomm = ucomm.comm;
235       ierr = MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
236       if (flg) {
237         ierr = MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
238       } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %d, problem with corrupted memory",(long int)ocomm,(long int)icomm);
239     }
240 
241     ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
242     ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
243   }
244   *comm = MPI_COMM_NULL;
245   PetscFunctionReturn(0);
246 }
247 
248 #undef  __FUNCT__
249 #define __FUNCT__ "PetscObjectsGetGlobalNumbering"
250 /*@C
251     PetscObjectsGetGlobalNumbering - computes a global numbering
252     of PetscObjects living on subcommunicators of a given communicator.
253     This results in a deadlock-free ordering of the subcommunicators
254     and, hence, the objects.
255 
256 
257     Collective on comm.
258 
259     Input Parameters:
260 +   comm    - MPI_Comm
261 .   len     - length of objlist
262 -   objlist - a list of PETSc objects living on subcommunicators of comm
263                 (subcommunicator ordering is assumed to be deadlock-free)
264 
265     Output Parameters:
266 +   count      - number of globally-distinct subcommunicators on objlist
-   numbering  - global numbers of objlist entries (allocated by user)
268 
269 
270     Level: developer
271 
272     Concepts: MPI subcomm^numbering
273 
274 @*/
275 PetscErrorCode  PetscObjectsGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
276 {
277   PetscErrorCode ierr;
278   PetscInt       i, roots, offset;
279   PetscMPIInt    size, rank;
280 
281   PetscFunctionBegin;
282   PetscValidPointer(objlist,3);
283   PetscValidPointer(count,4);
284   PetscValidPointer(numbering,5);
285   ierr  = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
286   ierr  = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
287   roots = 0;
288   for (i = 0; i < len; ++i) {
289     PetscMPIInt srank;
290     ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
291     /* Am I the root of the i-th subcomm? */
292     if (!srank) ++roots;
293   }
294   /* Obtain the sum of all roots -- the global number of distinct subcomms. */
295   ierr = MPI_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
296   /* Now introduce a global numbering for subcomms, initially known only by subcomm roots. */
297   /*
298    At the subcomm roots number the subcomms in the subcomm-root local manner,
299    and make it global by calculating the shift.
300    */
301   ierr    = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
302   offset -= roots;
303   /* Now we are ready to broadcast global subcomm numbers within each subcomm.*/
304   /*
305      This is where the assumption of a deadlock-free ordering of the subcomms is assumed:
306      broadcast is collective on the subcomm.
307    */
308   roots = 0;
309   for (i = 0; i < len; ++i) {
310     PetscMPIInt srank;
311     numbering[i] = offset + roots; /* only meaningful if !srank. */
312 
313     ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
314     ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
315     if (!srank) ++roots;
316   }
317   PetscFunctionReturn(0);
318 }
319