xref: /petsc/src/sys/objects/tagm.c (revision 77b2f2ac03f962b1bc469d44871d3508a4d15da4)
1 
2 /*
      Some PETSc utilities
4 */
5 #include <petsc/private/petscimpl.h>             /*I    "petscsys.h"   I*/
6 /* ---------------------------------------------------------------- */
7 /*
8    A simple way to manage tags inside a communicator.
9 
10    It uses the attributes to determine if a new communicator
11       is needed and to store the available tags.
12 
13 */
14 
15 
#undef __FUNCT__
#define __FUNCT__ "PetscObjectGetNewTag"
/*@C
    PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
    processors that share the object MUST call this routine EXACTLY the same
    number of times.  This tag should only be used with the current objects
    communicator; do NOT use it with any other MPI communicator.

    Collective on PetscObject

    Input Parameter:
.   obj - the PETSc object; this must be cast with a (PetscObject), for example,
         PetscObjectGetNewTag((PetscObject)mat,&tag);

    Output Parameter:
.   tag - the new tag

    Level: developer

    Concepts: tag^getting
    Concepts: message tag^getting
    Concepts: MPI message tag^getting

.seealso: PetscCommGetNewTag()
@*/
PetscErrorCode  PetscObjectGetNewTag(PetscObject obj,PetscMPIInt *tag)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Thin wrapper: delegate to PetscCommGetNewTag() on the object's communicator */
  ierr = PetscCommGetNewTag(obj->comm,tag);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
49 
50 #undef __FUNCT__
51 #define __FUNCT__ "PetscCommGetNewTag"
52 /*@
53     PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
54     processors that share the communicator MUST call this routine EXACTLY the same
55     number of times.  This tag should only be used with the current objects
56     communicator; do NOT use it with any other MPI communicator.
57 
58     Collective on comm
59 
60     Input Parameter:
61 .   comm - the MPI communicator
62 
63     Output Parameter:
64 .   tag - the new tag
65 
66     Level: developer
67 
68     Concepts: tag^getting
69     Concepts: message tag^getting
70     Concepts: MPI message tag^getting
71 
72 .seealso: PetscObjectGetNewTag(), PetscCommDuplicate()
73 @*/
74 PetscErrorCode  PetscCommGetNewTag(MPI_Comm comm,PetscMPIInt *tag)
75 {
76   PetscErrorCode   ierr;
77   PetscCommCounter *counter;
78   PetscMPIInt      *maxval,flg;
79 
80   PetscFunctionBegin;
81   PetscValidIntPointer(tag,2);
82 
83   ierr = MPI_Attr_get(comm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
84   if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Bad MPI communicator supplied; must be a PETSc communicator");
85 
86   if (counter->tag < 1) {
87     ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
88     ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
89     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
90     counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
91   }
92 
93   *tag = counter->tag--;
94 #if defined(PETSC_USE_DEBUG)
95   /*
96      Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
97   */
98   ierr = MPI_Barrier(comm);CHKERRQ(ierr);
99 #endif
100   PetscFunctionReturn(0);
101 }
102 
#undef __FUNCT__
#define __FUNCT__ "PetscCommDuplicate"
/*@C
  PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

  Collective on MPI_Comm

  Input Parameters:
. comm_in - Input communicator

  Output Parameters:
+ comm_out - Output communicator.  May be comm_in.
- first_tag - Tag available that has not already been used with this communicator (you may
              pass in NULL if you do not need a tag)

  PETSc communicators are just regular MPI communicators that keep track of which
  tags have been used to prevent tag conflict. If you pass a non-PETSc communicator into
  a PETSc creation routine it will attach a private communicator for use in the objects communications.
  The internal MPI_Comm is used to perform all the MPI calls for PETSc, the outer MPI_Comm is a user
  level MPI_Comm that may be performing communication for the user or other library and so IS NOT used by PETSc.

  Level: developer

  Concepts: communicator^duplicate

.seealso: PetscObjectGetNewTag(), PetscCommGetNewTag(), PetscCommDestroy()
@*/
PetscErrorCode  PetscCommDuplicate(MPI_Comm comm_in,MPI_Comm *comm_out,PetscMPIInt *first_tag)
{
  PetscErrorCode   ierr;
  PetscCommCounter *counter;
  PetscMPIInt      *maxval,flg;

  PetscFunctionBegin;
  ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
  /* if comm_in carries the counter attribute it is already an inner PETSc communicator */
  ierr = MPI_Attr_get(comm_in,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);

  if (!flg) {  /* this is NOT a PETSc comm */
    union {MPI_Comm comm; void *ptr;} ucomm;
    /* check if this communicator has a PETSc communicator embedded in it */
    ierr = MPI_Attr_get(comm_in,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      ierr = MPI_Comm_dup(comm_in,comm_out);CHKERRQ(ierr);
      ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
      ierr = PetscNew(&counter);CHKERRQ(ierr);

      /* fresh counter: tags start at the MPI upper bound and count down; no references yet */
      counter->tag       = *maxval;
      counter->refcount  = 0;
      counter->namecount = 0;

      ierr = MPI_Attr_put(*comm_out,Petsc_Counter_keyval,counter);CHKERRQ(ierr);
      ierr = PetscInfo3(0,"Duplicating a communicator %ld %ld max tags = %d\n",(long)comm_in,(long)*comm_out,*maxval);CHKERRQ(ierr);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out;   /* ONLY the comm part of the union is significant. */
      ierr = MPI_Attr_put(comm_in,Petsc_InnerComm_keyval,ucomm.ptr);CHKERRQ(ierr);
      /* and cross-link back so PetscCommDestroy() can find the outer comm from the inner one */
      ucomm.comm = comm_in;
      ierr = MPI_Attr_put(*comm_out,Petsc_OuterComm_keyval,ucomm.ptr);CHKERRQ(ierr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      ierr = MPI_Attr_get(*comm_out,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
      if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Inner PETSc communicator does not have its tag/name counter attribute set");
      ierr = PetscInfo2(0,"Using internal PETSc communicator %ld %ld\n",(long)comm_in,(long)*comm_out);CHKERRQ(ierr);
    }
  } else *comm_out = comm_in;

#if defined(PETSC_USE_DEBUG)
  /*
     Hanging here means that some processes have called PetscCommDuplicate() and others have not.
     This likely means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
     ALL processes that share a communicator MUST share objects created from that communicator.
  */
  ierr = MPI_Barrier(comm_in);CHKERRQ(ierr);
#endif

  if (counter->tag < 1) {
    /* exhausted the tag space: restart a bit below the MPI upper bound and recycle */
    ierr = PetscInfo1(0,"Out of tags for object, starting to recycle. Comm reference count %d\n",counter->refcount);CHKERRQ(ierr);
    ierr = MPI_Attr_get(MPI_COMM_WORLD,MPI_TAG_UB,&maxval,&flg);CHKERRQ(ierr);
    if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"MPI error: MPI_Attr_get() is not returning a MPI_TAG_UB");
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
194 
195 #undef __FUNCT__
196 #define __FUNCT__ "PetscCommDestroy"
197 /*@C
198    PetscCommDestroy - Frees communicator.  Use in conjunction with PetscCommDuplicate().
199 
200    Collective on MPI_Comm
201 
202    Input Parameter:
203 .  comm - the communicator to free
204 
205    Level: developer
206 
207    Concepts: communicator^destroy
208 
209 .seealso:   PetscCommDuplicate()
210 @*/
211 PetscErrorCode  PetscCommDestroy(MPI_Comm *comm)
212 {
213   PetscErrorCode   ierr;
214   PetscCommCounter *counter;
215   PetscMPIInt      flg;
216   MPI_Comm         icomm = *comm,ocomm;
217   union {MPI_Comm comm; void *ptr;} ucomm;
218 
219   PetscFunctionBegin;
220   if (*comm == MPI_COMM_NULL) PetscFunctionReturn(0);
221   ierr = PetscSpinlockLock(&PetscCommSpinLock);CHKERRQ(ierr);
222   ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
223   if (!flg) { /* not a PETSc comm, check if it has an inner comm */
224     ierr = MPI_Attr_get(icomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
225     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"MPI_Comm does not have tag/name counter nor does it have inner MPI_Comm");
226     icomm = ucomm.comm;
227     ierr = MPI_Attr_get(icomm,Petsc_Counter_keyval,&counter,&flg);CHKERRQ(ierr);
228     if (!flg) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Inner MPI_Comm does not have expected tag/name counter, problem with corrupted memory");
229   }
230 
231   counter->refcount--;
232 
233   if (!counter->refcount) {
234     /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
235     ierr = MPI_Attr_get(icomm,Petsc_OuterComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
236     if (flg) {
237       ocomm = ucomm.comm;
238       ierr = MPI_Attr_get(ocomm,Petsc_InnerComm_keyval,&ucomm,&flg);CHKERRQ(ierr);
239       if (flg) {
240         ierr = MPI_Attr_delete(ocomm,Petsc_InnerComm_keyval);CHKERRQ(ierr);
241       } else SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_ARG_CORRUPT,"Outer MPI_Comm %ld does not have expected reference to inner comm %d, problem with corrupted memory",(long int)ocomm,(long int)icomm);
242     }
243 
244     ierr = PetscInfo1(0,"Deleting PETSc MPI_Comm %ld\n",(long)icomm);CHKERRQ(ierr);
245     ierr = MPI_Comm_free(&icomm);CHKERRQ(ierr);
246   }
247   *comm = MPI_COMM_NULL;
248   ierr = PetscSpinlockUnlock(&PetscCommSpinLock);CHKERRQ(ierr);
249   PetscFunctionReturn(0);
250 }
251 
252 #undef  __FUNCT__
253 #define __FUNCT__ "PetscObjectsListGetGlobalNumbering"
254 /*@C
255     PetscObjectsListGetGlobalNumbering - computes a global numbering
256     of PetscObjects living on subcommunicators of a given communicator.
257 
258 
259     Collective on comm.
260 
261     Input Parameters:
262 +   comm    - MPI_Comm
263 .   len     - local length of objlist
264 -   objlist - a list of PETSc objects living on subcomms of comm and containing this comm rank
265               (subcomm ordering is assumed to be deadlock-free)
266 
267     Output Parameters:
268 +   count      - global number of distinct subcommunicators on objlist (may be > len)
269 -   numbering  - global numbers of objlist entries (allocated by user)
270 
271 
272     Level: developer
273 
274     Concepts: MPI subcomm^numbering
275 
276 @*/
277 PetscErrorCode  PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
278 {
279   PetscErrorCode ierr;
280   PetscInt       i, roots, offset;
281   PetscMPIInt    size, rank;
282 
283   PetscFunctionBegin;
284   PetscValidPointer(objlist,3);
285   if (!count && !numbering) PetscFunctionReturn(0);
286 
287   ierr  = MPI_Comm_size(comm, &size);CHKERRQ(ierr);
288   ierr  = MPI_Comm_rank(comm, &rank);CHKERRQ(ierr);
289   roots = 0;
290   for (i = 0; i < len; ++i) {
291     PetscMPIInt srank;
292     ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
293     /* Am I the root of the i-th subcomm? */
294     if (!srank) ++roots;
295   }
296   if (count) {
297     /* Obtain the sum of all roots -- the global number of distinct subcomms. */
298     ierr = MPI_Allreduce(&roots,count,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
299   }
300   if (numbering) {
301     /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
302     /*
303       At each subcomm root number all of the subcomms it owns locally
304       and make it global by calculating the shift among all of the roots.
305       The roots are ordered using the comm ordering.
306     */
307     ierr    = MPI_Scan(&roots,&offset,1,MPIU_INT,MPI_SUM,comm);CHKERRQ(ierr);
308     offset -= roots;
309     /* Now we are ready to broadcast global subcomm numbers within each subcomm.*/
310     /*
311       This is where the assumption of a deadlock-free ordering of the subcomms is assumed:
312       broadcast is collective on the subcomm.
313     */
314     roots = 0;
315     for (i = 0; i < len; ++i) {
316       PetscMPIInt srank;
317       numbering[i] = offset + roots; /* only meaningful if !srank. */
318 
319       ierr = MPI_Comm_rank(objlist[i]->comm, &srank);CHKERRQ(ierr);
320       ierr = MPI_Bcast(numbering+i,1,MPIU_INT,0,objlist[i]->comm);CHKERRQ(ierr);
321       if (!srank) ++roots;
322     }
323   }
324   PetscFunctionReturn(0);
325 }
326