Actual source code: tagm.c
#include <petsc/private/petscimpl.h>
/* ---------------------------------------------------------------- */
/*
   A simple way to manage tags inside a communicator.

   It uses MPI attribute caching to determine whether a new communicator
   is needed and to store the available tags.
*/
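
/*
   For context, a minimal sketch (not part of the PETSc source) of the MPI attribute-caching
   mechanism this file relies on: a keyval is created once, and arbitrary data can then be
   attached to and retrieved from any communicator under that keyval. The names comm and
   some_struct are placeholders.

      int   keyval, flg;
      void *data;

      MPI_Comm_create_keyval(MPI_COMM_NULL_COPY_FN, MPI_COMM_NULL_DELETE_FN, &keyval, NULL);
      MPI_Comm_set_attr(comm, keyval, some_struct);   // attach data to comm
      MPI_Comm_get_attr(comm, keyval, &data, &flg);   // flg is true if the attribute is present

   PETSc uses such keyvals (Petsc_Counter_keyval, Petsc_InnerComm_keyval, Petsc_OuterComm_keyval)
   to stash the tag counter and the inner/outer communicator links used below.
*/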

/*@C
PetscObjectGetNewTag - Gets a unique new tag from a PETSc object. All
processors that share the object MUST call this routine EXACTLY the same
number of times. This tag should only be used with the current object's
communicator; do NOT use it with any other MPI communicator.

Collective on obj

Input Parameter:
. obj - the PETSc object; this must be cast with a (`PetscObject`), for example,
`PetscObjectGetNewTag`((`PetscObject`)mat,&tag);

Output Parameter:
. tag - the new tag

Level: developer

Note:
This tag is needed when writing MPI message-passing code that requires unique MPI tags to ensure the messages are
associated with this specific object.

.seealso: `PetscCommGetNewTag()`
@*/
PetscErrorCode PetscObjectGetNewTag(PetscObject obj, PetscMPIInt *tag)
{
  PetscCommGetNewTag(obj->comm, tag);
  return 0;
}
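
/*
   Example usage (a sketch, not part of the original source): obtain a tag tied to an
   object's communicator and use it for a point-to-point message. The names A (a Mat),
   buf, and dest are hypothetical.

      PetscMPIInt tag;
      PetscScalar buf = 1.0;
      MPI_Request req;

      PetscObjectGetNewTag((PetscObject)A, &tag);   // every rank sharing A gets the same tag
      MPI_Isend(&buf, 1, MPIU_SCALAR, dest, tag, PetscObjectComm((PetscObject)A), &req);
      // ... matching receive and MPI_Wait() elsewhere ...

   Because every process sharing A calls the routine the same number of times, the tag
   cannot collide with tags used by other operations on A.
*/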

/*@
PetscCommGetNewTag - Gets a unique new tag from a PETSc communicator. All
processors that share the communicator MUST call this routine EXACTLY the same
number of times. This tag should only be used with this communicator;
do NOT use it with any other MPI communicator.

Collective

Input Parameter:
. comm - the MPI communicator

Output Parameter:
. tag - the new tag

Level: developer

.seealso: `PetscObjectGetNewTag()`, `PetscCommDuplicate()`
@*/
PetscErrorCode PetscCommGetNewTag(MPI_Comm comm, PetscMPIInt *tag)
{
  PetscCommCounter *counter;
  PetscMPIInt      *maxval, flg;

  MPI_Comm_get_attr(comm, Petsc_Counter_keyval, &counter, &flg);

  if (counter->tag < 1) {
    PetscInfo(NULL, "Out of tags for object, starting to recycle. Comm reference count %" PetscInt_FMT "\n", counter->refcount);
    MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &maxval, &flg);
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  *tag = counter->tag--;
  if (PetscDefined(USE_DEBUG)) {
    /*
       Hanging here means that some processes have called PetscCommGetNewTag() and others have not.
    */
    MPI_Barrier(comm);
  }
  return 0;
}
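
/*
   Example usage (a sketch, not part of the original source): the communicator must
   already carry the PETSc counter attribute, so it is typically obtained from
   PetscCommDuplicate() first. The name user_comm is hypothetical.

      MPI_Comm    pcomm;
      PetscMPIInt tag;

      PetscCommDuplicate(user_comm, &pcomm, NULL);   // attaches the tag counter if not already present
      PetscCommGetNewTag(pcomm, &tag);               // all ranks of pcomm must call this the same number of times
      // ... use tag only for MPI_Send()/MPI_Recv() on pcomm ...
      PetscCommDestroy(&pcomm);
*/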

/*@C
PetscCommGetComm - get a new MPI communicator from a PETSc communicator that can be passed off to another package

Collective

Input Parameter:
. comm_in - Input communicator

Output Parameter:
. comm_out - Output communicator

Notes:
Use `PetscCommRestoreComm()` to return the communicator when the external package no longer needs it.

Certain MPI implementations have an `MPI_Comm_free()` that does not actually free the communicator's resources, so one can run out of available MPI communicators,
causing mysterious crashes after the code has run for a long time. This routine allows reusing previously obtained MPI communicators that
are no longer needed.

Level: developer

.seealso: `PetscObjectGetNewTag()`, `PetscCommGetNewTag()`, `PetscCommDestroy()`, `PetscCommRestoreComm()`
@*/
PetscErrorCode PetscCommGetComm(MPI_Comm comm_in, MPI_Comm *comm_out)
{
  PetscCommCounter *counter;
  PetscMPIInt       flg;

  PetscSpinlockLock(&PetscCommSpinLock);
  MPI_Comm_get_attr(comm_in, Petsc_Counter_keyval, &counter, &flg);

  if (counter->comms) {
    struct PetscCommStash *pcomms = counter->comms;

    *comm_out      = pcomms->comm;
    counter->comms = pcomms->next;
    PetscFree(pcomms);
    PetscInfo(NULL, "Reusing a communicator %ld %ld\n", (long)comm_in, (long)*comm_out);
  } else {
    MPI_Comm_dup(comm_in, comm_out);
  }
  PetscSpinlockUnlock(&PetscCommSpinLock);
  return 0;
}

/*@C
PetscCommRestoreComm - restores an MPI communicator that was obtained with `PetscCommGetComm()`

Collective

Input Parameters:
+ comm_in  - Input communicator
- comm_out - returned communicator

Level: developer

.seealso: `PetscObjectGetNewTag()`, `PetscCommGetNewTag()`, `PetscCommDestroy()`, `PetscCommGetComm()`
@*/
PetscErrorCode PetscCommRestoreComm(MPI_Comm comm_in, MPI_Comm *comm_out)
{
  PetscCommCounter      *counter;
  PetscMPIInt            flg;
  struct PetscCommStash *pcomms, *ncomm;

  PetscSpinlockLock(&PetscCommSpinLock);
  MPI_Comm_get_attr(comm_in, Petsc_Counter_keyval, &counter, &flg);

  PetscMalloc(sizeof(struct PetscCommStash), &ncomm);
  ncomm->comm = *comm_out;
  ncomm->next = NULL;
  pcomms      = counter->comms;
  while (pcomms && pcomms->next) pcomms = pcomms->next;
  if (pcomms) {
    pcomms->next = ncomm;
  } else {
    counter->comms = ncomm;
  }
  *comm_out = MPI_COMM_NULL; /* invalidate the caller's handle; the communicator is now stashed for reuse */
  PetscSpinlockUnlock(&PetscCommSpinLock);
  return 0;
}
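
/*
   Example usage (a sketch, not part of the original source) of the get/restore pair: a
   communicator handed to an external package is returned to the stash afterwards so a
   later call can reuse it instead of calling MPI_Comm_dup() again. The object A and the
   routine ThirdPartySetup() are hypothetical.

      MPI_Comm pkg_comm;

      PetscCommGetComm(PetscObjectComm((PetscObject)A), &pkg_comm);
      ThirdPartySetup(pkg_comm);                                      // external package uses the communicator
      PetscCommRestoreComm(PetscObjectComm((PetscObject)A), &pkg_comm);   // pkg_comm is stashed for reuse and the handle invalidated
*/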

/*@C
PetscCommDuplicate - Duplicates the communicator only if it is not already a PETSc communicator.

Collective

Input Parameter:
. comm_in - Input communicator

Output Parameters:
+ comm_out  - Output communicator. May be comm_in.
- first_tag - Tag available that has not already been used with this communicator (you may pass in NULL if you do not need a tag)

Note:
PETSc communicators are just regular MPI communicators that keep track of which
tags have been used to prevent tag conflicts. If you pass a non-PETSc communicator into
a PETSc creation routine, it will attach a private communicator for use in the object's communications.
The internal `MPI_Comm` is used to perform all the MPI calls for PETSc; the outer `MPI_Comm` is a user-level
`MPI_Comm` that may be performing communication for the user or another library and so IS NOT used by PETSc.

Level: developer

.seealso: `PetscObjectGetNewTag()`, `PetscCommGetNewTag()`, `PetscCommDestroy()`
@*/
PetscErrorCode PetscCommDuplicate(MPI_Comm comm_in, MPI_Comm *comm_out, PetscMPIInt *first_tag)
{
  PetscCommCounter *counter;
  PetscMPIInt      *maxval, flg;

  PetscSpinlockLock(&PetscCommSpinLock);
  MPI_Comm_get_attr(comm_in, Petsc_Counter_keyval, &counter, &flg);

  if (!flg) { /* this is NOT a PETSc comm */
    union
    {
      MPI_Comm comm;
      void    *ptr;
    } ucomm;
    /* check if this communicator has a PETSc communicator embedded in it */
    MPI_Comm_get_attr(comm_in, Petsc_InnerComm_keyval, &ucomm, &flg);
    if (!flg) {
      /* This communicator is not yet known to this system, so we duplicate it and make an internal communicator */
      MPI_Comm_dup(comm_in, comm_out);
      MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &maxval, &flg);
      PetscNew(&counter); /* all fields of counter are zero'ed */
      counter->tag = *maxval;
      MPI_Comm_set_attr(*comm_out, Petsc_Counter_keyval, counter);
      PetscInfo(NULL, "Duplicating a communicator %ld %ld max tags = %d\n", (long)comm_in, (long)*comm_out, *maxval);

      /* save PETSc communicator inside user communicator, so we can get it next time */
      ucomm.comm = *comm_out; /* ONLY the comm part of the union is significant. */
      MPI_Comm_set_attr(comm_in, Petsc_InnerComm_keyval, ucomm.ptr);
      ucomm.comm = comm_in;
      MPI_Comm_set_attr(*comm_out, Petsc_OuterComm_keyval, ucomm.ptr);
    } else {
      *comm_out = ucomm.comm;
      /* pull out the inner MPI_Comm and hand it back to the caller */
      MPI_Comm_get_attr(*comm_out, Petsc_Counter_keyval, &counter, &flg);
      PetscInfo(NULL, "Using internal PETSc communicator %ld %ld\n", (long)comm_in, (long)*comm_out);
    }
  } else *comm_out = comm_in;

  if (PetscDefined(USE_DEBUG)) {
    /*
       Hanging here means that some processes have called PetscCommDuplicate() and others have not.
       This likely means that a subset of processes in a MPI_Comm have attempted to create a PetscObject!
       ALL processes that share a communicator MUST share objects created from that communicator.
    */
    MPI_Barrier(comm_in);
  }

  if (counter->tag < 1) {
    PetscInfo(NULL, "Out of tags for object, starting to recycle. Comm reference count %" PetscInt_FMT "\n", counter->refcount);
    MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &maxval, &flg);
    counter->tag = *maxval - 128; /* hope that any still active tags were issued right at the beginning of the run */
  }

  if (first_tag) *first_tag = counter->tag--;

  counter->refcount++; /* number of references to this comm */
  PetscSpinlockUnlock(&PetscCommSpinLock);
  return 0;
}
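
/*
   Example usage (a sketch, not part of the original source): the typical pattern inside a
   library routine that needs to communicate on behalf of a user-supplied communicator.
   The name user_comm is hypothetical.

      MPI_Comm    inner;
      PetscMPIInt tag;

      PetscCommDuplicate(user_comm, &inner, &tag);   // first call duplicates; later calls return the cached inner comm
      // ... perform MPI communication on inner using tag ...
      PetscCommDestroy(&inner);                      // decrements the reference count taken above

   Because the inner communicator and its tag counter are cached as attributes of
   user_comm, repeated calls are cheap and all PETSc traffic stays off the user's
   communicator.
*/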

/*@C
PetscCommDestroy - Frees a communicator obtained with `PetscCommDuplicate()`.

Collective

Input Parameter:
. comm - the communicator to free

Level: developer

.seealso: `PetscCommDuplicate()`
@*/
PetscErrorCode PetscCommDestroy(MPI_Comm *comm)
{
  PetscCommCounter *counter;
  PetscMPIInt       flg;
  MPI_Comm          icomm = *comm, ocomm;
  union
  {
    MPI_Comm comm;
    void    *ptr;
  } ucomm;

  if (*comm == MPI_COMM_NULL) return 0;
  PetscSpinlockLock(&PetscCommSpinLock);
  MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg);
  if (!flg) { /* not a PETSc comm, check if it has an inner comm */
    MPI_Comm_get_attr(icomm, Petsc_InnerComm_keyval, &ucomm, &flg);
    icomm = ucomm.comm;
    MPI_Comm_get_attr(icomm, Petsc_Counter_keyval, &counter, &flg);
  }

  counter->refcount--;

  if (!counter->refcount) {
    /* if MPI_Comm has outer comm then remove reference to inner MPI_Comm from outer MPI_Comm */
    MPI_Comm_get_attr(icomm, Petsc_OuterComm_keyval, &ucomm, &flg);
    if (flg) {
      ocomm = ucomm.comm;
      MPI_Comm_get_attr(ocomm, Petsc_InnerComm_keyval, &ucomm, &flg);
      MPI_Comm_delete_attr(ocomm, Petsc_InnerComm_keyval);
    }

    PetscInfo(NULL, "Deleting PETSc MPI_Comm %ld\n", (long)icomm);
    MPI_Comm_free(&icomm);
  }
  *comm = MPI_COMM_NULL;
  PetscSpinlockUnlock(&PetscCommSpinLock);
  return 0;
}
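
/*
   Sketch of the reference-count behaviour (not part of the original source): two
   PetscCommDuplicate() calls on the same communicator return the same inner comm and
   raise the reference count to 2; the inner MPI_Comm is actually freed only when the
   second PetscCommDestroy() drops the count to zero. The name user_comm is hypothetical.

      MPI_Comm a, b;

      PetscCommDuplicate(user_comm, &a, NULL);
      PetscCommDuplicate(user_comm, &b, NULL);   // a == b, refcount == 2
      PetscCommDestroy(&a);                      // refcount == 1, inner comm still alive
      PetscCommDestroy(&b);                      // refcount == 0, MPI_Comm_free() is called
*/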

/*@C
PetscObjectsListGetGlobalNumbering - computes a global numbering
of `PetscObject`s living on subcommunicators of a given communicator.

Collective

Input Parameters:
+ comm - the `MPI_Comm`
. len - local length of objlist
- objlist - a list of PETSc objects living on subcomms of comm and containing this comm rank
            (subcomm ordering is assumed to be deadlock-free)

Output Parameters:
+ count - global number of distinct subcommunicators in objlist (may be > len)
- numbering - global numbers of objlist entries (allocated by the user)

Level: developer

Note:
This is needed when PETSc is used with certain languages that do garbage collection to manage object life cycles.

@*/
PetscErrorCode PetscObjectsListGetGlobalNumbering(MPI_Comm comm, PetscInt len, PetscObject *objlist, PetscInt *count, PetscInt *numbering)
{
  PetscInt    i, roots, offset;
  PetscMPIInt size, rank;

  if (!count && !numbering) return 0;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);
  roots = 0;
  for (i = 0; i < len; ++i) {
    PetscMPIInt srank;
    MPI_Comm_rank(objlist[i]->comm, &srank);
    /* Am I the root of the i-th subcomm? */
    if (!srank) ++roots;
  }
  if (count) {
    /* Obtain the sum of all roots -- the global number of distinct subcomms. */
    MPIU_Allreduce(&roots, count, 1, MPIU_INT, MPI_SUM, comm);
  }
  if (numbering) {
    /* Introduce a global numbering for subcomms, initially known only by subcomm roots. */
    /*
       At each subcomm root number all of the subcomms it owns locally
       and make it global by calculating the shift among all of the roots.
       The roots are ordered using the comm ordering.
    */
    MPI_Scan(&roots, &offset, 1, MPIU_INT, MPI_SUM, comm);
    offset -= roots;
    /* Now we are ready to broadcast global subcomm numbers within each subcomm. */
    /*
       This is where the deadlock-free ordering of the subcomms is assumed:
       the broadcast is collective on each subcomm.
    */
    roots = 0;
    for (i = 0; i < len; ++i) {
      PetscMPIInt srank;
      numbering[i] = offset + roots; /* only meaningful on the subcomm root (srank == 0); the broadcast below overwrites it elsewhere */

      MPI_Comm_rank(objlist[i]->comm, &srank);
      MPI_Bcast(numbering + i, 1, MPIU_INT, 0, objlist[i]->comm);
      if (!srank) ++roots;
    }
  }
  return 0;
}
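
/*
   Example usage (a sketch, not part of the original source): number two objects that live
   on (possibly different) subcommunicators of comm; each object's subcommunicator must
   contain this rank. The names obj0, obj1, and comm are hypothetical, and numbering is
   allocated by the caller.

      PetscObject objs[2] = {(PetscObject)obj0, (PetscObject)obj1};
      PetscInt    count, numbering[2];

      PetscObjectsListGetGlobalNumbering(comm, 2, objs, &count, numbering);
      // count is the global number of distinct subcomms; numbering[i] identifies the subcomm of objs[i]
*/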