Actual source code: sfutils.c
#include <petsc/private/sfimpl.h>
#include <petsc/private/sectionimpl.h>

/*@
  PetscSFSetGraphLayout - Set a parallel star forest via global indices and a PetscLayout

  Collective

  Input Parameters:
+ sf        - star forest
. layout    - PetscLayout defining the global space for roots
. nleaves   - number of leaf vertices on the current process, each of these references a root on any process
. ilocal    - locations of leaves in leafdata buffers, pass NULL for contiguous storage
. localmode - copy mode for ilocal
- gremote   - root vertices in global numbering corresponding to leaves in ilocal

  Level: intermediate

  Notes:
  Global indices must lie in [0, N) where N is the global size of layout.
  Leaf indices in ilocal get sorted; this means the user-provided array gets sorted if localmode is PETSC_OWN_POINTER.

  Developer Notes:
  Local indices which are the identity permutation in the range [0,nleaves) are discarded, as they
  encode contiguous storage. In that case, if localmode is PETSC_OWN_POINTER, the memory is deallocated, since it is
  not needed.

.seealso: `PetscSFGetGraphLayout()`, `PetscSFCreate()`, `PetscSFView()`, `PetscSFSetGraph()`, `PetscSFGetGraph()`
@*/
PetscErrorCode PetscSFSetGraphLayout(PetscSF sf, PetscLayout layout, PetscInt nleaves, PetscInt *ilocal, PetscCopyMode localmode, const PetscInt *gremote)
{
  const PetscInt *range;
  PetscInt        i, nroots, ls = -1, ln = -1;
  PetscMPIInt     lr = -1;
  PetscSFNode    *remote;

  PetscLayoutGetLocalSize(layout, &nroots);
  PetscLayoutGetRanges(layout, &range);
  PetscMalloc1(nleaves, &remote);
  if (nleaves) ls = gremote[0] + 1;
  for (i = 0; i < nleaves; i++) {
    const PetscInt idx = gremote[i] - ls;
    if (idx < 0 || idx >= ln) { /* short-circuit the search */
      PetscLayoutFindOwnerIndex(layout, gremote[i], &lr, &remote[i].index);
      remote[i].rank = lr;
      ls             = range[lr];
      ln             = range[lr + 1] - ls;
    } else {
      remote[i].rank  = lr;
      remote[i].index = idx;
    }
  }
  PetscSFSetGraph(sf, nroots, nleaves, ilocal, localmode, remote, PETSC_OWN_POINTER);
  return 0;
}
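
/* A minimal usage sketch (illustrative only, not part of the library source): each
   rank owns two roots and has two leaves referencing hypothetical global indices.
   Contiguous leaf storage is requested by passing ilocal = NULL.

     PetscSF     sf;
     PetscLayout layout;
     PetscInt    gremote[2] = {0, 3};

     PetscSFCreate(PETSC_COMM_WORLD, &sf);
     PetscLayoutCreate(PETSC_COMM_WORLD, &layout);
     PetscLayoutSetLocalSize(layout, 2);
     PetscLayoutSetUp(layout);
     PetscSFSetGraphLayout(sf, layout, 2, NULL, PETSC_OWN_POINTER, gremote);
     PetscLayoutDestroy(&layout);
     PetscSFDestroy(&sf);
*/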
/*@C
  PetscSFGetGraphLayout - Get the global indices and PetscLayout that describe this star forest

  Collective

  Input Parameter:
. sf - star forest

  Output Parameters:
+ layout  - PetscLayout defining the global space for roots
. nleaves - number of leaf vertices on the current process, each of these references a root on any process
. ilocal  - locations of leaves in leafdata buffers, or NULL for contiguous storage
- gremote - root vertices in global numbering corresponding to leaves in ilocal

  Level: intermediate

  Notes:
  The outputs are such that passing them as inputs to `PetscSFSetGraphLayout()` would lead to the same star forest.
  The outputs layout and gremote are freshly created each time this function is called,
  so they need to be freed by the user and cannot be qualified as const.

.seealso: `PetscSFSetGraphLayout()`, `PetscSFCreate()`, `PetscSFView()`, `PetscSFSetGraph()`, `PetscSFGetGraph()`
@*/
PetscErrorCode PetscSFGetGraphLayout(PetscSF sf, PetscLayout *layout, PetscInt *nleaves, const PetscInt *ilocal[], PetscInt *gremote[])
{
  PetscInt           nr, nl;
  const PetscSFNode *ir;
  PetscLayout        lt;

  PetscSFGetGraph(sf, &nr, &nl, ilocal, &ir);
  PetscLayoutCreateFromSizes(PetscObjectComm((PetscObject)sf), nr, PETSC_DECIDE, 1, &lt);
  if (gremote) {
    PetscInt        i;
    const PetscInt *range;
    PetscInt       *gr;

    PetscLayoutGetRanges(lt, &range);
    PetscMalloc1(nl, &gr);
    for (i = 0; i < nl; i++) gr[i] = range[ir[i].rank] + ir[i].index;
    *gremote = gr;
  }
  if (nleaves) *nleaves = nl;
  if (layout) *layout = lt;
  else PetscLayoutDestroy(&lt);
  return 0;
}
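
/* Usage sketch (illustrative): recover the layout and the global root indices of an
   existing star forest sf; the outputs are freshly created, so the caller frees them.

     PetscLayout     layout;
     PetscInt        nleaves, *gremote;
     const PetscInt *ilocal;

     PetscSFGetGraphLayout(sf, &layout, &nleaves, &ilocal, &gremote);
     ...
     PetscFree(gremote);
     PetscLayoutDestroy(&layout);
*/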
/*@
  PetscSFSetGraphSection - Sets the PetscSF graph encoding the parallel dof overlap based upon the PetscSections
  describing the data layout.

  Input Parameters:
+ sf            - The SF
. localSection  - PetscSection describing the local data layout
- globalSection - PetscSection describing the global data layout

  Level: developer

.seealso: `PetscSFSetGraph()`, `PetscSFSetGraphLayout()`
@*/
PetscErrorCode PetscSFSetGraphSection(PetscSF sf, PetscSection localSection, PetscSection globalSection)
{
  MPI_Comm        comm;
  PetscLayout     layout;
  const PetscInt *ranges;
  PetscInt       *local;
  PetscSFNode    *remote;
  PetscInt        pStart, pEnd, p, nroots, nleaves = 0, l;
  PetscMPIInt     size, rank;

  PetscObjectGetComm((PetscObject)sf, &comm);
  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);
  PetscSectionGetChart(globalSection, &pStart, &pEnd);
  PetscSectionGetConstrainedStorageSize(globalSection, &nroots);
  PetscLayoutCreate(comm, &layout);
  PetscLayoutSetBlockSize(layout, 1);
  PetscLayoutSetLocalSize(layout, nroots);
  PetscLayoutSetUp(layout);
  PetscLayoutGetRanges(layout, &ranges);
  /* Count unconstrained dofs: a negative gdof of -(dof+1) marks a point owned by another process */
  for (p = pStart; p < pEnd; ++p) {
    PetscInt gdof, gcdof;

    PetscSectionGetDof(globalSection, p, &gdof);
    PetscSectionGetConstraintDof(globalSection, p, &gcdof);

    nleaves += gdof < 0 ? -(gdof + 1) - gcdof : gdof - gcdof;
  }
  PetscMalloc1(nleaves, &local);
  PetscMalloc1(nleaves, &remote);
  for (p = pStart, l = 0; p < pEnd; ++p) {
    const PetscInt *cind;
    PetscInt        dof, cdof, off, gdof, gcdof, goff, gsize, d, c;

    PetscSectionGetDof(localSection, p, &dof);
    PetscSectionGetOffset(localSection, p, &off);
    PetscSectionGetConstraintDof(localSection, p, &cdof);
    PetscSectionGetConstraintIndices(localSection, p, &cind);
    PetscSectionGetDof(globalSection, p, &gdof);
    PetscSectionGetConstraintDof(globalSection, p, &gcdof);
    PetscSectionGetOffset(globalSection, p, &goff);
    if (!gdof) continue; /* Censored point */
    gsize = gdof < 0 ? -(gdof + 1) - gcdof : gdof - gcdof;
    if (gsize != dof - cdof) {
      cdof = 0; /* Ignore constraints */
    }
    for (d = 0, c = 0; d < dof; ++d) {
      if ((c < cdof) && (cind[c] == d)) {
        ++c;
        continue;
      }
      local[l + d - c] = off + d;
    }

    if (gdof < 0) {
      /* Remote point: recover the owner rank from the global offset encoded as -(goff+1) */
      for (d = 0; d < gsize; ++d, ++l) {
        PetscInt offset = -(goff + 1) + d, r;

        PetscFindInt(offset, size + 1, ranges, &r);
        if (r < 0) r = -(r + 2);

        remote[l].rank  = r;
        remote[l].index = offset - ranges[r];
      }
    } else {
      for (d = 0; d < gsize; ++d, ++l) {
        remote[l].rank  = rank;
        remote[l].index = goff + d - ranges[rank];
      }
    }
  }

  PetscLayoutDestroy(&layout);
  PetscSFSetGraph(sf, nroots, nleaves, local, PETSC_OWN_POINTER, remote, PETSC_OWN_POINTER);
  return 0;
}
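
/* Usage sketch (illustrative): given a point SF relating mesh points and a pair of
   matching sections (for instance obtained from DMGetLocalSection() and
   DMGetGlobalSection() on a DMPlex), encode the dof overlap in a fresh SF. Here dm,
   localSection, and globalSection are assumed to exist.

     PetscSF dofSF;

     PetscSFCreate(PetscObjectComm((PetscObject)dm), &dofSF);
     PetscSFSetGraphSection(dofSF, localSection, globalSection);
*/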
/*@C
  PetscSFDistributeSection - Create a new PetscSection reorganized, moving from the root to the leaves of the SF

  Collective on sf

  Input Parameters:
+ sf          - The SF
- rootSection - Section defined on root space

  Output Parameters:
+ remoteOffsets - root offsets in leaf storage, or NULL
- leafSection   - Section defined on the leaf space

  Level: advanced

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFDistributeSection(PetscSF sf, PetscSection rootSection, PetscInt **remoteOffsets, PetscSection leafSection)
{
  PetscSF         embedSF;
  const PetscInt *indices;
  IS              selected;
  PetscInt        numFields, nroots, rpStart, rpEnd, lpStart = PETSC_MAX_INT, lpEnd = -1, f, c;
  PetscBool      *sub, hasc;

  PetscLogEventBegin(PETSCSF_DistSect, sf, 0, 0, 0);
  PetscSectionGetNumFields(rootSection, &numFields);
  if (numFields) {
    IS perm;

    /* PetscSectionSetNumFields() calls PetscSectionReset(), which destroys
       leafSection->perm. To keep this permutation set by the user, we grab
       the reference before calling PetscSectionSetNumFields() and set it
       back after. */
    PetscSectionGetPermutation(leafSection, &perm);
    PetscObjectReference((PetscObject)perm);
    PetscSectionSetNumFields(leafSection, numFields);
    PetscSectionSetPermutation(leafSection, perm);
    ISDestroy(&perm);
  }
  PetscMalloc1(numFields + 2, &sub);
  sub[1] = rootSection->bc ? PETSC_TRUE : PETSC_FALSE;
  for (f = 0; f < numFields; ++f) {
    PetscSectionSym sym, dsym = NULL;
    const char     *name    = NULL;
    PetscInt        numComp = 0;

    sub[2 + f] = rootSection->field[f]->bc ? PETSC_TRUE : PETSC_FALSE;
    PetscSectionGetFieldComponents(rootSection, f, &numComp);
    PetscSectionGetFieldName(rootSection, f, &name);
    PetscSectionGetFieldSym(rootSection, f, &sym);
    if (sym) PetscSectionSymDistribute(sym, sf, &dsym);
    PetscSectionSetFieldComponents(leafSection, f, numComp);
    PetscSectionSetFieldName(leafSection, f, name);
    PetscSectionSetFieldSym(leafSection, f, dsym);
    PetscSectionSymDestroy(&dsym);
    for (c = 0; c < rootSection->numFieldComponents[f]; ++c) {
      PetscSectionGetComponentName(rootSection, f, c, &name);
      PetscSectionSetComponentName(leafSection, f, c, name);
    }
  }
  PetscSectionGetChart(rootSection, &rpStart, &rpEnd);
  PetscSFGetGraph(sf, &nroots, NULL, NULL, NULL);
  rpEnd = PetscMin(rpEnd, nroots);
  rpEnd = PetscMax(rpStart, rpEnd);
  /* see if we can avoid creating the embedded SF, since it can cost more than an allreduce */
  sub[0] = (PetscBool)(nroots != rpEnd - rpStart);
  MPIU_Allreduce(MPI_IN_PLACE, sub, 2 + numFields, MPIU_BOOL, MPI_LOR, PetscObjectComm((PetscObject)sf));
  if (sub[0]) {
    ISCreateStride(PETSC_COMM_SELF, rpEnd - rpStart, rpStart, 1, &selected);
    ISGetIndices(selected, &indices);
    PetscSFCreateEmbeddedRootSF(sf, rpEnd - rpStart, indices, &embedSF);
    ISRestoreIndices(selected, &indices);
    ISDestroy(&selected);
  } else {
    PetscObjectReference((PetscObject)sf);
    embedSF = sf;
  }
  PetscSFGetLeafRange(embedSF, &lpStart, &lpEnd);
  lpEnd++;

  PetscSectionSetChart(leafSection, lpStart, lpEnd);

  /* Constrained dof section */
  hasc = sub[1];
  for (f = 0; f < numFields; ++f) hasc = (PetscBool)(hasc || sub[2 + f]);

  /* Could fuse these at the cost of copies and extra allocation */
  PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->atlasDof[-rpStart], &leafSection->atlasDof[-lpStart], MPI_REPLACE);
  PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->atlasDof[-rpStart], &leafSection->atlasDof[-lpStart], MPI_REPLACE);
  if (sub[1]) {
    PetscSectionCheckConstraints_Private(rootSection);
    PetscSectionCheckConstraints_Private(leafSection);
    PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->bc->atlasDof[-rpStart], &leafSection->bc->atlasDof[-lpStart], MPI_REPLACE);
    PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->bc->atlasDof[-rpStart], &leafSection->bc->atlasDof[-lpStart], MPI_REPLACE);
  }
  for (f = 0; f < numFields; ++f) {
    PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->field[f]->atlasDof[-rpStart], &leafSection->field[f]->atlasDof[-lpStart], MPI_REPLACE);
    PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->field[f]->atlasDof[-rpStart], &leafSection->field[f]->atlasDof[-lpStart], MPI_REPLACE);
    if (sub[2 + f]) {
      PetscSectionCheckConstraints_Private(rootSection->field[f]);
      PetscSectionCheckConstraints_Private(leafSection->field[f]);
      PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasDof[-rpStart], &leafSection->field[f]->bc->atlasDof[-lpStart], MPI_REPLACE);
      PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasDof[-rpStart], &leafSection->field[f]->bc->atlasDof[-lpStart], MPI_REPLACE);
    }
  }
  if (remoteOffsets) {
    PetscMalloc1(lpEnd - lpStart, remoteOffsets);
    PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
    PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
  }
  PetscSectionInvalidateMaxDof_Internal(leafSection);
  PetscSectionSetUp(leafSection);
  if (hasc) { /* need to communicate bcIndices */
    PetscSF   bcSF;
    PetscInt *rOffBc;

    PetscMalloc1(lpEnd - lpStart, &rOffBc);
    if (sub[1]) {
      PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
      PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
      PetscSFCreateSectionSF(embedSF, rootSection->bc, rOffBc, leafSection->bc, &bcSF);
      PetscSFBcastBegin(bcSF, MPIU_INT, rootSection->bcIndices, leafSection->bcIndices, MPI_REPLACE);
      PetscSFBcastEnd(bcSF, MPIU_INT, rootSection->bcIndices, leafSection->bcIndices, MPI_REPLACE);
      PetscSFDestroy(&bcSF);
    }
    for (f = 0; f < numFields; ++f) {
      if (sub[2 + f]) {
        PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
        PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
        PetscSFCreateSectionSF(embedSF, rootSection->field[f]->bc, rOffBc, leafSection->field[f]->bc, &bcSF);
        PetscSFBcastBegin(bcSF, MPIU_INT, rootSection->field[f]->bcIndices, leafSection->field[f]->bcIndices, MPI_REPLACE);
        PetscSFBcastEnd(bcSF, MPIU_INT, rootSection->field[f]->bcIndices, leafSection->field[f]->bcIndices, MPI_REPLACE);
        PetscSFDestroy(&bcSF);
      }
    }
    PetscFree(rOffBc);
  }
  PetscSFDestroy(&embedSF);
  PetscFree(sub);
  PetscLogEventEnd(PETSCSF_DistSect, sf, 0, 0, 0);
  return 0;
}
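
/* Usage sketch (illustrative): distribute rootSection along the point SF pointSF into
   a new leaf section, keeping the remote offsets needed to later build the dof SF
   with PetscSFCreateSectionSF(). pointSF and rootSection are assumed to exist.

     PetscSection leafSection;
     PetscInt    *remoteOffsets;

     PetscSectionCreate(PetscObjectComm((PetscObject)pointSF), &leafSection);
     PetscSFDistributeSection(pointSF, rootSection, &remoteOffsets, leafSection);
*/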
/*@C
  PetscSFCreateRemoteOffsets - Create offsets for point data on remote processes

  Collective on sf

  Input Parameters:
+ sf          - The SF
. rootSection - Data layout of remote points for outgoing data (this is layout for SF roots)
- leafSection - Data layout of local points for incoming data (this is layout for SF leaves)

  Output Parameter:
. remoteOffsets - Offsets for point data on remote processes (these are offsets from the root section), or NULL

  Level: developer

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFCreateRemoteOffsets(PetscSF sf, PetscSection rootSection, PetscSection leafSection, PetscInt **remoteOffsets)
{
  PetscSF         embedSF;
  const PetscInt *indices;
  IS              selected;
  PetscInt        numRoots, rpStart = 0, rpEnd = 0, lpStart = 0, lpEnd = 0;

  *remoteOffsets = NULL;
  PetscSFGetGraph(sf, &numRoots, NULL, NULL, NULL);
  if (numRoots < 0) return 0;
  PetscLogEventBegin(PETSCSF_RemoteOff, sf, 0, 0, 0);
  PetscSectionGetChart(rootSection, &rpStart, &rpEnd);
  PetscSectionGetChart(leafSection, &lpStart, &lpEnd);
  ISCreateStride(PETSC_COMM_SELF, rpEnd - rpStart, rpStart, 1, &selected);
  ISGetIndices(selected, &indices);
  PetscSFCreateEmbeddedRootSF(sf, rpEnd - rpStart, indices, &embedSF);
  ISRestoreIndices(selected, &indices);
  ISDestroy(&selected);
  PetscCalloc1(lpEnd - lpStart, remoteOffsets);
  PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
  PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
  PetscSFDestroy(&embedSF);
  PetscLogEventEnd(PETSCSF_RemoteOff, sf, 0, 0, 0);
  return 0;
}
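
/* Usage sketch (illustrative): when the leaf section was built by other means than
   PetscSFDistributeSection(), the remote offsets can still be recovered afterwards.

     PetscInt *remoteOffsets;

     PetscSFCreateRemoteOffsets(pointSF, rootSection, leafSection, &remoteOffsets);
*/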
/*@C
  PetscSFCreateSectionSF - Create an expanded SF of dofs, assuming the input SF relates points

  Collective on sf

  Input Parameters:
+ sf            - The SF
. rootSection   - Data layout of remote points for outgoing data (this is usually the serial section)
. remoteOffsets - Offsets for point data on remote processes (these are offsets from the root section), or NULL
- leafSection   - Data layout of local points for incoming data (this is the distributed section)

  Output Parameter:
. sectionSF - The new SF

  Note:
  Either rootSection or remoteOffsets can be specified

  Level: advanced

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFCreateSectionSF(PetscSF sf, PetscSection rootSection, PetscInt remoteOffsets[], PetscSection leafSection, PetscSF *sectionSF)
{
  MPI_Comm           comm;
  const PetscInt    *localPoints;
  const PetscSFNode *remotePoints;
  PetscInt           lpStart, lpEnd;
  PetscInt           numRoots, numSectionRoots, numPoints, numIndices = 0;
  PetscInt          *localIndices;
  PetscSFNode       *remoteIndices;
  PetscInt           i, ind;

  PetscObjectGetComm((PetscObject)sf, &comm);
  PetscSFCreate(comm, sectionSF);
  PetscSectionGetChart(leafSection, &lpStart, &lpEnd);
  PetscSectionGetStorageSize(rootSection, &numSectionRoots);
  PetscSFGetGraph(sf, &numRoots, &numPoints, &localPoints, &remotePoints);
  if (numRoots < 0) return 0;
  PetscLogEventBegin(PETSCSF_SectSF, sf, 0, 0, 0);
  /* Count indices: each leaf point contributes as many indices as its dof count in the leaf section */
  for (i = 0; i < numPoints; ++i) {
    PetscInt localPoint = localPoints ? localPoints[i] : i;
    PetscInt dof;

    if ((localPoint >= lpStart) && (localPoint < lpEnd)) {
      PetscSectionGetDof(leafSection, localPoint, &dof);
      numIndices += dof < 0 ? 0 : dof;
    }
  }
  PetscMalloc1(numIndices, &localIndices);
  PetscMalloc1(numIndices, &remoteIndices);
  /* Create new index graph */
  for (i = 0, ind = 0; i < numPoints; ++i) {
    PetscInt localPoint = localPoints ? localPoints[i] : i;
    PetscInt rank       = remotePoints[i].rank;

    if ((localPoint >= lpStart) && (localPoint < lpEnd)) {
      PetscInt remoteOffset = remoteOffsets[localPoint - lpStart];
      PetscInt loff, dof, d;

      PetscSectionGetOffset(leafSection, localPoint, &loff);
      PetscSectionGetDof(leafSection, localPoint, &dof);
      for (d = 0; d < dof; ++d, ++ind) {
        localIndices[ind]        = loff + d;
        remoteIndices[ind].rank  = rank;
        remoteIndices[ind].index = remoteOffset + d;
      }
    }
  }

  PetscSFSetGraph(*sectionSF, numSectionRoots, numIndices, localIndices, PETSC_OWN_POINTER, remoteIndices, PETSC_OWN_POINTER);
  PetscSFSetUp(*sectionSF);
  PetscLogEventEnd(PETSCSF_SectSF, sf, 0, 0, 0);
  return 0;
}
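
/* Usage sketch (illustrative): the typical pipeline combining the routines above to
   push point data from roots to leaves. pointSF, rootSection, rootData, and leafData
   are assumed to exist; datatypes other than MPIU_SCALAR work the same way.

     PetscSection leafSection;
     PetscInt    *remoteOffsets;
     PetscSF      sectionSF;

     PetscSectionCreate(PetscObjectComm((PetscObject)pointSF), &leafSection);
     PetscSFDistributeSection(pointSF, rootSection, &remoteOffsets, leafSection);
     PetscSFCreateSectionSF(pointSF, rootSection, remoteOffsets, leafSection, &sectionSF);
     PetscFree(remoteOffsets);
     PetscSFBcastBegin(sectionSF, MPIU_SCALAR, rootData, leafData, MPI_REPLACE);
     PetscSFBcastEnd(sectionSF, MPIU_SCALAR, rootData, leafData, MPI_REPLACE);
     PetscSFDestroy(&sectionSF);
*/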
/*@C
  PetscSFCreateFromLayouts - Creates a parallel star forest mapping two PetscLayout objects

  Collective

  Input Parameters:
+ rmap - PetscLayout defining the global root space
- lmap - PetscLayout defining the global leaf space

  Output Parameter:
. sf - The parallel star forest

  Level: intermediate

.seealso: `PetscSFCreate()`, `PetscLayoutCreate()`, `PetscSFSetGraphLayout()`
@*/
PetscErrorCode PetscSFCreateFromLayouts(PetscLayout rmap, PetscLayout lmap, PetscSF *sf)
{
  PetscInt     i, nroots, nleaves = 0;
  PetscInt     rN, lst, len;
  PetscMPIInt  owner = -1;
  PetscSFNode *remote;
  MPI_Comm     rcomm = rmap->comm;
  MPI_Comm     lcomm = lmap->comm;
  PetscMPIInt  flg;

  MPI_Comm_compare(rcomm, lcomm, &flg);

  PetscSFCreate(rcomm, sf);
  PetscLayoutGetLocalSize(rmap, &nroots);
  PetscLayoutGetSize(rmap, &rN);
  PetscLayoutGetRange(lmap, &lst, &len);
  PetscMalloc1(len - lst, &remote);
  for (i = lst; i < len && i < rN; i++) {
    if (owner < -1 || i >= rmap->range[owner + 1]) PetscLayoutFindOwner(rmap, i, &owner);
    remote[nleaves].rank  = owner;
    remote[nleaves].index = i - rmap->range[owner];
    nleaves++;
  }
  PetscSFSetGraph(*sf, nroots, nleaves, NULL, PETSC_OWN_POINTER, remote, PETSC_COPY_VALUES);
  PetscFree(remote);
  return 0;
}
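
/* Usage sketch (illustrative): map between two set-up layouts of the same global
   space, e.g. to move array entries from the partitioning rmap to the partitioning
   lmap. rootArray and leafArray are hypothetical local arrays sized by the layouts.

     PetscSF sf;

     PetscSFCreateFromLayouts(rmap, lmap, &sf);
     PetscSFBcastBegin(sf, MPIU_SCALAR, rootArray, leafArray, MPI_REPLACE);
     PetscSFBcastEnd(sf, MPIU_SCALAR, rootArray, leafArray, MPI_REPLACE);
     PetscSFDestroy(&sf);
*/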
/* TODO: handle nooffprocentries like MatZeroRowsMapLocal_Private, since this code is the same */
PetscErrorCode PetscLayoutMapLocal(PetscLayout map, PetscInt N, const PetscInt idxs[], PetscInt *on, PetscInt **oidxs, PetscInt **ogidxs)
{
  PetscInt    *owners = map->range;
  PetscInt     n      = map->n;
  PetscSF      sf;
  PetscInt    *lidxs, *work = NULL;
  PetscSFNode *ridxs;
  PetscMPIInt  rank, p = 0;
  PetscInt     r, len = 0;

  if (on) *on = 0; /* squelch -Wmaybe-uninitialized */
  /* Create SF where leaves are input idxs and roots are owned idxs */
  MPI_Comm_rank(map->comm, &rank);
  PetscMalloc1(n, &lidxs);
  for (r = 0; r < n; ++r) lidxs[r] = -1;
  PetscMalloc1(N, &ridxs);
  for (r = 0; r < N; ++r) {
    const PetscInt idx = idxs[r];

    if (idx < owners[p] || owners[p + 1] <= idx) { /* short-circuit the search if the last p owns this idx too */
      PetscLayoutFindOwner(map, idx, &p);
    }
    ridxs[r].rank  = p;
    ridxs[r].index = idxs[r] - owners[p];
  }
  PetscSFCreate(map->comm, &sf);
  PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, ridxs, PETSC_OWN_POINTER);
  PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *)idxs, lidxs, MPI_LOR);
  PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *)idxs, lidxs, MPI_LOR);
  if (ogidxs) { /* communicate global idxs */
    PetscInt cum = 0, start, *work2;

    PetscMalloc1(n, &work);
    PetscCalloc1(N, &work2);
    for (r = 0; r < N; ++r)
      if (idxs[r] >= 0) cum++;
    MPI_Scan(&cum, &start, 1, MPIU_INT, MPI_SUM, map->comm);
    start -= cum;
    cum = 0;
    for (r = 0; r < N; ++r)
      if (idxs[r] >= 0) work2[r] = start + cum++;
    PetscSFReduceBegin(sf, MPIU_INT, work2, work, MPI_REPLACE);
    PetscSFReduceEnd(sf, MPIU_INT, work2, work, MPI_REPLACE);
    PetscFree(work2);
  }
  PetscSFDestroy(&sf);
  /* Compress and put in indices */
  for (r = 0; r < n; ++r)
    if (lidxs[r] >= 0) {
      if (work) work[len] = work[r];
      lidxs[len++] = r;
    }
  if (on) *on = len;
  if (oidxs) *oidxs = lidxs;
  if (ogidxs) *ogidxs = work;
  return 0;
}
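
/* Usage sketch (illustrative): given a set-up layout map and a list idxs of nIdx
   global indices, find which locally owned entries occur in the list. localIdx and
   globalIdx are hypothetical output names.

     PetscInt nLocal, *localIdx, *globalIdx;

     PetscLayoutMapLocal(map, nIdx, idxs, &nLocal, &localIdx, &globalIdx);
     ...
     PetscFree(localIdx);
     PetscFree(globalIdx);
*/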
/*@
  PetscSFCreateByMatchingIndices - Create SF by matching root and leaf indices

  Collective

  Input Parameters:
+ layout           - PetscLayout defining the global index space and the rank that brokers each index
. numRootIndices   - size of rootIndices
. rootIndices      - PetscInt array of global indices of which this process requests ownership
. rootLocalIndices - root local index permutation (NULL if no permutation)
. rootLocalOffset  - offset to be added to root local indices
. numLeafIndices   - size of leafIndices
. leafIndices      - PetscInt array of global indices with which this process requires data associated
. leafLocalIndices - leaf local index permutation (NULL if no permutation)
- leafLocalOffset  - offset to be added to leaf local indices

  Output Parameters:
+ sfA - star forest representing the communication pattern from the layout space to the leaf space (NULL if not needed)
- sf  - star forest representing the communication pattern from the root space to the leaf space

  Example 1:
$
$  rank            : 0       1    2
$  rootIndices     : [1 0 2] [3]  [3]
$  rootLocalOffset : 100     200  300
$  layout          : [0 1]   [2]  [3]
$  leafIndices     : [0]     [2]  [0 3]
$  leafLocalOffset : 400     500  600
$
  would build the following SF
$
$  [0] 400 <- (0,101)
$  [1] 500 <- (0,102)
$  [2] 600 <- (0,101)
$  [2] 601 <- (2,300)
$
  Example 2:
$
$  rank            : 0               1               2
$  rootIndices     : [1 0 2]         [3]             [3]
$  rootLocalOffset : 100             200             300
$  layout          : [0 1]           [2]             [3]
$  leafIndices     : rootIndices     rootIndices     rootIndices
$  leafLocalOffset : rootLocalOffset rootLocalOffset rootLocalOffset
$
  would build the following SF
$
$  [1] 200 <- (2,300)
$
  Example 3:
$
$  No process requests ownership of global index 1, and no process needs it.
$
$  rank            : 0     1    2
$  numRootIndices  : 2     1    1
$  rootIndices     : [0 2] [3]  [3]
$  rootLocalOffset : 100   200  300
$  layout          : [0 1] [2]  [3]
$  numLeafIndices  : 1     1    2
$  leafIndices     : [0]   [2]  [0 3]
$  leafLocalOffset : 400   500  600
$
  would build the following SF
$
$  [0] 400 <- (0,100)
$  [1] 500 <- (0,101)
$  [2] 600 <- (0,100)
$  [2] 601 <- (2,300)
$

  Notes:
  The layout parameter represents any partitioning of [0, N), where N is the total number of global indices, and its
  local size can be set to PETSC_DECIDE.

  If a global index x lies in the partition owned by process i, each process whose rootIndices contains x requests
  ownership of x and sends its own rank and the local index of x to process i.
  If multiple processes request ownership of x, the one with the highest rank is to own x.
  Process i then broadcasts the ownership information, so that each process whose leafIndices contains x knows the
  ownership information of x.
  The output sf is constructed by associating each leaf point to a root point in this way.

  Suppose there is point data ordered according to the global indices and partitioned according to the given layout.
  The optional output PetscSF object sfA can be used to push such data to leaf points.

  All indices in rootIndices and leafIndices must lie in the layout range. The union (over all processes) of rootIndices
  must cover that of leafIndices, but need not cover the entire layout.

  If (leafIndices, leafLocalIndices, leafLocalOffset) == (rootIndices, rootLocalIndices, rootLocalOffset), the output
  star forest is almost the identity, so it will only include the non-trivial part of the map.

  Developer Notes:
  The current approach, in which the process with the highest rank gains ownership, may cause load imbalance; consider
  using hash(rank, root_local_index) as the bid for the ownership determination.

  Level: advanced

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFCreateByMatchingIndices(PetscLayout layout, PetscInt numRootIndices, const PetscInt *rootIndices, const PetscInt *rootLocalIndices, PetscInt rootLocalOffset, PetscInt numLeafIndices, const PetscInt *leafIndices, const PetscInt *leafLocalIndices, PetscInt leafLocalOffset, PetscSF *sfA, PetscSF *sf)
{
  MPI_Comm     comm = layout->comm;
  PetscMPIInt  size, rank;
  PetscSF      sf1;
  PetscSFNode *owners, *buffer, *iremote;
  PetscInt    *ilocal, nleaves, N, n, i;
#if defined(PETSC_USE_DEBUG)
  PetscInt N1;
#endif
  PetscBool flag;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);
  PetscLayoutSetUp(layout);
  PetscLayoutGetSize(layout, &N);
  PetscLayoutGetLocalSize(layout, &n);
  flag = (PetscBool)(leafIndices == rootIndices);
  MPIU_Allreduce(MPI_IN_PLACE, &flag, 1, MPIU_BOOL, MPI_LAND, comm);

#if defined(PETSC_USE_DEBUG)
  N1 = PETSC_MIN_INT;
  for (i = 0; i < numRootIndices; i++)
    if (rootIndices[i] > N1) N1 = rootIndices[i];
  MPI_Allreduce(MPI_IN_PLACE, &N1, 1, MPIU_INT, MPI_MAX, comm);

  if (!flag) {
    N1 = PETSC_MIN_INT;
    for (i = 0; i < numLeafIndices; i++)
      if (leafIndices[i] > N1) N1 = leafIndices[i];
    MPI_Allreduce(MPI_IN_PLACE, &N1, 1, MPIU_INT, MPI_MAX, comm);
  }
#endif
  /* Reduce: owners -> buffer */
  PetscMalloc1(n, &buffer);
  PetscSFCreate(comm, &sf1);
  PetscSFSetFromOptions(sf1);
  PetscSFSetGraphLayout(sf1, layout, numRootIndices, NULL, PETSC_OWN_POINTER, rootIndices);
  PetscMalloc1(numRootIndices, &owners);
  for (i = 0; i < numRootIndices; ++i) {
    owners[i].rank  = rank;
    owners[i].index = rootLocalOffset + (rootLocalIndices ? rootLocalIndices[i] : i);
  }
  for (i = 0; i < n; ++i) {
    buffer[i].index = -1;
    buffer[i].rank  = -1;
  }
  PetscSFReduceBegin(sf1, MPIU_2INT, owners, buffer, MPI_MAXLOC);
  PetscSFReduceEnd(sf1, MPIU_2INT, owners, buffer, MPI_MAXLOC);
  /* Bcast: buffer -> owners */
  if (!flag) {
    /* leafIndices is different from rootIndices */
    PetscFree(owners);
    PetscSFSetGraphLayout(sf1, layout, numLeafIndices, NULL, PETSC_OWN_POINTER, leafIndices);
    PetscMalloc1(numLeafIndices, &owners);
  }
  PetscSFBcastBegin(sf1, MPIU_2INT, buffer, owners, MPI_REPLACE);
  PetscSFBcastEnd(sf1, MPIU_2INT, buffer, owners, MPI_REPLACE);

  PetscFree(buffer);
  if (sfA) {
    *sfA = sf1;
  } else PetscSFDestroy(&sf1);
  /* Create sf */
  if (flag && rootLocalIndices == leafLocalIndices && leafLocalOffset == rootLocalOffset) {
    /* leaf space == root space */
    for (i = 0, nleaves = 0; i < numLeafIndices; ++i)
      if (owners[i].rank != rank) ++nleaves;
    PetscMalloc1(nleaves, &ilocal);
    PetscMalloc1(nleaves, &iremote);
    for (i = 0, nleaves = 0; i < numLeafIndices; ++i) {
      if (owners[i].rank != rank) {
        ilocal[nleaves]        = leafLocalOffset + i;
        iremote[nleaves].rank  = owners[i].rank;
        iremote[nleaves].index = owners[i].index;
        ++nleaves;
      }
    }
    PetscFree(owners);
  } else {
    nleaves = numLeafIndices;
    PetscMalloc1(nleaves, &ilocal);
    for (i = 0; i < nleaves; ++i) ilocal[i] = leafLocalOffset + (leafLocalIndices ? leafLocalIndices[i] : i);
    iremote = owners;
  }
  PetscSFCreate(comm, sf);
  PetscSFSetFromOptions(*sf);
  PetscSFSetGraph(*sf, rootLocalOffset + numRootIndices, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
  return 0;
}
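
/* Usage sketch (illustrative, mirroring Example 1 above): the layout partitions the
   four global indices; rootIndices, leafIndices, and the offsets are per-rank arrays
   as in the example. The routine calls PetscLayoutSetUp() on the layout itself.

     PetscLayout layout;
     PetscSF     sfA, sf;

     PetscLayoutCreate(comm, &layout);
     PetscLayoutSetSize(layout, 4);
     PetscSFCreateByMatchingIndices(layout, numRootIndices, rootIndices, NULL, rootLocalOffset, numLeafIndices, leafIndices, NULL, leafLocalOffset, &sfA, &sf);
     PetscLayoutDestroy(&layout);
*/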