Actual source code: sfutils.c

#include <petsc/private/sfimpl.h>
#include <petsc/private/sectionimpl.h>

/*@
   PetscSFSetGraphLayout - Set a parallel star forest via global indices and a PetscLayout

   Collective

   Input Parameters:
+  sf - star forest
.  layout - PetscLayout defining the global space for roots
.  nleaves - number of leaf vertices on the current process, each of these references a root on any process
.  ilocal - locations of leaves in leafdata buffers, pass NULL for contiguous storage
.  localmode - copy mode for ilocal
-  gremote - root vertices in global numbering corresponding to leaves in ilocal

   Level: intermediate

   Notes:
   Global indices must lie in [0, N) where N is the global size of layout.
   Leaf indices in ilocal get sorted; if localmode is PETSC_OWN_POINTER, this means the user-provided array is sorted in place.

   Developer Notes:
   Local indices that form the identity permutation on [0, nleaves) are discarded, since they
   encode contiguous storage. In that case, if localmode is PETSC_OWN_POINTER, the memory is
   deallocated because it is not needed.
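
   Example Usage:
   A minimal sketch, assuming an MPI communicator comm over which every process owns two roots and
   pulls one leaf from the next process (hypothetical setup; error checking omitted):
.vb
   PetscSF     sf;
   PetscLayout layout;
   PetscMPIInt rank, size;
   PetscInt    gremote[1];

   MPI_Comm_rank(comm, &rank);
   MPI_Comm_size(comm, &size);
   PetscLayoutCreate(comm, &layout);
   PetscLayoutSetLocalSize(layout, 2);
   PetscLayoutSetUp(layout);
   gremote[0] = 2 * ((rank + 1) % size); /* global index of the next process's first root */
   PetscSFCreate(comm, &sf);
   PetscSFSetGraphLayout(sf, layout, 1, NULL, PETSC_COPY_VALUES, gremote);
   PetscLayoutDestroy(&layout); /* the layout is only read here, so it can be destroyed right away */
.ve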

.seealso: `PetscSFGetGraphLayout()`, `PetscSFCreate()`, `PetscSFView()`, `PetscSFSetGraph()`, `PetscSFGetGraph()`
@*/
PetscErrorCode PetscSFSetGraphLayout(PetscSF sf, PetscLayout layout, PetscInt nleaves, PetscInt *ilocal, PetscCopyMode localmode, const PetscInt *gremote)
{
  const PetscInt *range;
  PetscInt        i, nroots, ls = -1, ln = -1;
  PetscMPIInt     lr = -1;
  PetscSFNode    *remote;

  PetscLayoutGetLocalSize(layout, &nroots);
  PetscLayoutGetRanges(layout, &range);
  PetscMalloc1(nleaves, &remote);
  if (nleaves) ls = gremote[0] + 1; /* ensure the first iteration misses the cached range and triggers a search */
  for (i = 0; i < nleaves; i++) {
    const PetscInt idx = gremote[i] - ls;
    if (idx < 0 || idx >= ln) { /* short-circuit the search */
      PetscLayoutFindOwnerIndex(layout, gremote[i], &lr, &remote[i].index);
      remote[i].rank = lr;
      ls             = range[lr];
      ln             = range[lr + 1] - ls;
    } else {
      remote[i].rank  = lr;
      remote[i].index = idx;
    }
  }
  PetscSFSetGraph(sf, nroots, nleaves, ilocal, localmode, remote, PETSC_OWN_POINTER);
  return 0;
}

/*@C
   PetscSFGetGraphLayout - Get the global indices and PetscLayout that describe this star forest

   Collective

   Input Parameter:
.  sf - star forest

   Output Parameters:
+  layout - PetscLayout defining the global space for roots
.  nleaves - number of leaf vertices on the current process, each of these references a root on any process
.  ilocal - locations of leaves in leafdata buffers, or NULL for contiguous storage
-  gremote - root vertices in global numbering corresponding to leaves in ilocal

   Level: intermediate

   Notes:
   The outputs are such that passing them as inputs to `PetscSFSetGraphLayout()` would lead to the same star forest.
   The outputs layout and gremote are freshly created each time this function is called,
   so they need to be freed by the user and cannot be qualified as const.
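
   Example Usage:
   A minimal sketch recovering the layout and global root indices of an existing PetscSF sf
   (error checking omitted):
.vb
   PetscLayout     layout;
   PetscInt        nleaves;
   const PetscInt *ilocal;
   PetscInt       *gremote;

   PetscSFGetGraphLayout(sf, &layout, &nleaves, &ilocal, &gremote);
   /* ilocal is borrowed from the SF; layout and gremote are owned by the caller */
   PetscLayoutDestroy(&layout);
   PetscFree(gremote);
.ve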

.seealso: `PetscSFSetGraphLayout()`, `PetscSFCreate()`, `PetscSFView()`, `PetscSFSetGraph()`, `PetscSFGetGraph()`
@*/
PetscErrorCode PetscSFGetGraphLayout(PetscSF sf, PetscLayout *layout, PetscInt *nleaves, const PetscInt *ilocal[], PetscInt *gremote[])
{
  PetscInt           nr, nl;
  const PetscSFNode *ir;
  PetscLayout        lt;

  PetscSFGetGraph(sf, &nr, &nl, ilocal, &ir);
  PetscLayoutCreateFromSizes(PetscObjectComm((PetscObject)sf), nr, PETSC_DECIDE, 1, &lt);
  if (gremote) {
    PetscInt        i;
    const PetscInt *range;
    PetscInt       *gr;

    PetscLayoutGetRanges(lt, &range);
    PetscMalloc1(nl, &gr);
    for (i = 0; i < nl; i++) gr[i] = range[ir[i].rank] + ir[i].index;
    *gremote = gr;
  }
  if (nleaves) *nleaves = nl;
  if (layout) *layout = lt;
  else PetscLayoutDestroy(&lt);
  return 0;
}

/*@
  PetscSFSetGraphSection - Sets the PetscSF graph encoding the parallel dof overlap based upon the PetscSections
  describing the data layout.

  Input Parameters:
+ sf - The SF
. localSection - PetscSection describing the local data layout
- globalSection - PetscSection describing the global data layout

  Level: developer
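
  Example Usage:
  A minimal sketch, assuming localSection and globalSection describe the same points, with
  globalSection typically obtained from PetscSectionCreateGlobalSection() (error checking omitted):
.vb
  PetscSF dofSF;

  PetscSFCreate(PetscObjectComm((PetscObject)localSection), &dofSF);
  PetscSFSetGraphSection(dofSF, localSection, globalSection);
.ve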

.seealso: `PetscSFSetGraph()`, `PetscSFSetGraphLayout()`
@*/
PetscErrorCode PetscSFSetGraphSection(PetscSF sf, PetscSection localSection, PetscSection globalSection)
{
  MPI_Comm        comm;
  PetscLayout     layout;
  const PetscInt *ranges;
  PetscInt       *local;
  PetscSFNode    *remote;
  PetscInt        pStart, pEnd, p, nroots, nleaves = 0, l;
  PetscMPIInt     size, rank;

  PetscObjectGetComm((PetscObject)sf, &comm);
  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);
  PetscSectionGetChart(globalSection, &pStart, &pEnd);
  PetscSectionGetConstrainedStorageSize(globalSection, &nroots);
  PetscLayoutCreate(comm, &layout);
  PetscLayoutSetBlockSize(layout, 1);
  PetscLayoutSetLocalSize(layout, nroots);
  PetscLayoutSetUp(layout);
  PetscLayoutGetRanges(layout, &ranges);
  for (p = pStart; p < pEnd; ++p) {
    PetscInt gdof, gcdof;

    PetscSectionGetDof(globalSection, p, &gdof);
    PetscSectionGetConstraintDof(globalSection, p, &gcdof);
    /* gdof < 0 marks a remotely owned point encoding -(gdof+1) dofs */
    nleaves += gdof < 0 ? -(gdof + 1) - gcdof : gdof - gcdof;
  }
  PetscMalloc1(nleaves, &local);
  PetscMalloc1(nleaves, &remote);
  for (p = pStart, l = 0; p < pEnd; ++p) {
    const PetscInt *cind;
    PetscInt        dof, cdof, off, gdof, gcdof, goff, gsize, d, c;

    PetscSectionGetDof(localSection, p, &dof);
    PetscSectionGetOffset(localSection, p, &off);
    PetscSectionGetConstraintDof(localSection, p, &cdof);
    PetscSectionGetConstraintIndices(localSection, p, &cind);
    PetscSectionGetDof(globalSection, p, &gdof);
    PetscSectionGetConstraintDof(globalSection, p, &gcdof);
    PetscSectionGetOffset(globalSection, p, &goff);
    if (!gdof) continue; /* Censored point */
    gsize = gdof < 0 ? -(gdof + 1) - gcdof : gdof - gcdof;
    if (gsize != dof - cdof) {
      cdof = 0; /* Ignore constraints */
    }
    for (d = 0, c = 0; d < dof; ++d) {
      if ((c < cdof) && (cind[c] == d)) {
        ++c;
        continue;
      }
      local[l + d - c] = off + d;
    }
    if (gdof < 0) {
      /* Remotely owned point: recover the global offset and locate its owner in the layout */
      for (d = 0; d < gsize; ++d, ++l) {
        PetscInt offset = -(goff + 1) + d, r;

        PetscFindInt(offset, size + 1, ranges, &r);
        if (r < 0) r = -(r + 2);
        remote[l].rank  = r;
        remote[l].index = offset - ranges[r];
      }
    } else {
      /* Locally owned point: the leaf points to this process's own root */
      for (d = 0; d < gsize; ++d, ++l) {
        remote[l].rank  = rank;
        remote[l].index = goff + d - ranges[rank];
      }
    }
  }
  PetscLayoutDestroy(&layout);
  PetscSFSetGraph(sf, nroots, nleaves, local, PETSC_OWN_POINTER, remote, PETSC_OWN_POINTER);
  return 0;
}

/*@C
  PetscSFDistributeSection - Create a new PetscSection reorganized, moving the layout from the roots to the leaves of the SF

  Collective on sf

  Input Parameters:
+ sf - The SF
- rootSection - Section defined on root space

  Output Parameters:
+ remoteOffsets - root offsets in leaf storage, or NULL
- leafSection - Section defined on the leaf space

  Level: advanced
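
  Example Usage:
  A minimal sketch distributing an existing rootSection along a point SF sf (error checking omitted):
.vb
  PetscSection leafSection;
  PetscInt    *remoteOffsets;

  PetscSectionCreate(PetscObjectComm((PetscObject)sf), &leafSection);
  PetscSFDistributeSection(sf, rootSection, &remoteOffsets, leafSection);
  /* remoteOffsets is owned by the caller and can be passed to PetscSFCreateSectionSF() */
  PetscFree(remoteOffsets);
  PetscSectionDestroy(&leafSection);
.ve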

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFDistributeSection(PetscSF sf, PetscSection rootSection, PetscInt **remoteOffsets, PetscSection leafSection)
{
  PetscSF         embedSF;
  const PetscInt *indices;
  IS              selected;
  PetscInt        numFields, nroots, rpStart, rpEnd, lpStart = PETSC_MAX_INT, lpEnd = -1, f, c;
  PetscBool      *sub, hasc;

  PetscLogEventBegin(PETSCSF_DistSect, sf, 0, 0, 0);
  PetscSectionGetNumFields(rootSection, &numFields);
  if (numFields) {
    IS perm;

    /* PetscSectionSetNumFields() calls PetscSectionReset(), which destroys
       leafSection->perm. To keep this permutation set by the user, we grab
       the reference before calling PetscSectionSetNumFields() and set it
       back after. */
    PetscSectionGetPermutation(leafSection, &perm);
    PetscObjectReference((PetscObject)perm);
    PetscSectionSetNumFields(leafSection, numFields);
    PetscSectionSetPermutation(leafSection, perm);
    ISDestroy(&perm);
  }
  PetscMalloc1(numFields + 2, &sub);
  sub[1] = rootSection->bc ? PETSC_TRUE : PETSC_FALSE;
  for (f = 0; f < numFields; ++f) {
    PetscSectionSym sym, dsym = NULL;
    const char     *name    = NULL;
    PetscInt        numComp = 0;

    sub[2 + f] = rootSection->field[f]->bc ? PETSC_TRUE : PETSC_FALSE;
    PetscSectionGetFieldComponents(rootSection, f, &numComp);
    PetscSectionGetFieldName(rootSection, f, &name);
    PetscSectionGetFieldSym(rootSection, f, &sym);
    if (sym) PetscSectionSymDistribute(sym, sf, &dsym);
    PetscSectionSetFieldComponents(leafSection, f, numComp);
    PetscSectionSetFieldName(leafSection, f, name);
    PetscSectionSetFieldSym(leafSection, f, dsym);
    PetscSectionSymDestroy(&dsym);
    for (c = 0; c < rootSection->numFieldComponents[f]; ++c) {
      PetscSectionGetComponentName(rootSection, f, c, &name);
      PetscSectionSetComponentName(leafSection, f, c, name);
    }
  }
  PetscSectionGetChart(rootSection, &rpStart, &rpEnd);
  PetscSFGetGraph(sf, &nroots, NULL, NULL, NULL);
  rpEnd = PetscMin(rpEnd, nroots);
  rpEnd = PetscMax(rpStart, rpEnd);
  /* see if we can avoid creating the embedded SF, since it can cost more than an allreduce */
  sub[0] = (PetscBool)(nroots != rpEnd - rpStart);
  MPIU_Allreduce(MPI_IN_PLACE, sub, 2 + numFields, MPIU_BOOL, MPI_LOR, PetscObjectComm((PetscObject)sf));
  if (sub[0]) {
    ISCreateStride(PETSC_COMM_SELF, rpEnd - rpStart, rpStart, 1, &selected);
    ISGetIndices(selected, &indices);
    PetscSFCreateEmbeddedRootSF(sf, rpEnd - rpStart, indices, &embedSF);
    ISRestoreIndices(selected, &indices);
    ISDestroy(&selected);
  } else {
    PetscObjectReference((PetscObject)sf);
    embedSF = sf;
  }
  PetscSFGetLeafRange(embedSF, &lpStart, &lpEnd);
  lpEnd++;

  PetscSectionSetChart(leafSection, lpStart, lpEnd);

  /* Constrained dof section */
  hasc = sub[1];
  for (f = 0; f < numFields; ++f) hasc = (PetscBool)(hasc || sub[2 + f]);

  /* Could fuse these at the cost of copies and extra allocation */
  PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->atlasDof[-rpStart], &leafSection->atlasDof[-lpStart], MPI_REPLACE);
  PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->atlasDof[-rpStart], &leafSection->atlasDof[-lpStart], MPI_REPLACE);
  if (sub[1]) {
    PetscSectionCheckConstraints_Private(rootSection);
    PetscSectionCheckConstraints_Private(leafSection);
    PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->bc->atlasDof[-rpStart], &leafSection->bc->atlasDof[-lpStart], MPI_REPLACE);
    PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->bc->atlasDof[-rpStart], &leafSection->bc->atlasDof[-lpStart], MPI_REPLACE);
  }
  for (f = 0; f < numFields; ++f) {
    PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->field[f]->atlasDof[-rpStart], &leafSection->field[f]->atlasDof[-lpStart], MPI_REPLACE);
    PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->field[f]->atlasDof[-rpStart], &leafSection->field[f]->atlasDof[-lpStart], MPI_REPLACE);
    if (sub[2 + f]) {
      PetscSectionCheckConstraints_Private(rootSection->field[f]);
      PetscSectionCheckConstraints_Private(leafSection->field[f]);
      PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasDof[-rpStart], &leafSection->field[f]->bc->atlasDof[-lpStart], MPI_REPLACE);
      PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasDof[-rpStart], &leafSection->field[f]->bc->atlasDof[-lpStart], MPI_REPLACE);
    }
  }
  if (remoteOffsets) {
    PetscMalloc1(lpEnd - lpStart, remoteOffsets);
    PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
    PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
  }
  PetscSectionInvalidateMaxDof_Internal(leafSection);
  PetscSectionSetUp(leafSection);
  if (hasc) { /* need to communicate bcIndices */
    PetscSF   bcSF;
    PetscInt *rOffBc;

    PetscMalloc1(lpEnd - lpStart, &rOffBc);
    if (sub[1]) {
      PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
      PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
      PetscSFCreateSectionSF(embedSF, rootSection->bc, rOffBc, leafSection->bc, &bcSF);
      PetscSFBcastBegin(bcSF, MPIU_INT, rootSection->bcIndices, leafSection->bcIndices, MPI_REPLACE);
      PetscSFBcastEnd(bcSF, MPIU_INT, rootSection->bcIndices, leafSection->bcIndices, MPI_REPLACE);
      PetscSFDestroy(&bcSF);
    }
    for (f = 0; f < numFields; ++f) {
      if (sub[2 + f]) {
        PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
        PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->field[f]->bc->atlasOff[-rpStart], &rOffBc[-lpStart], MPI_REPLACE);
        PetscSFCreateSectionSF(embedSF, rootSection->field[f]->bc, rOffBc, leafSection->field[f]->bc, &bcSF);
        PetscSFBcastBegin(bcSF, MPIU_INT, rootSection->field[f]->bcIndices, leafSection->field[f]->bcIndices, MPI_REPLACE);
        PetscSFBcastEnd(bcSF, MPIU_INT, rootSection->field[f]->bcIndices, leafSection->field[f]->bcIndices, MPI_REPLACE);
        PetscSFDestroy(&bcSF);
      }
    }
    PetscFree(rOffBc);
  }
  PetscSFDestroy(&embedSF);
  PetscFree(sub);
  PetscLogEventEnd(PETSCSF_DistSect, sf, 0, 0, 0);
  return 0;
}

/*@C
  PetscSFCreateRemoteOffsets - Create offsets for point data on remote processes

  Collective on sf

  Input Parameters:
+ sf          - The SF
. rootSection - Data layout of remote points for outgoing data (this is layout for SF roots)
- leafSection - Data layout of local points for incoming data  (this is layout for SF leaves)

  Output Parameter:
. remoteOffsets - Offsets for point data on remote processes (these are offsets from the root section), or NULL

  Level: developer
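
  Example Usage:
  A minimal sketch, assuming sf relates the points of the two given sections (error checking omitted):
.vb
  PetscInt *remoteOffsets;

  PetscSFCreateRemoteOffsets(sf, rootSection, leafSection, &remoteOffsets);
  /* typically consumed by PetscSFCreateSectionSF(); free with PetscFree() when done */
  PetscFree(remoteOffsets);
.ve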

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFCreateRemoteOffsets(PetscSF sf, PetscSection rootSection, PetscSection leafSection, PetscInt **remoteOffsets)
{
  PetscSF         embedSF;
  const PetscInt *indices;
  IS              selected;
  PetscInt        numRoots, rpStart = 0, rpEnd = 0, lpStart = 0, lpEnd = 0;

  *remoteOffsets = NULL;
  PetscSFGetGraph(sf, &numRoots, NULL, NULL, NULL);
  if (numRoots < 0) return 0;
  PetscLogEventBegin(PETSCSF_RemoteOff, sf, 0, 0, 0);
  PetscSectionGetChart(rootSection, &rpStart, &rpEnd);
  PetscSectionGetChart(leafSection, &lpStart, &lpEnd);
  ISCreateStride(PETSC_COMM_SELF, rpEnd - rpStart, rpStart, 1, &selected);
  ISGetIndices(selected, &indices);
  PetscSFCreateEmbeddedRootSF(sf, rpEnd - rpStart, indices, &embedSF);
  ISRestoreIndices(selected, &indices);
  ISDestroy(&selected);
  PetscCalloc1(lpEnd - lpStart, remoteOffsets);
  PetscSFBcastBegin(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
  PetscSFBcastEnd(embedSF, MPIU_INT, &rootSection->atlasOff[-rpStart], &(*remoteOffsets)[-lpStart], MPI_REPLACE);
  PetscSFDestroy(&embedSF);
  PetscLogEventEnd(PETSCSF_RemoteOff, sf, 0, 0, 0);
  return 0;
}

/*@C
  PetscSFCreateSectionSF - Create an expanded SF of dofs, assuming the input SF relates points

  Collective on sf

  Input Parameters:
+ sf - The SF
. rootSection - Data layout of remote points for outgoing data (this is usually the serial section)
. remoteOffsets - Offsets for point data on remote processes (these are offsets from the root section), or NULL
- leafSection - Data layout of local points for incoming data  (this is the distributed section)

  Output Parameter:
. sectionSF - The new SF

  Notes:
  Either rootSection or remoteOffsets can be specified

  Level: advanced
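
  Example Usage:
  A minimal sketch, assuming remoteOffsets was obtained with PetscSFCreateRemoteOffsets() or
  PetscSFDistributeSection() for the same pair of sections, and rootArray/leafArray are
  PetscScalar arrays sized by the two sections (error checking omitted):
.vb
  PetscSF sectionSF;

  PetscSFCreateSectionSF(sf, rootSection, remoteOffsets, leafSection, &sectionSF);
  /* move dof data from roots to leaves */
  PetscSFBcastBegin(sectionSF, MPIU_SCALAR, rootArray, leafArray, MPI_REPLACE);
  PetscSFBcastEnd(sectionSF, MPIU_SCALAR, rootArray, leafArray, MPI_REPLACE);
  PetscSFDestroy(&sectionSF);
.ve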

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFCreateSectionSF(PetscSF sf, PetscSection rootSection, PetscInt remoteOffsets[], PetscSection leafSection, PetscSF *sectionSF)
{
  MPI_Comm           comm;
  const PetscInt    *localPoints;
  const PetscSFNode *remotePoints;
  PetscInt           lpStart, lpEnd;
  PetscInt           numRoots, numSectionRoots, numPoints, numIndices = 0;
  PetscInt          *localIndices;
  PetscSFNode       *remoteIndices;
  PetscInt           i, ind;

  PetscObjectGetComm((PetscObject)sf, &comm);
  PetscSFCreate(comm, sectionSF);
  PetscSectionGetChart(leafSection, &lpStart, &lpEnd);
  PetscSectionGetStorageSize(rootSection, &numSectionRoots);
  PetscSFGetGraph(sf, &numRoots, &numPoints, &localPoints, &remotePoints);
  if (numRoots < 0) return 0;
  PetscLogEventBegin(PETSCSF_SectSF, sf, 0, 0, 0);
  for (i = 0; i < numPoints; ++i) {
    PetscInt localPoint = localPoints ? localPoints[i] : i;
    PetscInt dof;

    if ((localPoint >= lpStart) && (localPoint < lpEnd)) {
      PetscSectionGetDof(leafSection, localPoint, &dof);
      numIndices += dof < 0 ? 0 : dof;
    }
  }
  PetscMalloc1(numIndices, &localIndices);
  PetscMalloc1(numIndices, &remoteIndices);
  /* Create new index graph */
  for (i = 0, ind = 0; i < numPoints; ++i) {
    PetscInt localPoint = localPoints ? localPoints[i] : i;
    PetscInt rank       = remotePoints[i].rank;

    if ((localPoint >= lpStart) && (localPoint < lpEnd)) {
      PetscInt remoteOffset = remoteOffsets[localPoint - lpStart];
      PetscInt loff, dof, d;

      PetscSectionGetOffset(leafSection, localPoint, &loff);
      PetscSectionGetDof(leafSection, localPoint, &dof);
      for (d = 0; d < dof; ++d, ++ind) {
        localIndices[ind]        = loff + d;
        remoteIndices[ind].rank  = rank;
        remoteIndices[ind].index = remoteOffset + d;
      }
    }
  }
  PetscSFSetGraph(*sectionSF, numSectionRoots, numIndices, localIndices, PETSC_OWN_POINTER, remoteIndices, PETSC_OWN_POINTER);
  PetscSFSetUp(*sectionSF);
  PetscLogEventEnd(PETSCSF_SectSF, sf, 0, 0, 0);
  return 0;
}

/*@C
   PetscSFCreateFromLayouts - Creates a parallel star forest mapping two PetscLayout objects

   Collective

   Input Parameters:
+  rmap - PetscLayout defining the global root space
-  lmap - PetscLayout defining the global leaf space

   Output Parameter:
.  sf - The parallel star forest

   Level: intermediate
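
   Example Usage:
   A minimal sketch mapping a root layout onto a leaf layout of the same global size, assuming an
   MPI communicator comm (error checking omitted):
.vb
   PetscLayout rmap, lmap;
   PetscSF     sf;

   PetscLayoutCreateFromSizes(comm, PETSC_DECIDE, 64, 1, &rmap);
   PetscLayoutCreateFromSizes(comm, PETSC_DECIDE, 64, 1, &lmap);
   PetscSFCreateFromLayouts(rmap, lmap, &sf);
   PetscLayoutDestroy(&rmap);
   PetscLayoutDestroy(&lmap);
   PetscSFDestroy(&sf);
.ve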

.seealso: `PetscSFCreate()`, `PetscLayoutCreate()`, `PetscSFSetGraphLayout()`
@*/
PetscErrorCode PetscSFCreateFromLayouts(PetscLayout rmap, PetscLayout lmap, PetscSF *sf)
{
  PetscInt     i, nroots, nleaves = 0;
  PetscInt     rN, lst, len;
  PetscMPIInt  owner = -1;
  PetscSFNode *remote;
  MPI_Comm     rcomm = rmap->comm;
  MPI_Comm     lcomm = lmap->comm;
  PetscMPIInt  flg;

  MPI_Comm_compare(rcomm, lcomm, &flg);
  PetscCheck(flg == MPI_CONGRUENT || flg == MPI_IDENT, rcomm, PETSC_ERR_SUP, "Layouts must be defined on congruent communicators");
  PetscSFCreate(rcomm, sf);
  PetscLayoutGetLocalSize(rmap, &nroots);
  PetscLayoutGetSize(rmap, &rN);
  PetscLayoutGetRange(lmap, &lst, &len);
  PetscMalloc1(len - lst, &remote);
  for (i = lst; i < len && i < rN; i++) {
    /* owner starts at -1, so rmap->range[owner + 1] == 0 triggers the search on the first pass */
    if (owner < -1 || i >= rmap->range[owner + 1]) PetscLayoutFindOwner(rmap, i, &owner);
    remote[nleaves].rank  = owner;
    remote[nleaves].index = i - rmap->range[owner];
    nleaves++;
  }
  PetscSFSetGraph(*sf, nroots, nleaves, NULL, PETSC_OWN_POINTER, remote, PETSC_COPY_VALUES);
  PetscFree(remote);
  return 0;
}

/* TODO: handle nooffprocentries like MatZeroRowsMapLocal_Private, since this code is the same */
PetscErrorCode PetscLayoutMapLocal(PetscLayout map, PetscInt N, const PetscInt idxs[], PetscInt *on, PetscInt **oidxs, PetscInt **ogidxs)
{
  PetscInt    *owners = map->range;
  PetscInt     n      = map->n;
  PetscSF      sf;
  PetscInt    *lidxs, *work = NULL;
  PetscSFNode *ridxs;
  PetscMPIInt  rank, p = 0;
  PetscInt     r, len = 0;

  if (on) *on = 0; /* squelch -Wmaybe-uninitialized */
  /* Create SF where leaves are input idxs and roots are owned idxs */
  MPI_Comm_rank(map->comm, &rank);
  PetscMalloc1(n, &lidxs);
  for (r = 0; r < n; ++r) lidxs[r] = -1;
  PetscMalloc1(N, &ridxs);
  for (r = 0; r < N; ++r) {
    const PetscInt idx = idxs[r];
    if (idx < owners[p] || owners[p + 1] <= idx) { /* search only when the previous owner p does not own this idx too */
      PetscLayoutFindOwner(map, idx, &p);
    }
    ridxs[r].rank  = p;
    ridxs[r].index = idxs[r] - owners[p];
  }
  PetscSFCreate(map->comm, &sf);
  PetscSFSetGraph(sf, n, N, NULL, PETSC_OWN_POINTER, ridxs, PETSC_OWN_POINTER);
  PetscSFReduceBegin(sf, MPIU_INT, (PetscInt *)idxs, lidxs, MPI_LOR);
  PetscSFReduceEnd(sf, MPIU_INT, (PetscInt *)idxs, lidxs, MPI_LOR);
  if (ogidxs) { /* communicate global idxs */
    PetscInt cum = 0, start, *work2;

    PetscMalloc1(n, &work);
    PetscCalloc1(N, &work2);
    for (r = 0; r < N; ++r)
      if (idxs[r] >= 0) cum++;
    MPI_Scan(&cum, &start, 1, MPIU_INT, MPI_SUM, map->comm);
    start -= cum;
    cum = 0;
    for (r = 0; r < N; ++r)
      if (idxs[r] >= 0) work2[r] = start + cum++;
    PetscSFReduceBegin(sf, MPIU_INT, work2, work, MPI_REPLACE);
    PetscSFReduceEnd(sf, MPIU_INT, work2, work, MPI_REPLACE);
    PetscFree(work2);
  }
  PetscSFDestroy(&sf);
  /* Compress and put in indices */
  for (r = 0; r < n; ++r)
    if (lidxs[r] >= 0) {
      if (work) work[len] = work[r];
      lidxs[len++] = r;
    }
  if (on) *on = len;
  if (oidxs) *oidxs = lidxs;
  if (ogidxs) *ogidxs = work;
  return 0;
}
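
/* Example usage of PetscLayoutMapLocal (a minimal sketch, assuming an MPI communicator comm;
   every process selects the global indices 0 and 7, and each rank recovers the selected indices
   it owns locally; error checking omitted):

     PetscLayout map;
     PetscInt    idxs[2] = {0, 7}, on, *oidxs = NULL, *ogidxs = NULL;

     PetscLayoutCreateFromSizes(comm, PETSC_DECIDE, 8, 1, &map);
     PetscLayoutMapLocal(map, 2, idxs, &on, &oidxs, &ogidxs);
     ... use on, oidxs, ogidxs ...
     PetscFree(oidxs);
     PetscFree(ogidxs);
     PetscLayoutDestroy(&map);
*/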

/*@
  PetscSFCreateByMatchingIndices - Create SF by matching root and leaf indices

  Collective

  Input Parameters:
+ layout - PetscLayout defining the global index space and the rank that brokers each index
. numRootIndices - size of rootIndices
. rootIndices - PetscInt array of global indices of which this process requests ownership
. rootLocalIndices - root local index permutation (NULL if no permutation)
. rootLocalOffset - offset to be added to root local indices
. numLeafIndices - size of leafIndices
. leafIndices - PetscInt array of global indices with which this process requires data associated
. leafLocalIndices - leaf local index permutation (NULL if no permutation)
- leafLocalOffset - offset to be added to leaf local indices

  Output Parameters:
+ sfA - star forest representing the communication pattern from the layout space to the leaf space (NULL if not needed)
- sf - star forest representing the communication pattern from the root space to the leaf space

  Example 1:
$
$  rank             : 0            1            2
$  rootIndices      : [1 0 2]      [3]          [3]
$  rootLocalOffset  : 100          200          300
$  layout           : [0 1]        [2]          [3]
$  leafIndices      : [0]          [2]          [0 3]
$  leafLocalOffset  : 400          500          600
$
would build the following SF
$
$  [0] 400 <- (0,101)
$  [1] 500 <- (0,102)
$  [2] 600 <- (0,101)
$  [2] 601 <- (2,300)
$
  Example 2:
$
$  rank             : 0               1               2
$  rootIndices      : [1 0 2]         [3]             [3]
$  rootLocalOffset  : 100             200             300
$  layout           : [0 1]           [2]             [3]
$  leafIndices      : rootIndices     rootIndices     rootIndices
$  leafLocalOffset  : rootLocalOffset rootLocalOffset rootLocalOffset
$
would build the following SF
$
$  [1] 200 <- (2,300)
$
  Example 3:
$
$  No process requests ownership of global index 1, but this is fine since no process needs it.
$
$  rank             : 0            1            2
$  numRootIndices   : 2            1            1
$  rootIndices      : [0 2]        [3]          [3]
$  rootLocalOffset  : 100          200          300
$  layout           : [0 1]        [2]          [3]
$  numLeafIndices   : 1            1            2
$  leafIndices      : [0]          [2]          [0 3]
$  leafLocalOffset  : 400          500          600
$
would build the following SF
$
$  [0] 400 <- (0,100)
$  [1] 500 <- (0,101)
$  [2] 600 <- (0,100)
$  [2] 601 <- (2,300)
$

  Notes:
  The layout parameter represents any partitioning of [0, N), where N is the total number of global indices, and its
  local size can be set to PETSC_DECIDE.
  If a global index x lies in the partition owned by process i, each process whose rootIndices contains x requests
  ownership of x and sends its own rank and the local index of x to process i.
  If multiple processes request ownership of x, the one with the highest rank owns x.
  Process i then broadcasts the ownership information, so that each process whose leafIndices contains x knows the
  ownership information of x.
  The output sf is constructed by associating each leaf point to a root point in this way.

  Suppose there is point data ordered according to the global indices and partitioned according to the given layout.
  The optional output PetscSF object sfA can be used to push such data to leaf points.

  All indices in rootIndices and leafIndices must lie in the layout range. The union (over all processes) of rootIndices
  must cover that of leafIndices, but need not cover the entire layout.

  If (leafIndices, leafLocalIndices, leafLocalOffset) == (rootIndices, rootLocalIndices, rootLocalOffset), the output
  star forest is almost the identity, so it will only include the non-trivial part of the map.

  Developer Notes:
  The current approach, in which the process with the highest rank gains ownership, may cause load imbalance; consider
  using hash(rank, root_local_index) as the bid for the ownership determination.

  Level: advanced
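
  Example Usage:
  A minimal sketch reproducing Example 1 above; the per-rank values (numRootIndices, rootIndices,
  rootLocalOffset, numLeafIndices, leafIndices, leafLocalOffset) are assumed to be set as in that
  example, and n is this rank's local layout size (error checking omitted):
.vb
  PetscLayout layout;
  PetscSF     sfA, sf;

  PetscLayoutCreateFromSizes(comm, n, PETSC_DECIDE, 1, &layout);
  PetscSFCreateByMatchingIndices(layout, numRootIndices, rootIndices, NULL, rootLocalOffset, numLeafIndices, leafIndices, NULL, leafLocalOffset, &sfA, &sf);
  PetscLayoutDestroy(&layout);
  PetscSFDestroy(&sfA);
  PetscSFDestroy(&sf);
.ve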

.seealso: `PetscSFCreate()`
@*/
PetscErrorCode PetscSFCreateByMatchingIndices(PetscLayout layout, PetscInt numRootIndices, const PetscInt *rootIndices, const PetscInt *rootLocalIndices, PetscInt rootLocalOffset, PetscInt numLeafIndices, const PetscInt *leafIndices, const PetscInt *leafLocalIndices, PetscInt leafLocalOffset, PetscSF *sfA, PetscSF *sf)
{
  MPI_Comm     comm = layout->comm;
  PetscMPIInt  size, rank;
  PetscSF      sf1;
  PetscSFNode *owners, *buffer, *iremote;
  PetscInt    *ilocal, nleaves, N, n, i;
#if defined(PETSC_USE_DEBUG)
  PetscInt N1;
#endif
  PetscBool flag;

  MPI_Comm_size(comm, &size);
  MPI_Comm_rank(comm, &rank);
  PetscLayoutSetUp(layout);
  PetscLayoutGetSize(layout, &N);
  PetscLayoutGetLocalSize(layout, &n);
  flag = (PetscBool)(leafIndices == rootIndices);
  MPIU_Allreduce(MPI_IN_PLACE, &flag, 1, MPIU_BOOL, MPI_LAND, comm);
#if defined(PETSC_USE_DEBUG)
  N1 = PETSC_MIN_INT;
  for (i = 0; i < numRootIndices; i++)
    if (rootIndices[i] > N1) N1 = rootIndices[i];
  MPI_Allreduce(MPI_IN_PLACE, &N1, 1, MPIU_INT, MPI_MAX, comm);
  PetscCheck(N1 < N, comm, PETSC_ERR_ARG_OUTOFRANGE, "Max root index %" PetscInt_FMT " out of layout range [0,%" PetscInt_FMT ")", N1, N);
  if (!flag) {
    N1 = PETSC_MIN_INT;
    for (i = 0; i < numLeafIndices; i++)
      if (leafIndices[i] > N1) N1 = leafIndices[i];
    MPI_Allreduce(MPI_IN_PLACE, &N1, 1, MPIU_INT, MPI_MAX, comm);
    PetscCheck(N1 < N, comm, PETSC_ERR_ARG_OUTOFRANGE, "Max leaf index %" PetscInt_FMT " out of layout range [0,%" PetscInt_FMT ")", N1, N);
  }
#endif
  /* Reduce: owners -> buffer */
  PetscMalloc1(n, &buffer);
  PetscSFCreate(comm, &sf1);
  PetscSFSetFromOptions(sf1);
  PetscSFSetGraphLayout(sf1, layout, numRootIndices, NULL, PETSC_OWN_POINTER, rootIndices);
  PetscMalloc1(numRootIndices, &owners);
  for (i = 0; i < numRootIndices; ++i) {
    owners[i].rank  = rank;
    owners[i].index = rootLocalOffset + (rootLocalIndices ? rootLocalIndices[i] : i);
  }
  for (i = 0; i < n; ++i) {
    buffer[i].index = -1;
    buffer[i].rank  = -1;
  }
  PetscSFReduceBegin(sf1, MPIU_2INT, owners, buffer, MPI_MAXLOC);
  PetscSFReduceEnd(sf1, MPIU_2INT, owners, buffer, MPI_MAXLOC);
  /* Bcast: buffer -> owners */
  if (!flag) {
    /* leafIndices is different from rootIndices */
    PetscFree(owners);
    PetscSFSetGraphLayout(sf1, layout, numLeafIndices, NULL, PETSC_OWN_POINTER, leafIndices);
    PetscMalloc1(numLeafIndices, &owners);
  }
  PetscSFBcastBegin(sf1, MPIU_2INT, buffer, owners, MPI_REPLACE);
  PetscSFBcastEnd(sf1, MPIU_2INT, buffer, owners, MPI_REPLACE);
  PetscFree(buffer);
  if (sfA) {
    *sfA = sf1;
  } else PetscSFDestroy(&sf1);
  /* Create sf */
  if (flag && rootLocalIndices == leafLocalIndices && leafLocalOffset == rootLocalOffset) {
    /* leaf space == root space */
    for (i = 0, nleaves = 0; i < numLeafIndices; ++i)
      if (owners[i].rank != rank) ++nleaves;
    PetscMalloc1(nleaves, &ilocal);
    PetscMalloc1(nleaves, &iremote);
    for (i = 0, nleaves = 0; i < numLeafIndices; ++i) {
      if (owners[i].rank != rank) {
        ilocal[nleaves]        = leafLocalOffset + i;
        iremote[nleaves].rank  = owners[i].rank;
        iremote[nleaves].index = owners[i].index;
        ++nleaves;
      }
    }
    PetscFree(owners);
  } else {
    nleaves = numLeafIndices;
    PetscMalloc1(nleaves, &ilocal);
    for (i = 0; i < nleaves; ++i) ilocal[i] = leafLocalOffset + (leafLocalIndices ? leafLocalIndices[i] : i);
    iremote = owners;
  }
  PetscSFCreate(comm, sf);
  PetscSFSetFromOptions(*sf);
  PetscSFSetGraph(*sf, rootLocalOffset + numRootIndices, nleaves, ilocal, PETSC_OWN_POINTER, iremote, PETSC_OWN_POINTER);
  return 0;
}