Actual source code: part2d.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: part2d.c,v 1.14 2000/01/31 17:40:21 knepley Exp $";
  3: #endif

  5: #include "src/mesh/impls/triangular/2d/2dimpl.h"         /*I "mesh.h" I*/
  6: #ifdef PETSC_HAVE_PARMETIS
  7: EXTERN_C_BEGIN
  8: #include "parmetis.h"
  9: EXTERN_C_END
 10: #endif
 11: #include "part2d.h"

 13: #undef  __FUNCT__
 14: #define __FUNCT__ "PartitionView_Triangular_2D_File"
 15: static int PartitionView_Triangular_2D_File(Partition p, PetscViewer viewer) {
 16:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
 17:   FILE                    *fd;
 18:   int                      numLocElements = p->numLocElements;
 19:   int                      numLocNodes    = q->numLocNodes;
 20:   int                      numLocEdges    = q->numLocEdges;
 21:   int                      i;
 22:   int                      ierr;

 25:   PetscViewerASCIIPrintf(viewer, "Partition Object:\n");
 26:   PetscViewerASCIIPrintf(viewer, "  Partition of triangular 2D grid with %d elements and %d nodes\n", p->numElements, q->numNodes);
 27:   PetscViewerASCIIGetPointer(viewer, &fd);
 28:   PetscSynchronizedFPrintf(p->comm, fd, "    Proc %d: %d elements %d nodes %d edges\n",
 29:                            p->rank, numLocElements, numLocNodes, numLocEdges);
 30:   PetscSynchronizedFlush(p->comm);
 31:   if (p->ordering != PETSC_NULL) {
 32:     PetscViewerASCIIPrintf(viewer, "  Global element renumbering:\n");
 33:     AOView(p->ordering, viewer);
 34:   }
 35:   if (q->nodeOrdering != PETSC_NULL) {
 36:     PetscViewerASCIIPrintf(viewer, "  Global node renumbering:\n");
 37:     AOView(q->nodeOrdering, viewer);
 38:   }
 39:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost elements on proc %d\n", p->numOverlapElements - numLocElements, p->rank);
 40:   for(i = 0; i < p->numOverlapElements - numLocElements; i++)
 41:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d %d\n", i, p->ghostElements[i], p->ghostElementProcs[i]);
 42:   PetscSynchronizedFlush(p->comm);
 43:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost nodes on proc %d\n", q->numOverlapNodes - numLocNodes, p->rank);
 44:   for(i = 0; i < q->numOverlapNodes - numLocNodes; i++)
 45:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d\n", i, q->ghostNodes[i]);
 46:   PetscSynchronizedFlush(p->comm);

 48:   return(0);
 49: }

 51: #undef  __FUNCT__
 52: #define __FUNCT__ "PartitionView_Triangular_2D_Draw"
 53: static int PartitionView_Triangular_2D_Draw(Partition p, PetscViewer v) {
 55:   return(0);
 56: }

 58: #undef  __FUNCT__
 59: #define __FUNCT__ "PartitionView_Triangular_2D"
 60: static int PartitionView_Triangular_2D(Partition p, PetscViewer viewer) {
 61:   PetscTruth isascii, isdraw;
 62:   int        ierr;

 65:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
 66:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_DRAW,  &isdraw);
 67:   if (isascii == PETSC_TRUE) {
 68:     PartitionView_Triangular_2D_File(p, viewer);
 69:   } else if (isdraw == PETSC_TRUE) {
 70:     PartitionView_Triangular_2D_Draw(p, viewer);
 71:   }
 72:   return(0);
 73: }

 75: #undef  __FUNCT__
 76: #define __FUNCT__ "PartitionViewFromOptions_Private"
 77: static int PartitionViewFromOptions_Private(Partition part, char *title) {
 78:   PetscViewer viewer;
 79:   PetscDraw   draw;
 80:   PetscTruth  opt;
 81:   int         ierr;

 84:   PetscOptionsHasName(part->prefix, "-part_view", &opt);
 85:   if (opt == PETSC_TRUE) {
 86:     PartitionView(part, PETSC_NULL);
 87:   }
 88:   PetscOptionsHasName(part->prefix, "-part_view_draw", &opt);
 89:   if (opt == PETSC_TRUE) {
 90:     PetscViewerDrawOpen(part->comm, 0, 0, 0, 0, 300, 300, &viewer);
 91:     PetscViewerDrawGetDraw(viewer, 0, &draw);
 92:     if (title != PETSC_NULL) {
 93:       PetscDrawSetTitle(draw, title);
 94:     }
 95:     PartitionView(part, viewer);
 96:     PetscViewerFlush(viewer);
 97:     PetscDrawPause(draw);
 98:     PetscViewerDestroy(viewer);
 99:   }
100:   return(0);
101: }

103: #undef  __FUNCT__
104: #define __FUNCT__ "PartitionDestroy_Triangular_2D"
105: static int PartitionDestroy_Triangular_2D(Partition p) {
106:   Partition_Triangular_2D *s = (Partition_Triangular_2D *) p->data;
107:   int                      ierr;

110:   PetscFree(p->firstElement);
111:   if (p->ordering != PETSC_NULL)
112:     {AODestroy(p->ordering);}
113:   if (p->ghostElements != PETSC_NULL)
114:     {PetscFree(p->ghostElements);}
115:   if (p->ghostElementProcs != PETSC_NULL)
116:     {PetscFree(p->ghostElementProcs);}
117:   PetscFree(s->firstNode);
118:   PetscFree(s->firstBdNode);
119:   if (s->nodeOrdering != PETSC_NULL)
120:     {AODestroy(s->nodeOrdering);}
121:   if (s->ghostNodes != PETSC_NULL)
122:     {PetscFree(s->ghostNodes);}
123:   if (s->ghostNodeProcs != PETSC_NULL)
124:     {PetscFree(s->ghostNodeProcs);}
125:   if (s->ghostBdNodes != PETSC_NULL)
126:     {PetscFree(s->ghostBdNodes);}
127:   PetscFree(s->firstEdge);
128:   if (s->edgeOrdering != PETSC_NULL)
129:     {AODestroy(s->edgeOrdering);}
130:   PetscFree(s);

132:   return(0);
133: }

135: #undef  __FUNCT__
136: #define __FUNCT__ "PartitionGhostNodeExchange_Triangular_2D"
137: static int PartitionGhostNodeExchange_Triangular_2D(Partition part, InsertMode addv, ScatterMode mode, int *locVars, int *ghostVars) {
138:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) part->data;
139:   Mesh                     mesh = part->mesh;
140:   int                      ierr;

143:   PetscGhostExchange(part->comm, q->numOverlapNodes - mesh->numNodes, q->ghostNodeProcs, q->ghostNodes, PETSC_INT,
144:                             q->firstNode, addv, mode, locVars, ghostVars);
145: 
146:   return(0);
147: }

149: #undef  __FUNCT__
150: #define __FUNCT__ "PartitionGetTotalNodes_Triangular_2D"
151: static int PartitionGetTotalNodes_Triangular_2D(Partition p, int *size) {
152:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

155:   *size = q->numNodes;
156:   return(0);
157: }

159: #undef  __FUNCT__
160: #define __FUNCT__ "PartitionGetStartNode_Triangular_2D"
161: static int PartitionGetStartNode_Triangular_2D(Partition p, int *node) {
162:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

165:   *node = q->firstNode[p->rank];
166:   return(0);
167: }

169: #undef  __FUNCT__
170: #define __FUNCT__ "PartitionGetEndNode_Triangular_2D"
171: static int PartitionGetEndNode_Triangular_2D(Partition p, int *node) {
172:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

175:   *node = q->firstNode[p->rank+1];
176:   return(0);
177: }

179: #undef  __FUNCT__
180: #define __FUNCT__ "PartitionGetNumNodes_Triangular_2D"
181: static int PartitionGetNumNodes_Triangular_2D(Partition p, int *size) {
182:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

185:   *size = q->numLocNodes;
186:   return(0);
187: }

189: #undef  __FUNCT__
190: #define __FUNCT__ "PartitionGetNumOverlapNodes_Triangular_2D"
191: static int PartitionGetNumOverlapNodes_Triangular_2D(Partition p, int *size) {
192:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

195:   *size = q->numOverlapNodes;
196:   return(0);
197: }

199: #undef  __FUNCT__
200: #define __FUNCT__ "PartitionGhostNodeIndex_Private"
201: int PartitionGhostNodeIndex_Private(Partition p, int node, int *gNode) {
202:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
203:   int                      low, high, mid;

206:   /* Use bisection since the array is assumed to be sorted */
207:   low  = 0;
208:   high = q->numOverlapNodes - (q->firstNode[p->rank+1] - q->firstNode[p->rank]) - 1;
209:   while (low <= high) {
210:     mid = (low + high)/2;
211:     if (node == q->ghostNodes[mid]) {
212:       *gNode = mid;
213:       return(0);
214:     } else if (node < q->ghostNodes[mid]) {
215:       high = mid - 1;
216:     } else {
217:       low  = mid + 1;
218:     }
219:   }
220:   *gNode = -1;
221:   /* Flag for ghost node not present */
222:   PetscFunctionReturn(1);
223: }
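
/* Editor's sketch: a standalone rendering of the bisection above, which relies
   on q->ghostNodes being kept sorted (PartitionSortGhosts_Private below
   maintains that invariant). The sample data is hypothetical. */
#include <stdio.h>

static int GhostIndex(const int *ghosts, int numGhosts, int node)
{
  int low = 0, high = numGhosts - 1;
  while (low <= high) {
    int mid = (low + high)/2;
    if      (node == ghosts[mid]) return mid;   /* slot of the ghost */
    else if (node  < ghosts[mid]) high = mid - 1;
    else                          low  = mid + 1;
  }
  return -1;                                    /* not a ghost here */
}

int main(void)
{
  int ghosts[] = {2, 5, 9, 14};                 /* sorted global node numbers */
  printf("%d %d\n", GhostIndex(ghosts, 4, 9),   /* 2: found in slot 2 */
                    GhostIndex(ghosts, 4, 7));  /* -1: not a ghost */
  return 0;
}
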

225: #undef  __FUNCT__
226: #define __FUNCT__ "PartitionGlobalToLocalNodeIndex_Triangular_2D"
227: static int PartitionGlobalToLocalNodeIndex_Triangular_2D(Partition p, int node, int *locNode) {
228:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
229:   int                      numLocNodes = q->numLocNodes;
230:   int                      gNode; /* Local ghost node number */
231:   int                      ierr;

234:   if (node < 0) {
235:     *locNode = node;
236:     return(0);
237:   }
238:   /* Check for ghost node */
239:   if ((node < q->firstNode[p->rank]) || (node >= q->firstNode[p->rank+1])) {
240:     /* Search for canonical number */
241:     PartitionGhostNodeIndex_Private(p, node, &gNode);
242:     *locNode = numLocNodes + gNode;
243:   } else {
244:     *locNode = node - q->firstNode[p->rank];
245:   }
246:   return(0);
247: }

249: #undef  __FUNCT__
250: #define __FUNCT__ "PartitionLocalToGlobalNodeIndex_Triangular_2D"
251: static int PartitionLocalToGlobalNodeIndex_Triangular_2D(Partition p, int locNode, int *node) {
252:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
253:   int                      numLocNodes = q->numLocNodes;

256:   if (locNode < 0) {
257:     *node = locNode;
258:     return(0);
259:   }
260:   /* Check for ghost node */
261:   if (locNode >= numLocNodes) {
262:     *node = q->ghostNodes[locNode - numLocNodes];
263:   } else {
264:     *node = locNode + q->firstNode[p->rank];
265:   }
266:   return(0);
267: }
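
/* Editor's sketch: the local numbering convention used by the two conversions
   above. Interior nodes occupy [0, numLocNodes) after subtracting
   firstNode[rank]; ghosts are appended behind them in sorted order. All data
   below is hypothetical, and the linear scan stands in for the bisection. */
#include <assert.h>

static int ToLocal(int node, int first, int numLoc, const int *ghosts, int numGhosts)
{
  int g;
  if ((node >= first) && (node < first + numLoc)) return node - first;
  for (g = 0; g < numGhosts; g++)
    if (ghosts[g] == node) return numLoc + g;
  return -1;
}

static int ToGlobal(int locNode, int first, int numLoc, const int *ghosts)
{
  return (locNode < numLoc) ? locNode + first : ghosts[locNode - numLoc];
}

int main(void)
{
  int ghosts[] = {2, 11};                     /* sorted ghost globals */
  assert(ToLocal(5, 4, 5, ghosts, 2)  == 1);  /* interior: 5 - firstNode[rank] */
  assert(ToLocal(11, 4, 5, ghosts, 2) == 6);  /* ghost: numLocNodes + slot */
  assert(ToGlobal(6, 4, 5, ghosts)    == 11); /* and back again */
  return 0;
}
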

269: #undef  __FUNCT__
270: #define __FUNCT__ "PartitionGlobalToGhostNodeIndex_Triangular_2D"
271: static int PartitionGlobalToGhostNodeIndex_Triangular_2D(Partition p, int node, int *ghostNode, int *ghostProc) {
272:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
273:   int                      ierr;

276:   if (node < 0) {
277:     *ghostNode = node;
278:     *ghostProc = -1;
279:     return(0);
280:   }
281:   /* Check for ghost node */
282:   if ((node < q->firstNode[p->rank]) || (node >= q->firstNode[p->rank+1])) {
283:     PartitionGhostNodeIndex_Private(p, node, ghostNode);
284:     *ghostProc = q->ghostNodeProcs[*ghostNode];
285:   } else {
286:     SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Global node %d is not a ghost node", node);
287:   }
288:   return(0);
289: }

291: #undef  __FUNCT__
292: #define __FUNCT__ "PartitionGhostToGlobalNodeIndex_Triangular_2D"
293: static int PartitionGhostToGlobalNodeIndex_Triangular_2D(Partition p, int ghostNode, int *node, int *ghostProc) {
294:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
295:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;

298:   if (ghostNode < 0) {
299:     *node      = ghostNode;
300:     *ghostProc = -1;
301:     return(0);
302:   }
303:   /* Check for ghost node */
304:   if (ghostNode < numGhostNodes) {
305:     *node      = q->ghostNodes[ghostNode];
306:     *ghostProc = q->ghostNodeProcs[ghostNode];
307:   } else {
308:     SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Ghost node %d does not exist", ghostNode);
309:   }
310:   return(0);
311: }

313: #undef  __FUNCT__
314: #define __FUNCT__ "PartitionGetNodeOrdering_Triangular_2D"
315: static int PartitionGetNodeOrdering_Triangular_2D(Partition p, AO *order) {
316:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

319:   *order = q->nodeOrdering;
320:   return(0);
321: }

323: #undef  __FUNCT__
324: #define __FUNCT__ "PartitionGetTotalEdges_Triangular_2D"
325: static int PartitionGetTotalEdges_Triangular_2D(Partition p, int *size) {
326:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

329:   *size = q->numEdges;
330:   return(0);
331: }

333: #undef  __FUNCT__
334: #define __FUNCT__ "PartitionGetStartEdge_Triangular_2D"
335: static int PartitionGetStartEdge_Triangular_2D(Partition p, int *edge) {
336:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

339:   *edge = q->firstEdge[p->rank];
340:   return(0);
341: }

343: #undef  __FUNCT__
344: #define __FUNCT__ "PartitionGetEndEdge_Triangular_2D"
345: static int PartitionGetEndEdge_Triangular_2D(Partition p, int *edge) {
346:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

349:   *edge = q->firstEdge[p->rank+1];
350:   return(0);
351: }

353: #undef  __FUNCT__
354: #define __FUNCT__ "PartitionGetNumEdges_Triangular_2D"
355: static int PartitionGetNumEdges_Triangular_2D(Partition p, int *size) {
356:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

359:   *size = q->numLocEdges;
360:   return(0);
361: }

363: #undef  __FUNCT__
364: #define __FUNCT__ "PartitionGetNumOverlapEdges_Triangular_2D"
365: static int PartitionGetNumOverlapEdges_Triangular_2D(Partition p, int *size) {
366:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

369:   /* We do not maintain ghost edges */
370:   *size = q->numLocEdges;
371:   return(0);
372: }

374: #undef  __FUNCT__
375: #define __FUNCT__ "PartitionGetEdgeOrdering_Triangular_2D"
376: static int PartitionGetEdgeOrdering_Triangular_2D(Partition p, AO *order) {
377:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

380:   *order = q->edgeOrdering;
381:   return(0);
382: }

384: static struct _PartitionOps POps = {/* Generic Operations */
385:                                     PETSC_NULL/* PartitionSetup */,
386:                                     PETSC_NULL/* PartitionSetFromOptions */,
387:                                     PartitionView_Triangular_2D,
388:                                     PETSC_NULL/* PartitionCopy */,
389:                                     PETSC_NULL/* PartitionDuplicate */,
390:                                     PartitionDestroy_Triangular_2D,
391:                                     /* Partition-Specific Operations */
392:                                     PartitionGhostNodeExchange_Triangular_2D,
393:                                     /* Node Query Functions */
394:                                     PartitionGetTotalNodes_Triangular_2D,
395:                                     PartitionGetStartNode_Triangular_2D,
396:                                     PartitionGetEndNode_Triangular_2D,
397:                                     PartitionGetNumNodes_Triangular_2D,
398:                                     PartitionGetNumOverlapNodes_Triangular_2D,
399:                                     PartitionGlobalToLocalNodeIndex_Triangular_2D,
400:                                     PartitionLocalToGlobalNodeIndex_Triangular_2D,
401:                                     PartitionGlobalToGhostNodeIndex_Triangular_2D,
402:                                     PartitionGhostToGlobalNodeIndex_Triangular_2D,
403:                                     PartitionGetNodeOrdering_Triangular_2D,
404:                                     /* Face Query Functions */
405:                                     PartitionGetTotalElements,
406:                                     PartitionGetStartElement,
407:                                     PartitionGetEndElement,
408:                                     PartitionGetNumElements,
409:                                     PartitionGetNumOverlapElements,
410:                                     PartitionGlobalToLocalElementIndex,
411:                                     PartitionLocalToGlobalElementIndex,
412:                                     PartitionGetElementOrdering,
413:                                     /* Edge Query Functions */
414:                                     PartitionGetTotalEdges_Triangular_2D,
415:                                     PartitionGetStartEdge_Triangular_2D,
416:                                     PartitionGetEndEdge_Triangular_2D,
417:                                     PartitionGetNumEdges_Triangular_2D,
418:                                     PartitionGetNumOverlapEdges_Triangular_2D,
419:                                     PETSC_NULL/* PartitionGlobalToLocalEdgeIndex */,
420:                                     PETSC_NULL/* PartitionLocalToGlobalEdgeIndex */,
421:                                     PartitionGetEdgeOrdering_Triangular_2D};
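
/* Editor's note: the table above fills this implementation's slots behind the
   generic Partition interface, so the public entry points can dispatch through
   function pointers. The miniature below illustrates the pattern only -- every
   name in it is hypothetical, since struct _PartitionOps is not shown here. */
#include <stdio.h>

typedef struct _p_Part Part;
struct _PartOps { int (*getnumnodes)(Part *, int *); };
struct _p_Part  { struct _PartOps *ops; int numLocNodes; };

static int GetNumNodes_Impl(Part *p, int *size) { *size = p->numLocNodes; return 0; }
static int PartGetNumNodes(Part *p, int *size)  { return (*p->ops->getnumnodes)(p, size); }

int main(void)
{
  struct _PartOps ops = {GetNumNodes_Impl};  /* analogous to the POps table */
  Part p = {&ops, 42};
  int  n;
  PartGetNumNodes(&p, &n);
  printf("%d\n", n);                         /* 42 */
  return 0;
}
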

423: #undef  __FUNCT__
424: #define __FUNCT__ "PartitionCalcCut_Private"
425: int PartitionCalcCut_Private(Partition p, int *cut)
426: {
427:   Mesh_Triangular *tri            = (Mesh_Triangular *) p->mesh->data;
428:   int              numLocElements = p->numLocElements;
429:   int             *neighbors      = tri->neighbors;
430:   int              locCut;        /* The number of edges of the dual crossing the partition from this domain */
431:   int              elem, neighbor;
432:   int              ierr;

435:   for(elem = 0, locCut = 0; elem < numLocElements; elem++) {
436:     for(neighbor = 0; neighbor < 3; neighbor++) {
437:       if (neighbors[elem*3+neighbor] >= numLocElements)
438:         locCut++;
439:     }
440:   }
441:   MPI_Allreduce(&locCut, cut, 1, MPI_INT, MPI_SUM, p->comm);
442:   *cut /= 2;  /* each crossing edge was counted once by each of its two incident domains */
443:   return(0);
444: }
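
/* Editor's sketch: why the sum is halved. For one dual edge crossing a
   two-domain partition, each rank counts the crossing once, so the Allreduce
   total is 2 and halving recovers the true cut of 1. Toy numbers, no MPI: */
#include <stdio.h>

int main(void)
{
  int locCutRank0 = 1, locCutRank1 = 1;     /* each side sees the crossing once */
  int cut = (locCutRank0 + locCutRank1)/2;  /* what the Allreduce + halving yields */
  printf("cut = %d\n", cut);                /* 1 */
  return 0;
}
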

446: int PartitionDebugAO_Private(Partition p, int *nodeProcs)
447: {
448:   Mesh                     mesh        = p->mesh;
449:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
450:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
451:   int                      numCorners  = mesh->numCorners;
452:   int                      numElements = mesh->numFaces;
453:   int                     *elements    = tri->faces;
454:   int                      numNodes    = q->numNodes;
455:   int                      numProcs    = p->numProcs;
456:   int                      rank        = p->rank;
457:   int                     *support;
458:   int                     *temp;
459:   int                      proc, nProc, elem, nElem, sElem, corner, nCorner, node, degree, idx;
460:   int                      ierr;

463:   PetscMalloc(numProcs * sizeof(int), &temp);
464:   for(node = 0; node < numNodes; node++) {
465:     PetscSynchronizedPrintf(p->comm, " %d", nodeProcs[node]);
466:     PetscSynchronizedFlush(p->comm);
467:     PetscPrintf(p->comm, "\n");
468:     MPI_Allgather(&nodeProcs[node], 1, MPI_INT, temp, 1, MPI_INT, p->comm);
469:     for(proc = 0, idx = 0; proc < numProcs; proc++) {
470:       if (temp[proc] == proc) idx++;
471:     }

473:     /* If a node is not scheduled for a unique domain */
474:     if (idx != 1) {
475:       for(elem = 0; elem < numElements; elem++) {
476:         for(corner = 0; corner < numCorners; corner++) {
477:           /* Locate an element containing the node */
478:           if (node != elements[elem*numCorners+corner])
479:             continue;
480: 
481:           /* Check the support of the node */
482:           PetscPrintf(PETSC_COMM_SELF, "[%d]elem: %d corner: %d node: %d\n", rank, elem, corner, node);
483:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
484:           for(sElem = 0; sElem < degree; sElem++) {
485:             nElem = support[sElem];
486:             PetscPrintf(PETSC_COMM_SELF, "[%d]support[%d] = %d\n", rank, sElem, nElem);
487:             /* See if neighbor is in another domain */
488:             if (nElem >= numElements) {
489:               /* Check to see if node is contained in the neighboring element */
490:               for(nCorner = 0; nCorner < numCorners; nCorner++)
491:                 if (elements[nElem*numCorners+nCorner] == node) {
492:                   nProc = p->ghostElementProcs[nElem-numElements];
493:                   PetscPrintf(PETSC_COMM_SELF, "[%d]Found in corner %d proc: %d\n", rank, nCorner, nProc);
494:                   break;
495:                 }
496:             }
497:           }
498:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
499:           if (nodeProcs[node] < 0)
500:             nodeProcs[node] = rank;
501:           PetscPrintf(PETSC_COMM_SELF, "[%d]nodeProcs[%d]: %d\n", rank, node, nodeProcs[node]);
502:         }
503:       }
504:     }
505:   }
506:   PetscFree(temp);
507:   PetscBarrier((PetscObject) p);
508:   PetscFunctionReturn(1);
509: }

511: #undef  __FUNCT__
512: #define __FUNCT__ "PartitionSortGhosts_Private"
513: /*
514:   PartitionSortGhosts_Private - This function sorts the ghost array and
515:   removes any duplicates.

517:   Input Parameters:
518: . p          - The Partition
519: . numGhosts  - The number of ghosts
520: . ghostArray - The ghost indices

522:   Output Parameters:
523: . numGhosts  - The new size of the ghost array
524: . ghostArray - The sorted ghost indices

526: .seealso:
527: */
528: int PartitionSortGhosts_Private(Partition p, int *numGhosts, int *ghostArray, int **ghostPerm)
529: {
530:   int *perm, *temp;
531:   int  size;
532:   int  ghost, newGhost;
533:   int  ierr;

536:   size = *numGhosts;
537:   PetscMalloc(size * sizeof(int), &perm);
538:   PetscMalloc(size * sizeof(int), &temp);

540:   /* Sort ghosts */
541:   for(ghost = 0; ghost < size; ghost++) perm[ghost] = ghost;
542:   PetscSortIntWithPermutation(size, ghostArray, perm);

544:   /* Permute ghosts and eliminate duplicates */
545:   for(ghost = 0, newGhost = 0; ghost < size; ghost++) {
546:     if ((newGhost == 0) || (temp[newGhost-1] != ghostArray[perm[ghost]])) {
547:       /* Keep ghost */
548:       temp[newGhost++] = ghostArray[perm[ghost]];
549:     } else {
550:       /* Eliminate redundant ghost */
551:       PetscMemmove(&perm[ghost], &perm[ghost+1], (size - (ghost+1)) * sizeof(int));
552:       ghost--;
553:       size--;
554:     }
555:   }
556:   for(ghost = 0; ghost < size; ghost++) {
557:     ghostArray[ghost] = temp[ghost];
558:   }
559:   PetscFree(temp);

561:   *numGhosts = size;
562:   *ghostPerm = perm;
563:   return(0);
564: }
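
/* Editor's sketch: a plain-C rendering of the pass above on hypothetical data.
   The permutation is returned so callers can reorder parallel arrays
   (ghostNodeProcs, markers, coordinates) to match, as the functions below do. */
#include <stdio.h>
#include <string.h>

static void SortGhosts(int *n, int *ghosts, int *perm)
{
  int temp[16], size = *n, newGhost = 0, ghost, i, j;
  for (i = 0; i < size; i++) perm[i] = i;
  for (i = 1; i < size; i++) {                   /* insertion sort of perm by key */
    int p = perm[i];
    for (j = i; (j > 0) && (ghosts[perm[j-1]] > ghosts[p]); j--) perm[j] = perm[j-1];
    perm[j] = p;
  }
  for (ghost = 0; ghost < size; ghost++) {
    if ((newGhost == 0) || (temp[newGhost-1] != ghosts[perm[ghost]])) {
      temp[newGhost++] = ghosts[perm[ghost]];    /* keep this ghost */
    } else {                                     /* duplicate: squeeze out its perm slot */
      memmove(&perm[ghost], &perm[ghost+1], (size - (ghost+1)) * sizeof(int));
      ghost--; size--;
    }
  }
  memcpy(ghosts, temp, size * sizeof(int));
  *n = size;
}

int main(void)
{
  int ghosts[] = {9, 2, 9, 5}, perm[4], n = 4, i;
  SortGhosts(&n, ghosts, perm);
  for (i = 0; i < n; i++) printf("%d <- slot %d\n", ghosts[i], perm[i]);
  return 0;                                      /* 2 <- 1, 5 <- 3, 9 <- 0 */
}
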

566: #undef  __FUNCT__
567: #define __FUNCT__ "PartitionGetNewGhostNodes_Serial"
568: int PartitionGetNewGhostNodes_Serial(Partition p, int *newProcNodes, int *newNodes)
569: {
570:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
571:   int                      numLocNodes   = q->numLocNodes;
572:   int                      numGhostNodes = q->numOverlapNodes - numLocNodes;
573:   int                      numProcs      = p->numProcs;
574:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
575:   int                      numNewNodes;    /* Total number of new ghost nodes to add */
576:   int                     *temp;
577:   int                      proc, node, i;
578:   int                      ierr;

581:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++)
582:     numNewNodes += newProcNodes[proc];

584:   /* Add in new ghost nodes */
585:   if (numNewNodes > 0) {
586:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
587:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
588:     for(node = 0; node < numNewNodes; node++) {
589:       temp[numGhostNodes+node] = newNodes[node];
590:     }
591:     if (q->ghostNodes != PETSC_NULL) {
592:       PetscFree(q->ghostNodes);
593:     }
594:     q->ghostNodes = temp;

596:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
597:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
598:     for(proc = 0, node = 0; proc < numProcs; proc++) {
599:       for(i = 0; i < newProcNodes[proc]; i++)
600:         temp[numGhostNodes+(node++)] = proc;
601:     }
602:     if (q->ghostNodeProcs != PETSC_NULL) {
603:       PetscFree(q->ghostNodeProcs);
604:     }
605:     q->ghostNodeProcs = temp;

607:     /* Resort ghost nodes and remove duplicates */
608:     numGhostNodes += numNewNodes;
609:     PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
610:     q->numOverlapNodes = numLocNodes + numGhostNodes;
611:     PetscMalloc(numGhostNodes * sizeof(int), &temp);
612:     for(node = 0; node < numGhostNodes; node++) {
613:       temp[node] = q->ghostNodeProcs[nodePerm[node]];
614:     }
615:     PetscFree(q->ghostNodeProcs);
616:     q->ghostNodeProcs = temp;
617:     PetscFree(nodePerm);
618:   }
619: #ifdef PETSC_USE_BOPT_g
620:   /* Consistency check for ghost nodes */
621:   for(node = 0; node < numGhostNodes; node++) {
622:     if ((q->ghostNodes[node] <  q->firstNode[q->ghostNodeProcs[node]]) ||
623:         (q->ghostNodes[node] >= q->firstNode[q->ghostNodeProcs[node]+1])) {
624:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
625:     }
626:   }
627: #endif
628:   return(0);
629: }

631: #undef  __FUNCT__
632: #define __FUNCT__ "PartitionGetNewGhostNodes_Parallel"
633: int PartitionGetNewGhostNodes_Parallel(Partition p, int *sendGhostNodes, int *sendNodes, int *recvGhostNodes, int *recvNodes)
634: {
635:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
636:   Mesh                     mesh            = p->mesh;
637:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
638:   int                      numLocNodes     = q->numLocNodes;
639:   int                     *firstNode       = q->firstNode;
640:   double                  *nodes           = tri->nodes;
641:   int                     *markers         = tri->markers;
642:   int                     *degrees         = tri->degrees;
643:   int                      numProcs        = p->numProcs;
644:   int                      rank            = p->rank;
645:   int                      numGhostNodes;
646:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
647:   int                      numSendNodes;    /* Number of ghost node requests this domain sends (data it will receive) */
648:   int                      numRecvNodes;    /* Number of ghost node requests this domain receives (data it will send) */
649:   int                     *sumSendNodes;    /* Prefix sums of sendGhostNodes */
650:   int                     *sumRecvNodes;    /* Prefix sums of recvGhostNodes */
651:   int                     *sendGhostCoords; /* Coordinate counts (2 per node) requested from each processor */
652:   int                     *recvGhostCoords; /* Coordinate counts (2 per node) requested by each processor */
653:   int                     *sumSendCoords;   /* Prefix sums of sendGhostCoords */
654:   int                     *sumRecvCoords;   /* Prefix sums of recvGhostCoords */
655:   double                  *sendCoords;      /* Coordinates of ghost nodes for other domains */
656:   double                  *recvCoords;      /* Coordinates of ghost nodes for this domain */
657:   int                     *sendMarkers;     /* Markers of ghost nodes for other domains */
658:   int                     *recvMarkers;     /* Markers of ghost nodes for this domain */
659:   int                     *sendDegrees;     /* Degrees of ghost nodes for other domains */
660:   int                     *recvDegrees;     /* Degrees of ghost nodes for this domain */
661:   int                     *offsets;         /* Offsets into the send array for each destination proc */
662:   int                     *temp;
663:   double                  *temp2;
664:   int                      proc, node, locNode, i;
665:   int                      ierr;

668:   PetscMalloc(numProcs * sizeof(int), &sumSendNodes);
669:   PetscMalloc(numProcs * sizeof(int), &sumRecvNodes);
670:   PetscMalloc(numProcs * sizeof(int), &sendGhostCoords);
671:   PetscMalloc(numProcs * sizeof(int), &recvGhostCoords);
672:   PetscMalloc(numProcs * sizeof(int), &sumSendCoords);
673:   PetscMalloc(numProcs * sizeof(int), &sumRecvCoords);
674:   PetscMalloc(numProcs * sizeof(int), &offsets);
675:   PetscMemzero(sumSendNodes, numProcs * sizeof(int));
676:   PetscMemzero(sumRecvNodes, numProcs * sizeof(int));
677:   PetscMemzero(offsets,      numProcs * sizeof(int));

679:   /* Compute new ghost node offsets */
680:   for(proc = 1; proc < numProcs; proc++) {
681:     sumSendNodes[proc] = sumSendNodes[proc-1] + sendGhostNodes[proc-1];
682:     sumRecvNodes[proc] = sumRecvNodes[proc-1] + recvGhostNodes[proc-1];
683:   }
684:   numSendNodes = sumSendNodes[numProcs-1] + sendGhostNodes[numProcs-1];
685:   numRecvNodes = sumRecvNodes[numProcs-1] + recvGhostNodes[numProcs-1];

687:   /* Get numbers of ghost nodes to provide */
688:   MPI_Alltoallv(sendNodes, sendGhostNodes, sumSendNodes, MPI_INT,
689:                        recvNodes, recvGhostNodes, sumRecvNodes, MPI_INT, p->comm);
690: 

692:   /* Get node coordinates, markers, and degrees */
693:   for(proc = 0; proc < numProcs; proc++) {
694:     sendGhostCoords[proc] = sendGhostNodes[proc]*2;
695:     recvGhostCoords[proc] = recvGhostNodes[proc]*2;
696:     sumSendCoords[proc]   = sumSendNodes[proc]*2;
697:     sumRecvCoords[proc]   = sumRecvNodes[proc]*2;
698:   }
699:   if (numSendNodes) {
700:     PetscMalloc(numSendNodes*2 * sizeof(double), &recvCoords);
701:     PetscMalloc(numSendNodes   * sizeof(int),    &recvMarkers);
702:     PetscMalloc(numSendNodes   * sizeof(int),    &recvDegrees);
703:   }
704:   if (numRecvNodes) {
705:     PetscMalloc(numRecvNodes*2 * sizeof(double), &sendCoords);
706:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendMarkers);
707:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendDegrees);
708:     for(node = 0; node < numRecvNodes; node++) {
709:       locNode = recvNodes[node] - firstNode[rank];
710: #ifdef PETSC_USE_BOPT_g
711:       if ((locNode < 0) || (locNode >= numLocNodes)) {
712:         SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d should be in [0,%d)", locNode, numLocNodes);
713:       }
714: #endif
715:       for(i = 0; i < 2; i++)
716:         sendCoords[node*2+i] = nodes[locNode*2+i];
717:       sendMarkers[node] = markers[locNode];
718:       sendDegrees[node] = degrees[locNode];
719:     }
720:   }

722:   /* Communicate node coordinates and markers and degrees */
723:   MPI_Alltoallv(sendCoords,  recvGhostCoords, sumRecvCoords, MPI_DOUBLE,
724:                        recvCoords,  sendGhostCoords, sumSendCoords, MPI_DOUBLE, p->comm);
725: 
726:   MPI_Alltoallv(sendMarkers, recvGhostNodes,  sumRecvNodes,  MPI_INT,
727:                        recvMarkers, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
728: 
729:   MPI_Alltoallv(sendDegrees, recvGhostNodes,  sumRecvNodes,  MPI_INT,
730:                        recvDegrees, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
731: 

733:   /* Add in new ghost nodes */
734:   numGhostNodes = q->numOverlapNodes - numLocNodes;
735:   if (numSendNodes > 0) {
736:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
737:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
738:     for(node = 0; node < numSendNodes; node++)
739:       temp[numGhostNodes+node] = sendNodes[node];
740:     if (q->ghostNodes != PETSC_NULL) {
741:       PetscFree(q->ghostNodes);
742:     }
743:     q->ghostNodes = temp;

745:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
746:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
747:     for(proc = 0, node = 0; proc < numProcs; proc++) {
748:       for(i = 0; i < sendGhostNodes[proc]; i++)
749:         temp[numGhostNodes+(node++)] = proc;
750:     }
751:     if (q->ghostNodeProcs != PETSC_NULL) {
752:       PetscFree(q->ghostNodeProcs);
753:     }
754:     q->ghostNodeProcs = temp;

756:     PetscMalloc((q->numOverlapNodes + numSendNodes)*2 * sizeof(double), &temp2);
757:     PetscMemcpy(temp2, nodes, q->numOverlapNodes*2 * sizeof(double));
758:     for(node = 0; node < numSendNodes*2; node++)
759:       temp2[q->numOverlapNodes*2+node] = recvCoords[node];
760:     PetscFree(nodes);
761:     tri->nodes = temp2;

763:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
764:     PetscMemcpy(temp, markers, q->numOverlapNodes * sizeof(int));
765:     for(node = 0; node < numSendNodes; node++)
766:       temp[q->numOverlapNodes+node] = recvMarkers[node];
767:     PetscFree(markers);
768:     tri->markers = temp;

770:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
771:     PetscMemcpy(temp, degrees, q->numOverlapNodes * sizeof(int));
772:     for(node = 0; node < numSendNodes; node++)
773:       temp[q->numOverlapNodes+node] = recvDegrees[node];
774:     PetscFree(degrees);
775:     tri->degrees = temp;
776:   }

778:   /* Resort ghost nodes and remove duplicates */
779:   numGhostNodes     += numSendNodes;
780:   PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
781:   q->numOverlapNodes = numLocNodes + numGhostNodes;

783:   PetscMalloc(numGhostNodes * sizeof(int), &temp);
784:   for(node = 0; node < numGhostNodes; node++)
785:     temp[node] = q->ghostNodeProcs[nodePerm[node]];
786:   for(node = 0; node < numGhostNodes; node++)
787:     q->ghostNodeProcs[node] = temp[node];

789:   for(node = 0; node < numGhostNodes; node++)
790:     temp[node] = tri->markers[mesh->numNodes+nodePerm[node]];
791:   for(node = 0; node < numGhostNodes; node++)
792:     tri->markers[mesh->numNodes+node] = temp[node];

794:   for(node = 0; node < numGhostNodes; node++)
795:     temp[node] = tri->degrees[mesh->numNodes+nodePerm[node]];
796:   for(node = 0; node < numGhostNodes; node++)
797:     tri->degrees[mesh->numNodes+node] = temp[node];
798:   PetscFree(temp);

800:   PetscMalloc(numGhostNodes*2 * sizeof(double), &temp2);
801:   for(node = 0; node < numGhostNodes; node++) {
802:     temp2[node*2]   = tri->nodes[(mesh->numNodes+nodePerm[node])*2];
803:     temp2[node*2+1] = tri->nodes[(mesh->numNodes+nodePerm[node])*2+1];
804:   }
805:   for(node = 0; node < numGhostNodes; node++) {
806:     tri->nodes[(mesh->numNodes+node)*2]   = temp2[node*2];
807:     tri->nodes[(mesh->numNodes+node)*2+1] = temp2[node*2+1];
808:   }
809:   PetscFree(temp2);
810:   PetscFree(nodePerm);

812: #ifdef PETSC_USE_BOPT_g
813:   /* Consistency check for ghost nodes */
814:   for(node = 0; node < numGhostNodes; node++) {
815:     if ((q->ghostNodes[node] <  firstNode[q->ghostNodeProcs[node]]) ||
816:         (q->ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1])) {
817:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
818:     }
819:   }
820: #endif

822:   /* Cleanup */
823:   PetscFree(sumSendNodes);
824:   PetscFree(sendGhostCoords);
825:   PetscFree(sumSendCoords);
826:   PetscFree(offsets);
827:   if (numSendNodes) {
828:     PetscFree(recvCoords);
829:     PetscFree(recvMarkers);
830:     PetscFree(recvDegrees);
831:   }
832:   PetscFree(sumRecvNodes);
833:   PetscFree(recvGhostCoords);
834:   PetscFree(sumRecvCoords);
835:   if (numRecvNodes) {
836:     PetscFree(sendCoords);
837:     PetscFree(sendMarkers);
838:     PetscFree(sendDegrees);
839:   }
840:   return(0);
841: }
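
/* Editor's sketch of the request/reply pattern above: each rank first tells
   every owner which global indices it needs (MPI_Alltoall for the counts, then
   MPI_Alltoallv for the indices), and the owners answer with the matching data
   along the reversed count arrays. That reversal is why the "send" quantities
   above describe ghost data this rank ultimately receives. The ownership rule
   and the values[] lookup below are hypothetical. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
  int rank, size, i, p, nReq, nRep;
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  int *reqCount = (int *) calloc(size, sizeof(int)); /* requests sent to each rank */
  int *repCount = (int *) calloc(size, sizeof(int)); /* requests received from each rank */
  int *reqSum   = (int *) calloc(size, sizeof(int)); /* prefix sums of the above */
  int *repSum   = (int *) calloc(size, sizeof(int));
  for (p = 0; p < size; p++) reqCount[p] = (p == rank) ? 0 : 1;
  /* Exchange counts first, so both sides can size their buffers */
  MPI_Alltoall(reqCount, 1, MPI_INT, repCount, 1, MPI_INT, MPI_COMM_WORLD);
  for (p = 1; p < size; p++) {
    reqSum[p] = reqSum[p-1] + reqCount[p-1];
    repSum[p] = repSum[p-1] + repCount[p-1];
  }
  nReq = reqSum[size-1] + reqCount[size-1];
  nRep = repSum[size-1] + repCount[size-1];
  int *reqIdx = (int *) malloc((nReq+1) * sizeof(int));
  int *repIdx = (int *) malloc((nRep+1) * sizeof(int));
  int *repVal = (int *) malloc((nRep+1) * sizeof(int));
  int *reqVal = (int *) malloc((nReq+1) * sizeof(int));
  for (p = 0, i = 0; p < size; p++)
    if (p != rank) reqIdx[i++] = p*4 + 2;              /* each rank owns globals [p*4, p*4+4) */
  /* Ship the requested indices to their owners */
  MPI_Alltoallv(reqIdx, reqCount, reqSum, MPI_INT, repIdx, repCount, repSum, MPI_INT, MPI_COMM_WORLD);
  for (i = 0; i < nRep; i++) repVal[i] = repIdx[i]*10; /* owner-side lookup (stand-in) */
  /* Reply along the reversed count arrays */
  MPI_Alltoallv(repVal, repCount, repSum, MPI_INT, reqVal, reqCount, reqSum, MPI_INT, MPI_COMM_WORLD);
  for (i = 0; i < nReq; i++) printf("[%d] node %d -> value %d\n", rank, reqIdx[i], reqVal[i]);
  free(reqCount); free(repCount); free(reqSum); free(repSum);
  free(reqIdx); free(repIdx); free(repVal); free(reqVal);
  MPI_Finalize();
  return 0;
}
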

843: #undef  __FUNCT__
844: #define __FUNCT__ "PartitionGetNewGhostElements_Serial"
845: int PartitionGetNewGhostElements_Serial(Partition p, int *newProcElements, int *newElements)
846: {
847:   int  numLocElements   = p->numLocElements;
848:   int  numGhostElements = p->numOverlapElements - numLocElements;
849:   int  numProcs         = p->numProcs;
850:   int *elemPerm;        /* The new permutation for the sorted ghost elements */
851:   int  numNewElements;  /* Total number of new ghost elements to add */
852:   int *temp;
853:   int  proc, elem, i;
854:   int  ierr;

857:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++)
858:     numNewElements += newProcElements[proc];

860:   /* Add in new ghost elements */
861:   if (numNewElements > 0) {
862:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
863:     PetscMemcpy(temp, p->ghostElements, numGhostElements * sizeof(int));
864:     for(elem = 0; elem < numNewElements; elem++)
865:       temp[numGhostElements+elem] = newElements[elem];
866:     if (p->ghostElements != PETSC_NULL) {
867:       PetscFree(p->ghostElements);
868:     }
869:     p->ghostElements = temp;

871:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
872:     PetscMemcpy(temp, p->ghostElementProcs, numGhostElements * sizeof(int));
873:     for(proc = 0, elem = 0; proc < numProcs; proc++) {
874:       for(i = 0; i < newProcElements[proc]; i++)
875:         temp[numGhostElements+(elem++)] = proc;
876:     }
877:     if (p->ghostElementProcs != PETSC_NULL) {
878:       PetscFree(p->ghostElementProcs);
879:     }
880:     p->ghostElementProcs = temp;

882:     /* Resort ghost elements and remove duplicates */
883:     numGhostElements += numNewElements;
884:     PartitionSortGhosts_Private(p, &numGhostElements, p->ghostElements, &elemPerm);
885:     p->numOverlapElements = numLocElements + numGhostElements;
886:     PetscMalloc(numGhostElements * sizeof(int), &temp);
887:     for(elem = 0; elem < numGhostElements; elem++)
888:       temp[elem] = p->ghostElementProcs[elemPerm[elem]];
889:     PetscFree(p->ghostElementProcs);
890:     p->ghostElementProcs = temp;
891:     PetscFree(elemPerm);
892:   }
893: #ifdef PETSC_USE_BOPT_g
894:   /* Consistency check for ghost elements */
895:   for(elem = 0; elem < numGhostElements; elem++) {
896:     if ((p->ghostElements[elem] <  p->firstElement[p->ghostElementProcs[elem]]) ||
897:         (p->ghostElements[elem] >= p->firstElement[p->ghostElementProcs[elem]+1])) {
898:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost element source processor");
899:     }
900:   }
901: #endif
902:   return(0);
903: }

905: #undef  __FUNCT__
906: #define __FUNCT__ "PartitionCreateElementMap_METIS"
907: /*
908:   PartitionCreateElementMap_METIS - This function creates a map from elements to domains,
909:   using METIS to minimize the cut and approximately balance the sizes.

911:   Input Parameters:
912: + p           - The Partition
913: - numElements - The local number of elements

915:   Output Parameter:
916: . elementMap  - The map from elements to domains

918: .seealso: PartitionElements_Private()
919: */
920: int PartitionCreateElementMap_METIS(Partition p, int numElements, int **elementMap)
921: {
922: #ifdef PETSC_HAVE_PARMETIS
923:   Mesh       mesh = p->mesh;
924:   int       *elemProcs;     /* The processor assigned to each element */
925:   int       *elemOffsets;   /* The offsets into elemNeighbors of each element row for dual in CSR format */
926:   int       *elemNeighbors; /* The list of element neighbors for dual in CSR format */
927:   int       *edgeWeights;   /* The list of edge weights for dual in CSR format */
928:   int        weight;        /* A weight for constrained nodes */
929:   int        options[5];    /* The option flags for METIS */
930:   MPI_Comm   metisComm;
931:   PetscTruth opt;
932:   int        ierr;

935:   /* Create the dual graph in distributed CSR format */
936:   weight = 0;
937:   PetscOptionsGetInt(mesh->prefix, "-mesh_partition_weight", &weight, &opt);
938:   MeshCreateDualCSR(mesh, &elemOffsets, &elemNeighbors, &edgeWeights, weight);

940:   /* Partition graph */
941:   if (numElements != p->numLocElements) {
942:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d for ParMETIS, should be %d", numElements, p->numLocElements);
943:   }
944:   PetscMalloc(numElements * sizeof(int), &elemProcs);
945:   options[0] = 0;   /* Returns the edge cut */
946:   options[1] = 150; /* The folding factor, 0 = no folding */
947:   options[2] = 1;   /* Serial initial partition */
948:   options[3] = 0;   /* C style numbering */
949:   options[4] = 0;   /* No timing information */
950:   MPI_Comm_dup(p->comm, &metisComm);
951:   PARKMETIS(p->firstElement, elemOffsets, PETSC_NULL, elemNeighbors, PETSC_NULL, elemProcs, options, metisComm);
952:   MPI_Comm_free(&metisComm);

954:   /* Destroy dual graph */
955:   MeshDestroyDualCSR(mesh, elemOffsets, elemNeighbors, edgeWeights);

957:   *elementMap = elemProcs;
958:   return(0);
959: #else
960:   SETERRQ(PETSC_ERR_SUP, "You must obtain George Karypis' ParMETIS software");
961: #endif
962: }

964: #undef  __FUNCT__
965: #define __FUNCT__ "PartitionCreateElementMap_NodeBased"
966: /*
967:   PartitionCreateElementMap_NodeBased - This function creates a map from elements to domains,
968:   using a previous partition of the nodes.

970:   Input Parameters:
971: + p           - The Partition
972: - numElements - The local number of elements

974:   Output Parameter:
975: . elementMap  - The map from elements to domains

977: .seealso: PartitionElements_Private()
978: */
979: int PartitionCreateElementMap_NodeBased(Partition p, int numElements, int **elementMap)
980: {
981:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
982:   Mesh                     mesh         = p->mesh;
983:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
984:   int                      numCorners   = mesh->numCorners;
985:   int                     *elements     = tri->faces;
986:   int                     *firstElement = p->firstElement;
987:   int                     *firstNode    = q->firstNode;
988:   int                      numProcs     = p->numProcs;
989:   int                      rank         = p->rank;
990:   int                      startElement = firstElement[rank];
991:   int                      endElement   = firstElement[rank+1];
992:   int                     *elemProcs;     /* The processor assigned to each element */
993:   int                      proc, elem, corner, node;
994:   int                      ierr;

997:   if (numElements != p->numLocElements) {
998:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d should be %d", numElements, p->numLocElements);
999:   }
1000:   /* Assign each element to the lowest-numbered domain owning one of its off-processor nodes; keep it if none */
1001:   PetscMalloc(numElements * sizeof(int), &elemProcs);
1002:   for(elem = 0; elem < numElements; elem++) {
1003:     elemProcs[elem] = -1;
1004:   }

1006:   for(elem = startElement; elem < endElement; elem++) {
1007:     for(corner = 0; corner < numCorners; corner++) {
1008:       node = elements[elem*numCorners+corner];
1009:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1010:         /* Get the domain of the node */
1011:         for(proc = 0; proc < numProcs; proc++) {
1012:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1013:         }
1014:         if ((elemProcs[elem-startElement] < 0) || (proc < elemProcs[elem-startElement]))
1015:           elemProcs[elem-startElement] = proc;
1016:       }
1017:     }
1018:     /* If no one else claims it, take the element */
1019:     if (elemProcs[elem-startElement] < 0) {
1020:       elemProcs[elem-startElement] = rank;
1021:     }
1022:   }

1024:   *elementMap = elemProcs;
1025:   return(0);
1026: }
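
/* Editor's sketch: the claiming rule above on hypothetical data. With two
   domains and firstNode = {0, 3, 6}, an element on rank 0 with nodes {2, 4, 5}
   touches off-processor nodes owned by domain 1, so domain 1 claims it; an
   element whose nodes are all local stays with the current rank. */
#include <stdio.h>

static int ClaimElement(const int *nodes, int numCorners, const int *firstNode,
                        int numProcs, int rank)
{
  int claim = -1, corner, proc;
  for (corner = 0; corner < numCorners; corner++) {
    int node = nodes[corner];
    if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
      for (proc = 0; proc < numProcs; proc++)
        if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
      if ((claim < 0) || (proc < claim)) claim = proc; /* lowest foreign owner wins */
    }
  }
  return (claim < 0) ? rank : claim;                   /* unclaimed: keep it */
}

int main(void)
{
  int firstNode[] = {0, 3, 6};
  int elemA[] = {2, 4, 5}, elemB[] = {0, 1, 2};
  printf("%d %d\n", ClaimElement(elemA, 3, firstNode, 2, 0),   /* 1 */
                    ClaimElement(elemB, 3, firstNode, 2, 0));  /* 0 */
  return 0;
}
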

1028: /*
1029:   PartitionCreateElementPartition_Private - This function uses the element map to create
1030:   partition structures for element-based data.

1032:   Input Parameters:
1033: + p              - The Partition
1034: - elementMap     - The map from elements to domains

1036:   Output Parameters:
1037: . ordering       - The new element ordering

1039:   Output Parameters in Partition_Triangular_2D:
1040: + numLocElements - The number of local elements
1041: - firstElement   - The first element in each domain

1043: .seealso: PartitionElements_Private()
1044: */
1045: #undef  __FUNCT__
1046: #define __FUNCT__ "PartitionCreateElementPartition_Private"
1047: int PartitionCreateElementPartition_Private(Partition p, int *elementMap, AO *ordering)
1048: {
1049:   int              numLocElements = p->numLocElements; /* Number of local elements before partitioning */
1050:   int             *firstElement   = p->firstElement;
1051:   int              numProcs       = p->numProcs;
1052:   int              rank           = p->rank;
1053:   int             *partSendElements;     /* The number of elements sent to each processor for partitioning */
1054:   int             *sumSendElements;      /* The prefix sums of partSendElements */
1055:   int             *partRecvElements;     /* The number of elements received from each processor for partitioning */
1056:   int             *sumRecvElements;      /* The prefix sums of partRecvElements */
1057:   int             *offsets;              /* The offsets into the send and receive arrays */
1058:   int             *sendBuffer;
1059:   int             *AppOrdering, *PetscOrdering;
1060:   int              proc, elem;
1061:   int              ierr;

1064:   /* Initialize communication */
1065:   PetscMalloc(numProcs * sizeof(int), &partSendElements);
1066:   PetscMalloc(numProcs * sizeof(int), &sumSendElements);
1067:   PetscMalloc(numProcs * sizeof(int), &partRecvElements);
1068:   PetscMalloc(numProcs * sizeof(int), &sumRecvElements);
1069:   PetscMalloc(numProcs * sizeof(int), &offsets);
1070:   PetscMemzero(partSendElements,  numProcs * sizeof(int));
1071:   PetscMemzero(sumSendElements,   numProcs * sizeof(int));
1072:   PetscMemzero(partRecvElements,  numProcs * sizeof(int));
1073:   PetscMemzero(sumRecvElements,   numProcs * sizeof(int));
1074:   PetscMemzero(offsets,           numProcs * sizeof(int));

1076:   /* Get sizes of interior element number blocks to send to each processor */
1077:   for(elem = 0; elem < numLocElements; elem++) {
1078:     partSendElements[elementMap[elem]]++;
1079:   }

1081:   /* Get sizes of interior element number blocks to receive from each processor */
1082:   MPI_Alltoall(partSendElements, 1, MPI_INT, partRecvElements, 1, MPI_INT, p->comm);

1084:   /* Calculate offsets into the interior element number send array */
1085:   for(proc = 1; proc < numProcs; proc++) {
1086:     sumSendElements[proc] = sumSendElements[proc-1] + partSendElements[proc-1];
1087:     offsets[proc]         = sumSendElements[proc];
1088:   }

1090:   /* Calculate offsets into the interior element number receive array */
1091:   for(proc = 1; proc < numProcs; proc++) {
1092:     sumRecvElements[proc] = sumRecvElements[proc-1] + partRecvElements[proc-1];
1093:   }

1095:   /* Send interior element numbers to each processor -- copying of elements already in place could be avoided */
1096:   p->numLocElements = sumRecvElements[numProcs-1] + partRecvElements[numProcs-1];
1097:   PetscMalloc(numLocElements    * sizeof(int), &sendBuffer);
1098:   PetscMalloc(p->numLocElements * sizeof(int), &AppOrdering);
1099:   PetscMalloc(p->numLocElements * sizeof(int), &PetscOrdering);
1100:   for(elem = 0; elem < numLocElements; elem++) {
1101:     sendBuffer[offsets[elementMap[elem]]++] = elem + firstElement[rank];
1102:   }
1103:   MPI_Alltoallv(sendBuffer,  partSendElements, sumSendElements, MPI_INT,
1104:                        AppOrdering, partRecvElements, sumRecvElements, MPI_INT, p->comm);
1105: 

1107:   /* If the mesh was initially distributed, we would need to send the elements themselves here */

1109:   /* Recompute size and offset of each domain */
1110:   MPI_Allgather(&p->numLocElements, 1, MPI_INT, &firstElement[1], 1, MPI_INT, p->comm);
1111:   for(proc = 1, firstElement[0] = 0; proc <= numProcs; proc++) {
1112:     firstElement[proc] = firstElement[proc] + firstElement[proc-1];
1113:   }

1115:   /* Create the global element reordering */
1116:   for(elem = 0; elem < p->numLocElements; elem++) {
1117:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
1118:     PetscOrdering[elem] = elem + firstElement[rank];
1119:   }

1121:   /* Cleanup */
1122:   PetscFree(partSendElements);
1123:   PetscFree(sumSendElements);
1124:   PetscFree(partRecvElements);
1125:   PetscFree(sumRecvElements);
1126:   PetscFree(offsets);
1127:   PetscFree(sendBuffer);

1129:   /* Create the global element reordering */
1130:   AOCreateBasic(p->comm, p->numLocElements, AppOrdering, PetscOrdering, ordering);
1131:   PetscLogObjectParent(p, p->ordering);

1133:   PetscFree(AppOrdering);
1134:   PetscFree(PetscOrdering);
1135:   return(0);
1136: }
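
/* Editor's sketch: what the AO records here. AppOrdering carries the old
   (application) element numbers this rank now owns, PetscOrdering their new
   contiguous numbers in [firstElement[rank], firstElement[rank+1]); afterwards
   AOApplicationToPetsc() translates arrays of old numbers in place, as
   PartitionRenumberElements_Private() below does for the neighbor lists.
   A minimal serial example (numbers hypothetical): */
#include "petscao.h"

int main(int argc, char **argv)
{
  AO  ao;
  int app[]   = {7, 3, 5};            /* old element numbers gathered onto this rank */
  int petsc[] = {0, 1, 2};            /* their new, contiguous numbering */
  int query[] = {5, 7};

  PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
  AOCreateBasic(PETSC_COMM_WORLD, 3, app, petsc, &ao);
  AOApplicationToPetsc(ao, 2, query); /* query becomes {2, 0} */
  AODestroy(ao);
  PetscFinalize();
  return 0;
}
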

1138: #undef  __FUNCT__
1139: #define __FUNCT__ "PartitionPermuteElements_Private"
1140: /*
1141:   PartitionPermuteElements_Private - This function permutes the data which is implicitly
1142:   indexed by element number

1144:   Input Parameter:
1145: . p         - The Partition

1147:   Output Parameter in Mesh_Triangular:
1148: + faces     - The nodes on each element
1149: - neighbors - The neighbors of each element

1151: .seealso: PartitionElements_Private()
1152: */
1153: int PartitionPermuteElements_Private(Partition p)
1154: {
1155:   Mesh             mesh = p->mesh;
1156:   Mesh_Triangular *tri  = (Mesh_Triangular *) mesh->data;
1157:   int              ierr;

1160:   AOApplicationToPetscPermuteInt(p->ordering, mesh->numCorners, tri->faces);
1161:   AOApplicationToPetscPermuteInt(p->ordering, 3,                tri->neighbors);
1162:   return(0);
1163: }

1165: #undef  __FUNCT__
1166: #define __FUNCT__ "PartitionRenumberElements_Private"
1167: /*
1168:   PartitionRenumberElements_Private - This function renumbers the element-based data globally in
1169:   order to make the canonical numbers sequential in each domain.

1171:   Input Parameter:
1172: . p         - The Partition

1174:   Output Parameter in Mesh_Triangular:
1175: . neighbors - The neighbors of each element

1177: .seealso: PartitionElements_Private()
1178: */
1179: int PartitionRenumberElements_Private(Partition p)
1180: {
1181:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
1182:   int                      numElements = p->numElements;
1183:   int                     *neighbors   = tri->neighbors;
1184:   int                      ierr;

1187:   AOApplicationToPetsc(p->ordering, numElements*3, neighbors);
1188:   return(0);
1189: }

1191: #undef  __FUNCT__
1192: #define __FUNCT__ "PartitionCalcGhostElements_Private"
1193: /*
1194:   PartitionCalcGhostElements_Private - This function calculates the ghost elements.

1196:   Input Parameters:
1197: . p         - The Partition

1199:   Output Parameters in Partition_Triangular_2D:

1201: .seealso: PartitionElements_Private()
1202: */
1203: int PartitionCalcGhostElements_Private(Partition p)
1204: {
1205:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
1206:   Mesh                     mesh         = p->mesh;
1207:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
1208:   int                      numCorners   = mesh->numCorners;
1209:   int                      numElements  = p->numElements;
1210:   int                     *elements     = tri->faces;
1211:   int                     *neighbors    = tri->neighbors;
1212:   int                     *firstElement = p->firstElement;
1213:   int                      numProcs     = p->numProcs;
1214:   int                      rank         = p->rank;
1215:   int                      numLocNodes  = q->numLocNodes;
1216:   int                      startNode    = q->firstNode[rank];
1217:   PetscTruth               isNodePart   = q->isNodePartitioned;
1218:   int                     *newProcElements; /* The number of new ghost elements from each processor */
1219:   int                      numNewElements;  /* The number of new ghost elements */
1220:   int                     *newElements;     /* The new ghost elements */
1221:   int                     *offsets;         /* The offsets into the send and receive arrays */
1222:   int                      degree;          /* The degree of a vertex */
1223:   int                     *support;         /* The list of elements in the support of a basis function */
1224:   int                     *elemMap;
1225:   int                      proc, elem, bElem, sElem, nElem, corner, neighbor, node;
1226:   int                      ierr;

1229:   PetscMalloc(numElements  * sizeof(int), &elemMap);
1230:   PetscMalloc(numProcs     * sizeof(int), &newProcElements);
1231:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1232:   PetscMemzero(newProcElements,  numProcs     * sizeof(int));
1233:   PetscMemzero(offsets,          (numProcs+1) * sizeof(int));
1234:   for(elem = 0; elem < numElements; elem++) {
1235:     elemMap[elem] = -1;
1236:   }
1237:   for(elem = 0; elem < numElements; elem++) {
1238:     if ((elem >= firstElement[rank]) && (elem < firstElement[rank+1])) {
1239:       /* Find a boundary element */
1240:       for(neighbor = 0; neighbor < 3; neighbor++) {
1241:         bElem = neighbors[elem*3+neighbor];
1242:         if ((bElem >= 0) && ((bElem < firstElement[rank]) || (bElem >= firstElement[rank+1])))
1243:           break;
1244:       }

1246:       if (neighbor < 3) {
1247:         /* Check the support of each vertex for off-processor elements */
1248:         for(corner = 0; corner < numCorners; corner++) {
1249:           node = elements[elem*numCorners+corner];
1250:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1251:           for(sElem = 0; sElem < degree; sElem++) {
1252:             nElem = support[sElem];
1253:             if (elemMap[nElem] >= 0) continue;
1254:             for(proc = 0; proc < numProcs; proc++) {
1255:               if ((proc != rank) && (nElem >= firstElement[proc]) && (nElem < firstElement[proc+1])) {
1256:                 elemMap[nElem] = proc;
1257:                 break;
1258:               }
1259:             }
1260:           }
1261:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
1262:         }
1263:       }
1264:     } else if (isNodePart == PETSC_TRUE) {
1265:       if (elemMap[elem] >= 0) continue;
1266:       /* We may also need elements on which we have nodes, but are not attached to */
1267:       for(corner = 0; corner < numCorners; corner++) {
1268:         node = elements[elem*numCorners+corner] - startNode;
1269:         if ((node >= 0) && (node < numLocNodes)) {
1270:           for(proc = 0; proc < numProcs; proc++) {
1271:             if ((elem >= firstElement[proc]) && (elem < firstElement[proc+1])) {
1272:               elemMap[elem] = proc;
1273:               break;
1274:             }
1275:           }
1276:         }
1277:       }
1278:     }
1279:   }

1281:   /* Compute new ghost element offsets */
1282:   for(elem = 0; elem < numElements; elem++) {
1283:     if (elemMap[elem] >= 0) {
1284:       newProcElements[elemMap[elem]]++;
1285:     }
1286:   }
1287:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++) {
1288:     numNewElements  += newProcElements[proc];
1289:     offsets[proc+1]  = offsets[proc] + newProcElements[proc];
1290:   }

1292:   /* Get ghost nodes */
1293:   if (numNewElements > 0) {
1294:     PetscMalloc(numNewElements * sizeof(int), &newElements);
1295:     for(elem = 0; elem < numElements; elem++) {
1296:       if (elemMap[elem] >= 0) {
1297:         newElements[offsets[elemMap[elem]]++] = elem;
1298:       }
1299:     }
1300:   }
1301:   for(proc = 1; proc < numProcs-1; proc++) {
1302:     if (offsets[proc] - offsets[proc-1]  != newProcElements[proc]) {
1303:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc %d should be %d",
1304:                offsets[proc] - offsets[proc-1], proc, newProcElements[proc]);
1305:     }
1306:   }
1307:   if (offsets[0] != newProcElements[0]) {
1308:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc 0 should be %d",
1309:              offsets[0], newProcElements[0]);
1310:   }

1312:   /* Add new ghosts */
1313:   p->numOverlapElements = p->numLocElements;
1314:   PartitionGetNewGhostElements_Serial(p, newProcElements, newElements);

1316:   /* Cleanup */
1317:   PetscFree(elemMap);
1318:   PetscFree(newProcElements);
1319:   PetscFree(offsets);
1320:   if (numNewElements > 0) {
1321:     PetscFree(newElements);
1322:   }
1323:   return(0);
1324: }

1326: #undef  __FUNCT__
1327: #define __FUNCT__ "PartitionDistributeElements_Private"
1328: /*
1329:   PartitionDistributeElements_Private - This function distributes the element-based data, and
1330:   permutes arrays which are implicitly indexed by element number.

1332:   Input Parameters:
1333: . p         - The Partition

1335:   Output Parameters in Mesh_Triangular:
1336: + faces     - The nodes on each element
1337: - neighbors - The element neighbors

1339: .seealso: PartitionElements_Private()
1340: */
1341: int PartitionDistributeElements_Private(Partition p)
1342: {
1343:   Mesh             mesh               = p->mesh;
1344:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1345:   int              numLocElements     = p->numLocElements;
1346:   int              numOverlapElements = p->numOverlapElements;
1347:   int              numGhostElements   = numOverlapElements - numLocElements;
1348:   int              numCorners         = mesh->numCorners;
1349:   int             *firstElement       = p->firstElement;
1350:   int             *ghostElements      = p->ghostElements;
1351:   int              rank               = p->rank;
1352:   int             *temp;
1353:   int              elem, corner, neighbor;
1354:   int              ierr;

1356:   /* Note here that we can use PetscMemcpy() for the interior variables because we already permuted the
1357:      arrays so that ghost elements could be computed.
1358:   */
1360:   mesh->numFaces = numLocElements;
1361:   PetscMalloc(numOverlapElements*numCorners * sizeof(int), &temp);
1362:   /* Interior faces */
1363:   PetscMemcpy(temp, &tri->faces[firstElement[rank]*numCorners], numLocElements*numCorners * sizeof(int));
1364:   /* Ghost faces */
1365:   for(elem = 0; elem < numGhostElements; elem++) {
1366:     for(corner = 0; corner < numCorners; corner++) {
1367:       temp[(numLocElements+elem)*numCorners+corner] = tri->faces[ghostElements[elem]*numCorners+corner];
1368:     }
1369:   }
1370:   PetscFree(tri->faces);
1371:   tri->faces = temp;
1372:   PetscLogObjectMemory(p, numGhostElements*numCorners * sizeof(int));

1374:   PetscMalloc(numOverlapElements*3 * sizeof(int), &temp);
1375:   /* Interior neighbors */
1376:   PetscMemcpy(temp, &tri->neighbors[firstElement[rank]*3], numLocElements*3 * sizeof(int));
1377:   /* Ghost neighbors */
1378:   for(elem = 0; elem < numGhostElements; elem++) {
1379:     for(neighbor = 0; neighbor < 3; neighbor++) {
1380:       temp[(numLocElements+elem)*3+neighbor] = tri->neighbors[ghostElements[elem]*3+neighbor];
1381:     }
1382:   }
1383:   PetscFree(tri->neighbors);
1384:   tri->neighbors = temp;
1385:   PetscLogObjectMemory(p, numGhostElements*3 * sizeof(int));

1387:   return(0);
1388: }
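
/*
  Editor's note (sketch, not library code): both copies above produce the local
  layout [ interior | ghosts ], copying the contiguous interior block first and
  then gathering the ghost rows one by one.  For a generic int array of row
  width `width` the pattern reduces to:
*/
static void GatherLocalRows_Sketch(const int global[], int first, int numLoc,
                                   int numGhost, const int ghosts[], int width,
                                   int local[])
{
  int row, c;

  for (row = 0; row < numLoc; row++)       /* interior rows first..first+numLoc-1 */
    for (c = 0; c < width; c++)
      local[row*width+c] = global[(first+row)*width+c];
  for (row = 0; row < numGhost; row++)     /* ghost rows appended after the interior */
    for (c = 0; c < width; c++)
      local[(numLoc+row)*width+c] = global[ghosts[row]*width+c];
}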

1390: #undef  __FUNCT__
1392: int PartitionElementGlobalToLocal_Private(Partition p)
1393: {
1394:   Mesh_Triangular *tri                = (Mesh_Triangular *) p->mesh->data;
1395:   int              numOverlapElements = p->numOverlapElements;
1396:   int             *neighbors          = tri->neighbors;
1397:   int              neighbor;
1398:   int              ierr;

1401:   /* We indicate neighbors which are not interior or ghost by -2 since boundaries are -1 */
1402:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
1403:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
1404:   }
1405:   return(0);
1406: }
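
/*
  Editor's note: a sketch of the numbering convention inferred from the comment
  above, not the actual PartitionGlobalToLocalElementIndex() implementation.
  Interior elements map to [0, numLoc), ghosts to [numLoc, numOverlap),
  boundary markers (-1) are preserved, and anything else becomes -2.
*/
static int GlobalToLocalElement_Sketch(int gElem, int firstLoc, int numLoc,
                                       int numGhost, const int ghosts[])
{
  int g;

  if (gElem == -1) return -1;                                  /* boundary */
  if ((gElem >= firstLoc) && (gElem < firstLoc+numLoc)) return gElem - firstLoc;
  for (g = 0; g < numGhost; g++)
    if (ghosts[g] == gElem) return numLoc + g;                 /* ghost */
  return -2;                                                   /* off-process, not a ghost */
}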

1408: #undef  __FUNCT__
1410: int PartitionGetNewGhostNodes_Element(Partition p)
1411: {
1412:   Partition_Triangular_2D *q                = (Partition_Triangular_2D *) p->data;
1413:   Mesh                     mesh             = p->mesh;
1414:   Mesh_Triangular         *tri              = (Mesh_Triangular *) mesh->data;
1415:   int                      numCorners       = mesh->numCorners;
1416:   int                     *elements         = tri->faces;
1417:   int                     *firstElement     = p->firstElement;
1418:   int                      numGhostElements = p->numOverlapElements - p->numLocElements;
1419:   int                     *ghostElements    = p->ghostElements;
1420:   int                      numNodes         = q->numNodes;
1421:   int                     *firstNode        = q->firstNode;
1422:   int                      numProcs         = p->numProcs;
1423:   int                      rank             = p->rank;
1424:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1425:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1426:   int                     *newNodes;     /* New ghost nodes for this domain */
1427:   int                     *offsets;      /* The offsets into newNodes[] */
1428:   int                     *nodeMap;      /* The map of nodes to processors */
1429:   int                      proc, elem, corner, node, gNode;
1430:   int                      ierr;

1433:   if (q->isNodePartitioned == PETSC_FALSE)
1434:     return(0);
1435:   /* Initialize communication */
1436:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1437:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1438:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1439:   PetscMemzero(newProcNodes, numProcs    * sizeof(int));
1440:   PetscMemzero(offsets,     (numProcs+1) * sizeof(int));
1441:   for(node = 0; node < numNodes; node++) {
1442:     nodeMap[node] = -1;
1443:   }

1445:   /* Check for new ghost nodes */
1446:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1447:     for(corner = 0; corner < numCorners; corner++) {
1448:       node = elements[elem*numCorners+corner];
1449:       if (nodeMap[node] >= 0) continue;

1451:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1452:         /* Get the domain of the node */
1453:         for(proc = 0; proc < numProcs; proc++) {
1454:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1455:         }
1456:         /* Check for the presence of this node */
1457:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1458:           nodeMap[node] = proc;
1459:         }
1460:       }
1461:     }
1462:   }
1463:   for(elem = 0; elem < numGhostElements; elem++) {
1464:     for(corner = 0; corner < numCorners; corner++) {
1465:       node = elements[ghostElements[elem]*numCorners+corner];
1466:       if (nodeMap[node] >= 0) continue;

1468:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1469:         /* Get the domain of the node */
1470:         for(proc = 0; proc < numProcs; proc++) {
1471:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1472:         }
1473:         /* Check for the presence of this node */
1474:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1475:           nodeMap[node] = proc;
1476:         }
1477:       }
1478:     }
1479:   }

1481:   /* Compute new ghost node offsets */
1482:   for(node = 0; node < numNodes; node++) {
1483:     if (nodeMap[node] >= 0) {
1484:       newProcNodes[nodeMap[node]]++;
1485:     }
1486:   }
1487:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1488:     numNewNodes   += newProcNodes[proc];
1489:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1490:   }

1492:   /* Get ghost nodes */
1493:   if (numNewNodes > 0) {
1494:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1495:     for(node = 0; node < numNodes; node++) {
1496:       if (nodeMap[node] >= 0) {
1497:         newNodes[offsets[nodeMap[node]]++] = node;
1498:       }
1499:     }
1500:   }
1501:   for(proc = 1; proc < numProcs; proc++) {
1502:     if (offsets[proc] - offsets[proc-1] != newProcNodes[proc]) {
1503:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1504:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1505:     }
1506:   }
1507:   if (offsets[0] != newProcNodes[0]) {
1508:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1509:   }

1511:   /* Add new ghosts */
1512:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1514:   /* Cleanup */
1515:   PetscFree(nodeMap);
1516:   PetscFree(newProcNodes);
1517:   PetscFree(offsets);
1518:   if (numNewNodes) {
1519:     PetscFree(newNodes);
1520:   }
1521:   return(0);
1522: }
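
/*
  Editor's note (sketch, not library code): the owner searches above scan
  firstNode[] linearly.  Since firstNode[] is nondecreasing with
  firstNode[0] = 0, the same lookup can be done by bisection, which matters
  when the number of domains is large:
*/
static int FindOwner_Sketch(int node, const int first[], int numProcs)
{
  int lo = 0, hi = numProcs;          /* invariant: first[lo] <= node < first[hi] */

  while (hi - lo > 1) {
    int mid = lo + (hi - lo)/2;
    if (node >= first[mid]) lo = mid;
    else                    hi = mid;
  }
  return lo;                          /* first[lo] <= node < first[lo+1] */
}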

1524: #undef  __FUNCT__
1526: int PartitionGetNewGhostNodes_Edge(Partition p)
1527: {
1528:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1529:   Mesh_Triangular         *tri       = (Mesh_Triangular *) p->mesh->data;
1530:   int                     *edges     = tri->edges;
1531:   int                     *firstEdge = q->firstEdge;
1532:   int                      numNodes  = q->numNodes;
1533:   int                     *firstNode = q->firstNode;
1534:   int                      numProcs  = p->numProcs;
1535:   int                      rank      = p->rank;
1536:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1537:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1538:   int                     *newNodes;     /* New ghost nodes for this domain */
1539:   int                     *offsets;      /* The offsets into newNodes[] */
1540:   int                     *nodeMap;      /* The map of nodes to processors */
1541:   int                      proc, edge, node, startNode, endNode, ghostNode, gNode;
1542:   int                      ierr;

1545:   /* Initialize communication */
1546:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1547:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1548:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1549:   PetscMemzero(newProcNodes, numProcs    * sizeof(int));
1550:   PetscMemzero(offsets,     (numProcs+1) * sizeof(int));
1551:   for(node = 0; node < numNodes; node++) {
1552:     nodeMap[node] = -1;
1553:   }

1555:   /* Check for new ghost nodes */
1556:   for(edge = firstEdge[rank]; edge < firstEdge[rank+1]; edge++) {
1557:     /* Check for new ghost node */
1558:     startNode = edges[edge*2];
1559:     endNode   = edges[edge*2+1];
1560:     ghostNode = -1;
1561:     if ((startNode < firstNode[rank]) || (startNode >= firstNode[rank+1])) {
1562:       ghostNode = startNode;
1563:     } else if ((endNode < firstNode[rank]) || (endNode >= firstNode[rank+1])) {
1564:       ghostNode = endNode;
1565:     }
1566:     if (ghostNode >= 0) {
1567:       /* Get the domain of the node */
1568:       for(proc = 0; proc < numProcs; proc++) {
1569:         if ((ghostNode >= firstNode[proc]) && (ghostNode < firstNode[proc+1])) break;
1570:       }
1571:       /* Check for the presence of this ghost node */
1572:       if (PartitionGhostNodeIndex_Private(p, ghostNode, &gNode) && ((nodeMap[ghostNode] < 0) || (proc < nodeMap[ghostNode]))) {
1573:         /* We must add this node as a ghost node */
1574:         nodeMap[ghostNode] = proc;
1575:       }
1576:     }
1577:   }

1579:   /* Compute new ghost node offsets */
1580:   for(node = 0; node < numNodes; node++) {
1581:     if (nodeMap[node] >= 0) {
1582:       newProcNodes[nodeMap[node]]++;
1583:     }
1584:   }
1585:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1586:     numNewNodes   += newProcNodes[proc];
1587:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1588:   }

1590:   /* Get ghost nodes */
1591:   if (numNewNodes > 0) {
1592:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1593:     for(node = 0; node < numNodes; node++) {
1594:       if (nodeMap[node] >= 0) {
1595:         newNodes[offsets[nodeMap[node]]++] = node;
1596:       }
1597:     }
1598:   }
1599:   for(proc = 1; proc < numProcs; proc++) {
1600:     if (offsets[proc] - offsets[proc-1] != newProcNodes[proc]) {
1601:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1602:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1603:     }
1604:   }
1605:   if (offsets[0] != newProcNodes[0]) {
1606:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1607:   }

1609:   /* Add new ghosts */
1610:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1612:   /* Cleanup */
1613:   PetscFree(nodeMap);
1614:   PetscFree(newProcNodes);
1615:   PetscFree(offsets);
1616:   if (numNewNodes) {
1617:     PetscFree(newNodes);
1618:   }
1619:   return(0);
1620: }

1622: #undef  __FUNCT__
1624: int PartitionElements_Private(Partition p)
1625: {
1626:   int (*f)(Partition, int, int **);
1627:   int  *elementMap; /* The map from elements to domains */
1628:   int   ierr;

1631:   /* Create a new map of elements to domains */
1632:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap", (void (**)(void)) &f);
1633:   (*f)(p, p->numLocElements, &elementMap);

1635: #if 0
1636:   /* Communicate interior elements */
1637:   GridInteriorExchange(numLocElements, elementMap, p->firstElement);
1638: #endif
1639:   /* Create the element partition */
1640:   PartitionCreateElementPartition_Private(p, elementMap, &p->ordering);
1641:   PetscFree(elementMap);

1643:   /* Permute arrays implicitly indexed by element number */
1644:   PartitionPermuteElements_Private(p);

1646:   /* Globally renumber the elements to make canonical numbers sequential in each domain */
1647:   PartitionRenumberElements_Private(p);

1649:   /* Calculate ghosts */
1650:   PartitionCalcGhostElements_Private(p);
1651: 
1652:   /* Check for new ghost nodes created by the element partition */
1653:   PartitionGetNewGhostNodes_Element(p);

1655:   p->isElementPartitioned = PETSC_TRUE;
1656:   return(0);
1657: }
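
/*
  Editor's note (sketch, not library code): PartitionElements_Private() is
  independent of which map builder was composed (METIS or node-based) because
  PetscObjectComposeFunctionDynamic()/PetscObjectQueryFunction() implement a
  named function-pointer lookup on the object.  Stripped of the object
  machinery, the idea reduces to a registry (assumes <string.h>):
*/
typedef int (*ElementMapBuilder_Sketch)(Partition, int, int **);
static ElementMapBuilder_Sketch LookupBuilder_Sketch(const char *name, int n,
                                                     const char *names[],
                                                     ElementMapBuilder_Sketch fns[])
{
  int i;

  for (i = 0; i < n; i++) {
    if (!strcmp(names[i], name)) return fns[i];
  }
  return 0;  /* no builder composed under this name */
}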

1659: #undef  __FUNCT__
1661: /*
1662:   PartitionCreateNodeMap_Simple_Seq - This function creates a map from nodes to domains
1663:   by retaining the existing sequential distribution of interior nodes.

1665:   Input Parameters:
1666: + p        - The Partition
1667: - numNodes - The global number of nodes

1669:   Output Parameter:
1670: . nodeMap  - The map from nodes to domains

1672: .seealso: PartitionNodes_Private()
1673: */
1674: int PartitionCreateNodeMap_Simple_Seq(Partition p, int numNodes, int **nodeMap)
1675: {
1676:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1677:   int                     *firstNode = q->firstNode;
1678:   int                      rank      = p->rank;
1679:   int                     *nodeProcs; /* The processor which each node will lie on */
1680:   int                      node;
1681:   int                      ierr;

1684:   /* Use the existing interior nodes */
1685:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1686:   for(node = 0; node < numNodes; node++) {
1687:     nodeProcs[node] = -1;
1688:   }
1689:   for(node = firstNode[rank]; node < firstNode[rank+1]; node++) {
1690:     nodeProcs[node] = rank;
1691:   }

1693:   *nodeMap = nodeProcs;
1694:   return(0);
1695: }

1697: #undef  __FUNCT__
1699: /*
1700:   PartitionCreateNodeMap_ElementBased - This function creates a map from nodes to domains,
1701:   based upon a prior partition of the elements.

1703:   Input Parameters:
1704: + p        - The Partition
1705: - numNodes - The global number of nodes

1707:   Output Parameter:
1708: . nodeMap  - The map from nodes to domains

1710: .seealso: PartitionNodes_Private()
1711: */
1712: int PartitionCreateNodeMap_ElementBased(Partition p, int numNodes, int **nodeMap)
1713: {
1714:   Mesh             mesh               = p->mesh;
1715:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1716:   int              numLocElements     = p->numLocElements;
1717:   int              numGhostElements   = p->numOverlapElements - p->numLocElements;
1718:   int              numCorners         = mesh->numCorners;
1719:   int             *elements           = tri->faces;
1720:   int             *firstElement       = p->firstElement;
1721:   int             *ghostElements      = p->ghostElements;
1722:   int             *ghostElementProcs  = p->ghostElementProcs;
1723:   int              rank               = p->rank;
1724:   int             *nodeProcs;     /* The processor which each node will lie on */
1725:   int             *support;
1726:   int              nProc, elem, sElem, nElem, nLocElem, gElem, corner, nCorner, node, degree;
1727:   int              ierr;

1730:   /* Count nodes on this partition -- keep node if you are the lower numbered domain */
1731:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1732:   for(node = 0; node < numNodes; node++) {
1733:     nodeProcs[node] = -1;
1734:   }

1736:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1737:     for(corner = 0; corner < numCorners; corner++) {
1738:       node = elements[elem*numCorners+corner];

1740:       /* Check the support of the node */
1741:       MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1742:       for(sElem = 0; sElem < degree; sElem++) {
1743:         nElem = support[sElem];
1744:         /* See if neighbor is in another domain */
1745:         if ((nElem < firstElement[rank]) || (nElem >= firstElement[rank+1])) {
1746:           /* Check to see if node is contained in the neighboring element */
1747:           for(nCorner = 0; nCorner < numCorners; nCorner++) {
1748:             if (elements[nElem*numCorners+nCorner] == node) {
1749:               PartitionGlobalToLocalElementIndex(p, nElem, &nLocElem);
1750:               nProc = ghostElementProcs[nLocElem-numLocElements];
1751:               /* Give the node to the lowest numbered domain */
1752:               if ((nProc < rank) && ((nodeProcs[node] < 0) || (nProc < nodeProcs[node]))) {
1753:                 nodeProcs[node] = nProc;
1754:               }
1755:               break;
1756:             }
1757:           }
1758:         }
1759:       }
1760:       MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);

1762:       /* If no one else claims it, take the node */
1763:       if (nodeProcs[node] < 0) {
1764:         nodeProcs[node] = rank;
1765:       }
1766:     }
1767:   }

1769:   /* Now assign the ghost nodes from ghost elements (which we can never own) */
1770:   for(gElem = 0; gElem < numGhostElements; gElem++) {
1771:     for(corner = 0; corner < numCorners; corner++) {
1772:       node = elements[ghostElements[gElem]*numCorners+corner];
1773:       if (nodeProcs[node] < 0)
1774:         nodeProcs[node] = ghostElementProcs[gElem];
1775:     }
1776:   }

1778:   *nodeMap = nodeProcs;
1779:   return(0);
1780: }
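
/*
  Editor's note (sketch, not library code): the claiming rule above is a
  running minimum over the ranks of all domains whose elements touch the node;
  every processor that sees the node computes the same minimum, which is what
  keeps the map consistent without further communication:
*/
static int ClaimNode_Sketch(int numTouching, const int touchingRanks[])
{
  int i, owner = touchingRanks[0];

  for (i = 1; i < numTouching; i++) {
    if (touchingRanks[i] < owner) owner = touchingRanks[i];
  }
  return owner;
}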

1782: /*
1783:   PartitionCreateNodePartition_Private - This function uses the node map to create
1784:   partition structures for node-based data.

1786:   Input Parameters:
1787: + p               - The Partition
1788: - nodeMap         - The map from nodes to domains

1790:   Output Parameter:
1791: . ordering        - The new node ordering

1793:   Output Parameters in Partition_Triangular_2D:
1794: + numLocNodes     - The number of local nodes
1795: . numOverlapNodes - The number of local + ghost nodes
1796: . firstNode       - The first node in each domain
1797: - ghostNodes      - The global number for each ghost node

1799: .seealso: PartitionNodes_Private()
1800: */
1801: #undef  __FUNCT__
1803: int PartitionCreateNodePartition_Private(Partition p, int *nodeMap, AO *ordering)
1804: {
1805:   Partition_Triangular_2D *q        = (Partition_Triangular_2D *) p->data;
1806:   int                      numNodes = q->numNodes;
1807:   int                      numProcs = p->numProcs;
1808:   int                      rank     = p->rank;
1809:   int                      numGhostNodes; /* Number of ghost nodes for this domain */
1810:   int                     *AppOrdering, *PetscOrdering;
1811:   int                      proc, node, idx, idx2;
1812:   int                      ierr;

1815:   /* Determine local and ghost sizes */
1816:   for(node = 0, q->numLocNodes = 0, numGhostNodes = 0; node < numNodes; node++) {
1817:     if (nodeMap[node] == rank) {
1818:       q->numLocNodes++;
1819:     } else if (nodeMap[node] >= 0) {
1820:       numGhostNodes++;
1821:     }
1822:   }

1824:   /* Recompute size and offset of each domain */
1825:   MPI_Allgather(&q->numLocNodes, 1, MPI_INT, &q->firstNode[1], 1, MPI_INT, p->comm);
1826:   for(proc = 1, q->firstNode[0] = 0; proc <= numProcs; proc++) {
1827:     q->firstNode[proc] = q->firstNode[proc] + q->firstNode[proc-1];
1828:   }
1829:   if (q->firstNode[numProcs] != numNodes) {
1830:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of nodes %d should be %d", q->firstNode[numProcs], numNodes);
1831:   }

1833:   /* Setup ghost node structures */
1834:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;
1835:   if (numGhostNodes > 0) {
1836:     PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
1837:   }

1839:   /* Get indices for reordering */
1840:   PetscMalloc(q->numLocNodes * sizeof(int), &AppOrdering);
1841:   PetscMalloc(q->numLocNodes * sizeof(int), &PetscOrdering);
1842:   for(node = 0; node < q->numLocNodes; node++) {
1843:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
1844:     PetscOrdering[node] = q->firstNode[rank] + node;
1845:   }
1846:   for(node = 0, idx = 0, idx2 = 0; node < numNodes; node++) {
1847:     if (nodeMap[node] == rank) {
1848:       AppOrdering[idx++]    = node;
1849:     } else if (nodeMap[node] >= 0) {
1850:       q->ghostNodes[idx2++] = node;
1851:     }
1852:   }
1853:   if (idx  != q->numLocNodes) SETERRQ(PETSC_ERR_PLIB, "Invalid node renumbering");
1854:   if (idx2 != numGhostNodes)  SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node renumbering");

1856:   /* Create the global node reordering */
1857:   AOCreateBasic(p->comm, q->numLocNodes, AppOrdering, PetscOrdering, ordering);
1858:   if (ierr) {
1859:     PartitionDebugAO_Private(p, nodeMap);
1860:   }

1862:   PetscFree(AppOrdering);
1863:   PetscFree(PetscOrdering);
1864:   return(0);
1865: }
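
/*
  Editor's worked example of the reordering built above (numbers invented for
  illustration): with 2 processors and 4 nodes, where proc 0 keeps nodes {0,2}
  and proc 1 keeps {1,3}, proc 0 passes AppOrdering = {0,2}, PetscOrdering =
  {0,1} and proc 1 passes AppOrdering = {1,3}, PetscOrdering = {2,3} to
  AOCreateBasic().  The resulting AO maps application numbers {0,1,2,3} to
  PETSc numbers {0,2,1,3}, so each domain's nodes become contiguous in the
  new numbering.
*/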

1867: #undef  __FUNCT__
1869: /*
1870:   PartitionPermuteNodes_Private - This function permutes the data that is implicitly
1871:   indexed by node number.

1873:   Input Parameter:
1874: . p       - The Partition

1876:   Output Parameters in Mesh_Triangular:
1877: + nodes   - The coordinates on each node
1878: . markers - The node markers
1879: - degrees - The degree of each node

1881: .seealso: PartitionNodes_Private()
1882: */
1883: int PartitionPermuteNodes_Private(Partition p)
1884: {
1885:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
1886:   Mesh                     mesh = p->mesh;
1887:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
1888:   int                      ierr;

1891:   AOApplicationToPetscPermuteReal(q->nodeOrdering, mesh->dim, tri->nodes);
1892:   AOApplicationToPetscPermuteInt(q->nodeOrdering,  1,         tri->markers);
1893:   AOApplicationToPetscPermuteInt(q->nodeOrdering,  1,         tri->degrees);
1894:   return(0);
1895: }

1897: #undef  __FUNCT__
1899: /*
1900:   PartitionRenumberNodes_Private - This function renumbers the node-based data globally in
1901:   order to make the canonical numbers sequential in each domain.

1903:   Input Parameter:
1904: . p          - The Partition

1906:   Output Parameters in Mesh_Triangular:
1907: + faces      - The nodes in each element
1908: . edges      - The nodes on each edge
1909: - bdNodes    - The nodes on each boundary

1911:   Output Parameter in Partition_Triangular_2D:
1912: . ghostNodes - The global number of each ghost node

1914: .seealso: PartitionNodes_Private()
1915: */
1916: int PartitionRenumberNodes_Private(Partition p)
1917: {
1918:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
1919:   Mesh                     mesh          = p->mesh;
1920:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
1921:   int                      numElements   = p->numElements;
1922:   int                      numCorners    = mesh->numCorners;
1923:   int                     *faces         = tri->faces;
1924:   int                      numEdges      = q->numEdges;
1925:   int                     *edges         = tri->edges;
1926:   int                      numBdNodes    = q->numBdNodes;
1927:   int                     *bdNodes       = tri->bdNodes;
1928:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
1929:   int                     *ghostNodes    = q->ghostNodes;
1930:   int                      ierr;

1933:   AOApplicationToPetsc(q->nodeOrdering, numEdges*2,             edges);
1934:   AOApplicationToPetsc(q->nodeOrdering, numElements*numCorners, faces);
1935:   AOApplicationToPetsc(q->nodeOrdering, numBdNodes,             bdNodes);
1936:   AOApplicationToPetsc(q->nodeOrdering, numGhostNodes,          ghostNodes);
1937:   return(0);
1938: }

1940: #undef  __FUNCT__
1942: /*
1943:   PartitionDistributeNodes_Private - This function distributes the node-based data and
1944:   permutes arrays that are implicitly indexed by node number.

1946:   Input Parameter:
1947: . p       - The Partition

1949:   Output Parameters in Mesh_Triangular:
1950: + nodes   - The node coordinates
1951: . markers - The node markers
1952: - degrees - The node degrees

1954: .seealso: PartitionNodes_Private()
1955: */
1956: int PartitionDistributeNodes_Private(Partition p)
1957: {
1958:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
1959:   Mesh                     mesh            = p->mesh;
1960:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
1961:   int                      dim             = mesh->dim;
1962:   int                      numLocNodes     = q->numLocNodes;
1963:   int                      numOverlapNodes = q->numOverlapNodes;
1964:   int                      numGhostNodes   = numOverlapNodes - numLocNodes;
1965:   int                     *firstNode       = q->firstNode;
1966:   int                     *ghostNodes      = q->ghostNodes;
1967:   int                      rank            = p->rank;
1968:   int                     *temp;
1969:   double                  *temp2;
1970:   int                      node, c;
1971:   int                      ierr;

1974:   mesh->numNodes = numLocNodes;
1975:   PetscMalloc(numOverlapNodes*dim * sizeof(double), &temp2);
1976:   /* Interior nodes */
1977:   PetscMemcpy(temp2, &tri->nodes[firstNode[rank]*dim], numLocNodes*dim * sizeof(double));
1978:   /* Ghost nodes */
1979:   for(node = 0; node < numGhostNodes; node++) {
1980:     for(c = 0; c < dim; c++) {
1981:       temp2[(numLocNodes+node)*dim+c] = tri->nodes[ghostNodes[node]*dim+c];
1982:     }
1983:   }
1984:   PetscFree(tri->nodes);
1985:   tri->nodes = temp2;
1986:   PetscLogObjectMemory(p, numGhostNodes*dim * sizeof(double));

1988:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
1989:   /* Interior markers */
1990:   PetscMemcpy(temp, &tri->markers[firstNode[rank]], numLocNodes * sizeof(int));
1991:   /* Ghost markers */
1992:   for(node = 0; node < numGhostNodes; node++) {
1993:     temp[numLocNodes+node] = tri->markers[ghostNodes[node]];
1994:   }
1995:   PetscFree(tri->markers);
1996:   tri->markers = temp;
1997:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

1999:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
2000:   /* Interior degrees */
2001:   PetscMemcpy(temp, &tri->degrees[firstNode[rank]], numLocNodes * sizeof(int));
2002:   /* Ghost degrees */
2003:   for(node = 0; node < numGhostNodes; node++) {
2004:     temp[numLocNodes+node] = tri->degrees[ghostNodes[node]];
2005:   }
2006:   PetscFree(tri->degrees);
2007:   tri->degrees = temp;
2008:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

2010:   return(0);
2011: }

2013: #undef  __FUNCT__
2015: /*
2016:   PartitionNodeCalcGhostProcs_Private - This function determines the processor from
2017:   which each ghost node comes.

2019:   Input Parameter:
2020: . p              - The Partition

2022:   Output Parameter in Partition_Triangular_2D:
2023: . ghostNodeProcs - The domain of each ghost node

2025: .seealso: PartitionNodes_Private()
2026: */
2027: int PartitionNodeCalcGhostProcs_Private(Partition p)
2028: {
2029:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2030:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
2031:   int                     *ghostNodes    = q->ghostNodes;
2032:   int                     *firstNode     = q->firstNode;
2033:   int                      numProcs      = p->numProcs;
2034:   int                     *nodePerm;
2035:   int                      proc, node;
2036:   int                      ierr;

2039:   if (numGhostNodes == 0)
2040:     return(0);

2042:   /* Resort ghost nodes after renumbering */
2043:   PartitionSortGhosts_Private(p, &numGhostNodes, ghostNodes, &nodePerm);
2044:   PetscFree(nodePerm);
2045:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;

2047:   /* calculate ghost node domains */
2048:   PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
2049:   for(node = 0; node < numGhostNodes; node++) {
2050:     for(proc = 0; proc < numProcs; proc++) {
2051:       if ((ghostNodes[node] >= firstNode[proc]) && (ghostNodes[node] <  firstNode[proc+1])) {
2052:         q->ghostNodeProcs[node] = proc;
2053:         break;
2054:       }
2055:     }
2056:     if (proc == numProcs) SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d, global number %d", node, ghostNodes[node]);
2057:   }
2058: #ifdef PETSC_USE_BOPT_g
2059:   for(node = 0; node < numGhostNodes; node++) {
2060:     if ((ghostNodes[node] < firstNode[q->ghostNodeProcs[node]]) || (ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1]))
2061:       SETERRQ2(PETSC_ERR_LIB, "Invalid source processor %d on ghost node %d", q->ghostNodeProcs[node], node);
2062:   }
2063: #endif
2064:   return(0);
2065: }

2067: #undef  __FUNCT__
2069: int PartitionNodes_Private(Partition p)
2070: {
2071:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
2072:   int                    (*f)(Partition, int, int **);
2073:   int                     *nodeMap; /* The map from nodes to domains */
2074:   int                      ierr;

2077:   /* Create a new map of nodes to domains */
2078:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap", (void (**)(void)) &f);
2079:   (*f)(p, q->numNodes, &nodeMap);

2081:   /* Create the node partition */
2082:   PartitionCreateNodePartition_Private(p, nodeMap, &q->nodeOrdering);
2083:   PetscFree(nodeMap);

2085:   /* Permute arrays implicitly indexed by node number */
2086:   PartitionPermuteNodes_Private(p);

2088:   /* Globally renumber the nodes to make canonical numbers sequential in each domain */
2089:   /* WARNING: We must resort ghost nodes after renumbering, but this is done anyway in edge partitioning */
2090:   PartitionRenumberNodes_Private(p);

2092:   /* Assign ghost node source processors */
2093:   PartitionNodeCalcGhostProcs_Private(p);

2095:   q->isNodePartitioned = PETSC_TRUE;
2096:   return(0);
2097: }

2099: #undef  __FUNCT__
2101: /*
2102:   PartitionCreateEdgeMap_NodeBased - This function creates a map from edges to domains,
2103:   using a previous partition of the nodes.

2105:   Input Parameters:
2106: + p        - The Partition
2107: - numEdges - The global number of edges

2109:   Output Parameter:
2110: . edgeMap  - The map from edges to domains

2112: .seealso: PartitionEdges_Private()
2113: */
2114: int PartitionCreateEdgeMap_NodeBased(Partition p, int numEdges, int **edgeMap)
2115: {
2116:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
2117:   Mesh                     mesh      = p->mesh;
2118:   Mesh_Triangular         *tri       = (Mesh_Triangular *) mesh->data;
2119:   int                     *edges     = tri->edges;
2120:   int                     *firstNode = q->firstNode;
2121:   int                      numProcs  = p->numProcs;
2122:   int                      rank      = p->rank;
2123:   int                      startProc = -1;
2124:   int                      endProc   = -1;
2125:   int                     *edgeProcs;     /* The processor assigned to each edge */
2126:   int                      proc, edge, startNode, endNode;
2127:   int                      ierr;

2130:   PetscMalloc(numEdges * sizeof(int), &edgeProcs);
2131:   for(edge = 0; edge < numEdges; edge++) {
2132:     edgeProcs[edge] = -1;
2133:   }

2135:   /* Count edges on this partition -- keep edge if you are the lower numbered domain */
2136:   for(edge = 0; edge < numEdges; edge++) {
2137:     startNode = edges[edge*2];
2138:     endNode   = edges[edge*2+1];

2140:     if ((startNode >= firstNode[rank]) && (startNode < firstNode[rank+1])) {
2141:       /* startNode is local */
2142:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
2143:         /* endNode is local */
2144:         edgeProcs[edge] = rank;
2145:       } else {
2146:         /* endNode is not local */
2147:         for(proc = 0; proc < numProcs; proc++) {
2148:           if ((endNode >= firstNode[proc]) && (endNode < firstNode[proc+1])) {
2149:             endProc = proc;
2150:             break;
2151:           }
2152:         }
2153:         if (rank < endProc) {
2154:           edgeProcs[edge] = rank;
2155:         }
2156:       }
2157:     } else {
2158:       /* startNode is not local */
2159:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
2160:         /* endNode is local */
2161:         for(proc = 0; proc < numProcs; proc++) {
2162:           if ((startNode >= firstNode[proc]) && (startNode < firstNode[proc+1])) {
2163:             startProc = proc;
2164:             break;
2165:           }
2166:         }
2167:         if (rank < startProc) {
2168:           edgeProcs[edge] = rank;
2169:         }
2170:       }
2171:     }
2172:   }

2174:   *edgeMap = edgeProcs;
2175:   return(0);
2176: }
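
/*
  Editor's worked example of the rule above: for an edge whose endpoints are
  owned by processors 1 and 3, processor 1 sees one local endpoint and
  rank (1) < 3, so it claims the edge; processor 3 also sees one local
  endpoint but rank (3) > 1, so it does not.  Every cut edge is therefore
  claimed exactly once, by the lower-ranked endpoint owner, while edges with
  both endpoints local are claimed by their single owner.
*/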

2178: #undef  __FUNCT__
2180: int PartitionCreateEdgePartition_Private(Partition p, int *edgeMap, AO *ordering)
2181: {
2182:   Mesh                     mesh          = p->mesh;
2183:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2184:   int                      numEdges      = mesh->numEdges;
2185:   int                      numProcs      = p->numProcs;
2186:   int                      rank          = p->rank;
2187:   int                     *AppOrdering   = PETSC_NULL;
2188:   int                     *PetscOrdering = PETSC_NULL;
2189:   int                      proc, edge, idx;
2190:   int                      ierr;

2193:   /* Determine local edges and new ghost nodes */
2194:   for(edge = 0, q->numLocEdges = 0; edge < numEdges; edge++) {
2195:     if (edgeMap[edge] == rank) {
2196:       q->numLocEdges++;
2197:     }
2198:   }

2200:   /* Recompute size and offset of each domain */
2201:   MPI_Allgather(&q->numLocEdges, 1, MPI_INT, &q->firstEdge[1], 1, MPI_INT, p->comm);
2202:   for(proc = 1, q->firstEdge[0] = 0; proc <= numProcs; proc++)
2203:     q->firstEdge[proc] = q->firstEdge[proc] + q->firstEdge[proc-1];
2204:   if (q->firstEdge[numProcs] != q->numEdges) {
2205:     SETERRQ2(PETSC_ERR_PLIB, "Invalid global number of edges %d should be %d", q->firstEdge[numProcs], q->numEdges);
2206:   }

2208:   /* Get indices for reordering */
2209:   if (q->numLocEdges > 0) {
2210:     PetscMalloc(q->numLocEdges * sizeof(int), &AppOrdering);
2211:     PetscMalloc(q->numLocEdges * sizeof(int), &PetscOrdering);
2212:   }
2213:   for(edge = 0; edge < q->numLocEdges; edge++) {
2214:     PetscOrdering[edge] = q->firstEdge[rank] + edge;
2215:   }
2216:   for(edge = 0, idx = 0; edge < numEdges; edge++) {
2217:     if (edgeMap[edge] == rank) {
2218:       AppOrdering[idx++] = edge;
2219:     }
2220:   }
2221:   if (idx != q->numLocEdges) {
2222:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of local edges %d should be %d", idx, q->numLocEdges);
2223:   }

2225:   /* Create the global edge reordering */
2226:   AOCreateBasic(p->comm, q->numLocEdges, AppOrdering, PetscOrdering, ordering);

2228:   if (q->numLocEdges > 0) {
2229:     PetscFree(AppOrdering);
2230:     PetscFree(PetscOrdering);
2231:   }
2232:   return(0);
2233: }
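
/*
  Editor's note (sketch, not library code): the size/offset computation above
  is the standard Allgather-plus-prefix-sum layout pattern (assumes <mpi.h>):
*/
static void ComputeFirst_Sketch(MPI_Comm comm, int numLoc, int numProcs, int first[])
{
  int proc;

  MPI_Allgather(&numLoc, 1, MPI_INT, &first[1], 1, MPI_INT, comm);
  first[0] = 0;
  for (proc = 1; proc <= numProcs; proc++) first[proc] += first[proc-1];
  /* first[rank] .. first[rank+1]-1 are now this processor's global numbers */
}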

2235: #undef  __FUNCT__
2237: /*
2238:   PartitionDistributeEdges_Private - This function distributes the edge-based data and
2239:   permutes arrays that are implicitly indexed by edge number.

2241:   Input Parameter:
2242: . p           - The Partition

2244:   Output Parameters in Mesh_Triangular:
2245: + edges       - The nodes on each edge
2246: - edgemarkers - The edge markers

2248: .seealso: PartitionEdges_Private()
2249: */
2250: int PartitionDistributeEdges_Private(Partition p) {
2251:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2252:   Mesh                     mesh        = p->mesh;
2253:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
2254:   int                      numLocEdges = q->numLocEdges;
2255:   int                     *firstEdge   = q->firstEdge;
2256:   int                      rank        = p->rank;
2257:   int                     *temp        = PETSC_NULL;
2258:   int                      ierr;

2261:   mesh->numEdges = numLocEdges;
2262:   if (numLocEdges > 0) {
2263:     PetscMalloc(numLocEdges*2 * sizeof(int), &temp);
2264:     PetscMemcpy(temp, &tri->edges[firstEdge[rank]*2], numLocEdges*2 * sizeof(int));
2265:     PetscFree(tri->edges);
2266:   }
2267:   tri->edges = temp;

2269:   if (numLocEdges > 0) {
2270:     PetscMalloc(numLocEdges * sizeof(int), &temp);
2271:     PetscMemcpy(temp, &tri->edgemarkers[firstEdge[rank]], numLocEdges * sizeof(int));
2272:     PetscFree(tri->edgemarkers);
2273:   }
2274:   tri->edgemarkers = temp;

2276:   return(0);
2277: }

2279: #undef  __FUNCT__
2281: /*
2282:   PartitionPermuteEdges_Private - This function permutes the data that is implicitly
2283:   indexed by edge number.

2285:   Input Parameter:
2286: . p           - The Partition

2288:   Output Parameters in Mesh_Triangular:
2289: + edges       - The nodes on each edge
2290: - edgemarkers - The edge markers

2292: .seealso: PartitionEdges_Private()
2293: */
2294: int PartitionPermuteEdges_Private(Partition p)
2295: {
2296:   Partition_Triangular_2D *q   = (Partition_Triangular_2D *) p->data;
2297:   Mesh_Triangular         *tri = (Mesh_Triangular *) p->mesh->data;
2298:   int                      ierr;

2301:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 2, tri->edges);
2302:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 1, tri->edgemarkers);
2303:   return(0);
2304: }

2306: #undef  __FUNCT__
2308: /*
2309:   PartitionRenumberEdges_Private - This function renumbers the edge-based data globally in
2310:   order to make the canonical numbers sequential in each domain.

2312:   Input Parameter:
2313: . p       - The Partition

2315:   Output Parameter in Mesh_Triangular:
2316: . bdEdges - The edges on each boundary

2318: .seealso: PartitionEdges_Private()
2319: */
2320: int PartitionRenumberEdges_Private(Partition p)
2321: {
2322:   Partition_Triangular_2D *q          = (Partition_Triangular_2D *) p->data;
2323:   Mesh                     mesh       = p->mesh;
2324:   Mesh_Triangular         *tri        = (Mesh_Triangular *) mesh->data;
2325:   int                      numBdEdges = mesh->numBdEdges;
2326:   int                     *bdEdges    = tri->bdEdges;
2327:   int                      ierr;

2330:   AOApplicationToPetsc(q->edgeOrdering, numBdEdges, bdEdges);
2331:   return(0);
2332: }

2334: #undef  __FUNCT__
2336: int PartitionEdgeGlobalToLocal_Private(Partition p)
2337: {
2338:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2339:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
2340:   int                      numLocEdges = q->numLocEdges;
2341:   int                     *edges       = tri->edges;
2342:   int                      node;
2343:   int                      ierr;

2346:   for(node = 0; node < numLocEdges*2; node++) {
2347:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2348:   }
2349:   return(0);
2350: }

2352: #undef  __FUNCT__
2354: int PartitionEdges_Private(Partition p)
2355: {
2356:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
2357:   int                    (*f)(Partition, int, int **);
2358:   int                     *edgeMap; /* The map from edges to domains */
2359:   int                      ierr;

2362:   /* Create a new map of edges to domains */
2363:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap", (void (**)(void)) &f);
2364:   (*f)(p, q->numEdges, &edgeMap);

2366:   /* Create the edge partition */
2367:   PartitionCreateEdgePartition_Private(p, edgeMap, &q->edgeOrdering);
2368:   PetscFree(edgeMap);

2370:   /* Permute arrays implicitly indexed by edge number */
2371:   PartitionPermuteEdges_Private(p);

2373:   /* Globally renumber the edges to make canonical numbers sequential in each domain */
2374:   PartitionRenumberEdges_Private(p);

2376:   /* Check for new ghost nodes created by the edge partition */
2377:   PartitionGetNewGhostNodes_Edge(p);

2379:   q->isEdgePartitioned = PETSC_TRUE;
2380:   return(0);
2381: }

2383: #undef  __FUNCT__
2385: int PartitionBoundaryNodes_Private(Partition p)
2386: {
2387:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2388:   Mesh                     mesh          = p->mesh;
2389:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
2390:   int                      numBdNodes    = mesh->numBdNodes;
2391:   int                     *bdNodes       = tri->bdNodes;
2392:   int                     *firstNode     = q->firstNode;
2393:   int                      numProcs      = p->numProcs;
2394:   int                      rank          = p->rank;
2395:   int                      proc, node;
2396:   int                      ierr;

2399:   q->numLocBdNodes = 0;
2400:   for(node = 0; node < numBdNodes; node++) {
2401:     if ((bdNodes[node] >= firstNode[rank]) && (bdNodes[node] < firstNode[rank+1]))
2402:       q->numLocBdNodes++;
2403:   }
2404:   MPI_Allgather(&q->numLocBdNodes, 1, MPI_INT, &q->firstBdNode[1], 1, MPI_INT, p->comm);
2405:   q->firstBdNode[0] = 0;
2406:   for(proc = 1; proc <= numProcs; proc++) {
2407:     q->firstBdNode[proc] = q->firstBdNode[proc] + q->firstBdNode[proc-1];
2408:   }
2409:   if (q->firstBdNode[numProcs] != q->numBdNodes) {
2410:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of boundary nodes %d should be %d", q->firstBdNode[numProcs], q->numBdNodes);
2411:   }
2412:   return(0);
2413: }

2415: #undef  __FUNCT__
2417: /*
2418:   PartitionDistributeBdNodes_Private - This function determines the ghost boundary nodes
2419:   in this domain once the nodes have been distributed.

2421:   Input Parameter:
2422: . p                 - The Partition

2424:   Output Parameters in Partition_Triangular_2D:
2425: + numOverlapBdNodes - The number of local + ghost boundary nodes
2426: - ghostBdNodes      - The ghost nodes which lie on the boundary

2428: .seealso: PartitionBoundaryNodes_Private()
2429: */
2430: int PartitionDistributeBdNodes_Private(Partition p)
2431: {
2432:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2433:   Mesh_Triangular         *tri           = (Mesh_Triangular *) p->mesh->data;
2434:   int                      numLocNodes   = q->numLocNodes;
2435:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
2436:   int                     *markers       = tri->markers;
2437:   int                      numLocBdNodes = q->numLocBdNodes;
2438:   int                      node, bdNode;
2439:   int                      ierr;

2442:   /* Process ghost boundary nodes */
2443:   q->numOverlapBdNodes = numLocBdNodes;
2444:   for(node = 0; node < numGhostNodes; node++) {
2445:     if (markers[numLocNodes+node] != 0)
2446:       q->numOverlapBdNodes++;
2447:   }
2448:   if (q->numOverlapBdNodes > numLocBdNodes) {
2449:     PetscMalloc((q->numOverlapBdNodes - numLocBdNodes) * sizeof(int), &q->ghostBdNodes);
2450:     for(node = 0, bdNode = 0; node < numGhostNodes; node++) {
2451:       if (markers[numLocNodes+node] != 0)
2452:         q->ghostBdNodes[bdNode++] = node;
2453:     }
2454:   }
2455:   return(0);
2456: }

2458: #undef  __FUNCT__
2460: int PartitionDistribute_Private(Partition p)
2461: {

2465:   /* Redistribute the elements and arrays implicitly numbered by element numbers */
2466:   PartitionDistributeElements_Private(p);

2468:   /* Redistribute the nodes and permute arrays implicitly numbered by node numbers */
2469:   PartitionDistributeNodes_Private(p);

2471:   /* Redistribute the edges and permute arrays implicitly numbered by edge numbers */
2472:   PartitionDistributeEdges_Private(p);

2474:   /* Store ghost boundary nodes */
2475:   PartitionDistributeBdNodes_Private(p);
2476:   return(0);
2477: }

2479: #undef  __FUNCT__
2481: int PartitionGlobalToLocal_Private(Partition p)
2482: {
2483:   Partition_Triangular_2D *q                  = (Partition_Triangular_2D *) p->data;
2484:   Mesh                     mesh               = p->mesh;
2485:   Mesh_Triangular         *tri                = (Mesh_Triangular *) mesh->data;
2486:   int                      numOverlapElements = p->numOverlapElements;
2487:   int                      numCorners         = mesh->numCorners;
2488:   int                     *faces              = tri->faces;
2489:   int                     *neighbors          = tri->neighbors;
2490:   int                      numLocEdges        = q->numLocEdges;
2491:   int                     *edges              = tri->edges;
2492:   int                      corner, neighbor, node;
2493:   int                      ierr;

2496:   for(corner = 0; corner < numOverlapElements*numCorners; corner++) {
2497:     PartitionGlobalToLocalNodeIndex(p, faces[corner], &faces[corner]);
2498:   }
2499:   /* We indicate neighbors which are not interior or ghost by -2 since boundaries are -1 */
2500:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
2501:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
2502:   }
2503:   for(node = 0; node < numLocEdges*2; node++) {
2504:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2505:   }
2506:   return(0);
2507: }

2509: #undef  __FUNCT__
2511: /*@
2512:   PartitionCreateTriangular2D - Creates a partition of a two-dimensional unstructured grid.

2514:   Collective on Mesh

2516:   Input Parameter:
2517: . mesh      - The mesh to be partitioned

2519:   Output Parameter:
2520: . part      - The partition

2522:   Level: beginner

2524: .keywords: unstructured mesh, partition
2525: .seealso: MeshCreateTriangular2D()
2526: @*/
2527: int PartitionCreateTriangular2D(Mesh mesh, Partition *part)
2528: {
2529:   int        numProcs;
2530:   PetscTruth opt;
2531:   int        ierr;

2534:   MPI_Comm_size(mesh->comm, &numProcs);
2535:   PartitionCreate(mesh, part);
2536:   if (numProcs == 1) {
2537:     PetscObjectComposeFunctionDynamic((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_Uni",
2538:                                       (void (*)(void)) PartitionCreate_Uni);
2539: 
2540:   } else {
2541:     PetscObjectComposeFunctionDynamic((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_ElementBased",
2542:                                       (void (*)(void)) PartitionCreate_ElementBased);
2543: 
2544:     PetscOptionsHasName(mesh->prefix, "-part_node_based", &opt);
2545:     if (opt == PETSC_TRUE) {
2546:       PetscObjectComposeFunctionDynamic((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_NodeBased",
2547:                                         (void (*)(void)) PartitionCreate_NodeBased);
2548: 
2549:     }
2550:   }
2551:   PartitionSetType(*part, PARTITION_TRIANGULAR_2D);

2553:   PartitionViewFromOptions_Private(*part, "Partition");
2554:   return(0);
2555: }
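
/*
  Editor's usage sketch (hedged: error handling abbreviated, and the mesh
  creation step is assumed from the .seealso reference rather than shown):

    Mesh      mesh;
    Partition part;
    int       ierr;

    ...create mesh, e.g. with MeshCreateTriangular2D()...
    ierr = PartitionCreateTriangular2D(mesh, &part);CHKERRQ(ierr);

  Running with -part_node_based selects the node-based creation composed
  above instead of the default element-based one.
*/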

2557: EXTERN_C_BEGIN
2558: #undef  __FUNCT__
2560: int PartitionSerialize_Triangular_2D(Mesh mesh, Partition *part, PetscViewer viewer, PetscTruth store)
2561: {
2562:   Partition                p;
2563:   Partition_Triangular_2D *q;
2564:   int                      fd;
2565:   int                      numGhostElements, numGhostNodes, numGhostBdNodes, hasOrdering;
2566:   int                      numProcs, rank;
2567:   int                      one  = 1;
2568:   int                      zero = 0;
2569:   int                      ierr;

2572:   PetscViewerBinaryGetDescriptor(viewer, &fd);
2573:   if (store == PETSC_TRUE) {
2574:     p = *part;
2575:     numProcs = p->numProcs;
2576:     numGhostElements = p->numOverlapElements - p->numLocElements;
2577:     PetscBinaryWrite(fd, &p->numProcs,           1,                PETSC_INT, 0);
2578:     PetscBinaryWrite(fd, &p->rank,               1,                PETSC_INT, 0);
2579:     PetscBinaryWrite(fd, &p->numLocElements,     1,                PETSC_INT, 0);
2580:     PetscBinaryWrite(fd, &p->numElements,        1,                PETSC_INT, 0);
2581:     PetscBinaryWrite(fd, &p->numOverlapElements, 1,                PETSC_INT, 0);
2582:     PetscBinaryWrite(fd,  p->firstElement,       numProcs+1,       PETSC_INT, 0);
2583:     PetscBinaryWrite(fd,  p->ghostElements,      numGhostElements, PETSC_INT, 0);
2584:     PetscBinaryWrite(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT, 0);
2585:     if (p->ordering != PETSC_NULL) {
2586:       PetscBinaryWrite(fd, &one,                 1,                PETSC_INT, 0);
2587:       AOSerialize(p->comm, &p->ordering, viewer, store);
2588:     } else {
2589:       PetscBinaryWrite(fd, &zero,                1,                PETSC_INT, 0);
2590:     }

2592:     q    = (Partition_Triangular_2D *) (*part)->data;
2593:     numGhostNodes   = q->numOverlapNodes   - q->numLocNodes;
2594:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2595:     PetscBinaryWrite(fd, &q->numLocNodes,        1,                PETSC_INT, 0);
2596:     PetscBinaryWrite(fd, &q->numNodes,           1,                PETSC_INT, 0);
2597:     PetscBinaryWrite(fd, &q->numOverlapNodes,    1,                PETSC_INT, 0);
2598:     PetscBinaryWrite(fd,  q->firstNode,          numProcs+1,       PETSC_INT, 0);
2599:     PetscBinaryWrite(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT, 0);
2600:     PetscBinaryWrite(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT, 0);
2601:     PetscBinaryWrite(fd, &q->numLocEdges,        1,                PETSC_INT, 0);
2602:     PetscBinaryWrite(fd, &q->numEdges,           1,                PETSC_INT, 0);
2603:     PetscBinaryWrite(fd,  q->firstEdge,          numProcs+1,       PETSC_INT, 0);
2604:     PetscBinaryWrite(fd, &q->numLocBdNodes,      1,                PETSC_INT, 0);
2605:     PetscBinaryWrite(fd, &q->numBdNodes,         1,                PETSC_INT, 0);
2606:     PetscBinaryWrite(fd, &q->numOverlapBdNodes,  1,                PETSC_INT, 0);
2607:     PetscBinaryWrite(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT, 0);
2608:     PetscBinaryWrite(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT, 0);
2609:   } else {
2610:     /* Create the partition context */
2611:     PartitionCreate(mesh, &p);
2612:     PetscNew(Partition_Triangular_2D, &q);
2613:     PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2614:     PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2615:     PetscStrallocpy(PARTITION_TRIANGULAR_2D, &p->type_name);
2616:     p->data = (void *) q;

2618:     MPI_Comm_size(p->comm, &numProcs);
2619:     MPI_Comm_rank(p->comm, &rank);
2620:     PetscBinaryRead(fd, &p->numProcs,           1,                PETSC_INT);
2621:     PetscBinaryRead(fd, &p->rank,               1,                PETSC_INT);
2622:     if (p->numProcs != numProcs) {
2623:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid number of processors %d should be %d", numProcs, p->numProcs);
2624:     }
2625:     if (p->rank != rank) {
2626:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid processor rank %d should be %d", rank, p->rank);
2627:     }
2628:     PetscBinaryRead(fd, &p->numLocElements,     1,                PETSC_INT);
2629:     PetscBinaryRead(fd, &p->numElements,        1,                PETSC_INT);
2630:     PetscBinaryRead(fd, &p->numOverlapElements, 1,                PETSC_INT);
2631:     PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2632:     PetscBinaryRead(fd,  p->firstElement,       numProcs+1,       PETSC_INT);
2633:     numGhostElements = p->numOverlapElements - p->numLocElements;
2634:     if (numGhostElements > 0) {
2635:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElements);
2636:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElementProcs);
2637:     }
2638:     PetscBinaryRead(fd,  p->ghostElements,      numGhostElements, PETSC_INT);
2639:     PetscBinaryRead(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT);
2640:     PetscBinaryRead(fd, &hasOrdering,           1,                PETSC_INT);
2641:     if (hasOrdering) {
2642:       AOSerialize(p->comm, &p->ordering, viewer, store);
2643:     }

2645:     q->ghostNodes        = PETSC_NULL;
2646:     q->ghostNodeProcs    = PETSC_NULL;
2647:     q->ghostBdNodes      = PETSC_NULL;
2648:     q->nodeOrdering      = PETSC_NULL;
2649:     q->edgeOrdering      = PETSC_NULL;

2651:     PetscBinaryRead(fd, &q->numLocNodes,        1,                PETSC_INT);
2652:     PetscBinaryRead(fd, &q->numNodes,           1,                PETSC_INT);
2653:     PetscBinaryRead(fd, &q->numOverlapNodes,    1,                PETSC_INT);
2654:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2655:     PetscBinaryRead(fd,  q->firstNode,          numProcs+1,       PETSC_INT);
2656:     numGhostNodes   = q->numOverlapNodes - q->numLocNodes;
2657:     if (numGhostNodes > 0) {
2658:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
2659:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
2660:     }
2661:     PetscBinaryRead(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT);
2662:     PetscBinaryRead(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT);
2663:     PetscBinaryRead(fd, &q->numLocEdges,        1,                PETSC_INT);
2664:     PetscBinaryRead(fd, &q->numEdges,           1,                PETSC_INT);
2665:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2666:     PetscBinaryRead(fd,  q->firstEdge,          numProcs+1,       PETSC_INT);
2667:     PetscBinaryRead(fd, &q->numLocBdNodes,      1,                PETSC_INT);
2668:     PetscBinaryRead(fd, &q->numBdNodes,         1,                PETSC_INT);
2669:     PetscBinaryRead(fd, &q->numOverlapBdNodes,  1,                PETSC_INT);
2670:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2671:     PetscBinaryRead(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT);
2672:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2673:     if (numGhostBdNodes) {
2674:       PetscMalloc(numGhostBdNodes * sizeof(int), &q->ghostBdNodes);
2675:     }
2676:     PetscBinaryRead(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT);
2677:     PetscLogObjectMemory(p, ((numProcs+1)*4 + numGhostElements*2 + numGhostNodes*2 + numGhostBdNodes)* sizeof(int));
2678:     *part = p;
2679:   }
2680:   if (p->numProcs > 1) {
2681:     AOSerialize(p->comm, &q->nodeOrdering, viewer, store);
2682:     AOSerialize(p->comm, &q->edgeOrdering, viewer, store);
2683:   }

2685:   return(0);
2686: }
2687: EXTERN_C_END
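
/*
  Editor's note: PartitionSerialize_Triangular_2D() must read fields back in
  exactly the order in which they were written; the symmetric store/load
  pattern is (using the binary viewer calls as they appear above):

    if (store == PETSC_TRUE) { PetscBinaryWrite(fd, &n, 1, PETSC_INT, 0); }
    else                     { PetscBinaryRead(fd,  &n, 1, PETSC_INT);    }
*/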

2689: EXTERN_C_BEGIN
2690: #undef  __FUNCT__
2692: int PartitionCreate_ElementBased(Partition p)
2693: {
2694: #ifdef PETSC_USE_BOPT_g
2695:   int cut; /* The number of edges of the dual crossing the partition */
2696: #endif


2701:   /* Partition elements */
2702:   PetscObjectComposeFunctionDynamic((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2703:                                     "PartitionCreateElementMap_METIS", (void (*)(void)) PartitionCreateElementMap_METIS);
2704: 
2705:   PartitionElements_Private(p);

2707:   /* Partition the nodes */
2708:   PetscObjectComposeFunctionDynamic((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2709:                                     "PartitionCreateNodeMap_ElementBased", (void (*)(void)) PartitionCreateNodeMap_ElementBased);
2710: 
2711:   PartitionNodes_Private(p);

2713:   /* Partition the edges -- Changes the ghost nodes */
2714:   PetscObjectComposeFunctionDynamic((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2715:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2716: 
2717:   PartitionEdges_Private(p);

2719:   /* Partition boundary nodes */
2720:   PartitionBoundaryNodes_Private(p);

2722:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2723:   PartitionDistribute_Private(p);

2725:   /* Change to local node numbers */
2726:   PartitionGlobalToLocal_Private(p);

2728: #ifdef PETSC_USE_BOPT_g
2729:   /* Compute the size of the cut */
2730:   PartitionCalcCut_Private(p, &cut);
2731:   PetscLogInfo(p, "Size of cut: %d\n", cut);
2732: #endif

2734:   return(0);
2735: }
2736: EXTERN_C_END

2738: EXTERN_C_BEGIN
2739: #undef  __FUNCT__
2741: int PartitionCreate_Uni(Partition p)
2742: {
2743:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
2744:   Mesh                     mesh = p->mesh;
2745:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
2746:   PetscTruth               opt;
2747:   int                      ierr;

2750:   PetscOptionsHasName(p->prefix, "-part_mesh_reorder", &opt);
2751:   if (opt == PETSC_TRUE) {
2752:     MeshGetOrdering(mesh, MESH_ORDER_TRIANGULAR_2D_RCM, &q->nodeOrdering);
2753:     PetscLogObjectParent(p, q->nodeOrdering);

2755:     /* Permute arrays implicitly numbered by node numbers */
2756:     AOApplicationToPetscPermuteReal(q->nodeOrdering, 2, tri->nodes);
2757:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->markers);
2758:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->degrees);
2759:     /* Renumber arrays dependent on the canonical node numbering */
2760:     AOApplicationToPetsc(q->nodeOrdering, mesh->numEdges*2,                       tri->edges);
2761:     AOApplicationToPetsc(q->nodeOrdering, p->numOverlapElements*mesh->numCorners, tri->faces);
2762:     AOApplicationToPetsc(q->nodeOrdering, mesh->numBdNodes,                       tri->bdNodes);
2763:   }
2764:   return(0);
2765: }
2766: EXTERN_C_END

2768: EXTERN_C_BEGIN
2769: #undef  __FUNCT__
2771: int PartitionCreate_NodeBased(Partition p) {
2772: #ifdef PETSC_USE_BOPT_g
2773:   int cut; /* The number of edges of the dual crossing the partition */
2774: #endif

2778:   /* Partition Nodes */
2779:   PetscObjectComposeFunctionDynamic((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2780:                                     "PartitionCreateNodeMap_Simple_Seq", (void (*)(void)) PartitionCreateNodeMap_Simple_Seq);
2781: 
2782:   PartitionNodes_Private(p);

2784:   /* Partition elements */
2785:   PetscObjectComposeFunctionDynamic((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2786:                                     "PartitionCreateElementMap_NodeBased", (void (*)(void)) PartitionCreateElementMap_NodeBased);
2787: 
2788:   PartitionElements_Private(p);

2790:   /* Partition edges */
2791:   PetscObjectComposeFunctionDynamic((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2792:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2793: 
2794:   PartitionEdges_Private(p);

2796:   /* Partition boundary nodes */
2797:   PartitionBoundaryNodes_Private(p);

2799:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2800:   PartitionDistribute_Private(p);

2802:   /* Change to local node numbers */
2803:   PartitionGlobalToLocal_Private(p);

2805: #ifdef PETSC_USE_BOPT_g
2806:   /* Compute the size of the cut */
2807:   PartitionCalcCut_Private(p, &cut);
2808:   PetscLogInfo(p, "Size of cut: %d\n", cut);
2809: #endif

2811:   return(0);
2812: }
2813: EXTERN_C_END
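
/* PartitionCreate_NodeBased mirrors PartitionCreate_ElementBased with the
   first two stages exchanged: here the nodes are distributed first, with a
   simple sequential map, and the element map is then derived from node
   ownership, whereas the element-based variant partitions elements with METIS
   and derives node ownership from them. Both variants share the same
   edge-map, boundary-node, redistribution, and global-to-local stages. */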


2816: EXTERN_C_BEGIN
2817: #undef  __FUNCT__
2818: #define __FUNCT__ "PartitionCreate_Triangular_2D"
2819: int PartitionCreate_Triangular_2D(Partition p) {
2820:   Partition_Triangular_2D *q;
2821:   Mesh                     mesh = p->mesh;
2822:   int                    (*f)(Partition);
2823:   int                      numProcs, rank, rem;
2824:   int                      proc;
2825:   int                      ierr;

2828:   MPI_Comm_size(p->comm, &numProcs);
2829:   MPI_Comm_rank(p->comm, &rank);

2831:   PetscNew(Partition_Triangular_2D, &q);
2832:   PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2833:   PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2834:   p->data = (void *) q;
2835:   PetscStrallocpy(PARTITION_SER_TRIANGULAR_2D_BINARY, &p->serialize_name);
2836:   PetscLogObjectParent(mesh, p);

2838:   /* Initialize structure */
2839:   p->numProcs             = numProcs;
2840:   p->rank                 = rank;
2841:   p->isElementPartitioned = PETSC_FALSE;
2842:   p->ordering             = PETSC_NULL;
2843:   p->ghostElements        = PETSC_NULL;
2844:   p->ghostElementProcs    = PETSC_NULL;
2845:   q->isNodePartitioned    = PETSC_FALSE;
2846:   q->isEdgePartitioned    = PETSC_FALSE;
2847:   q->nodeOrdering         = PETSC_NULL;
2848:   q->ghostNodes           = PETSC_NULL;
2849:   q->ghostNodeProcs       = PETSC_NULL;
2850:   q->edgeOrdering         = PETSC_NULL;
2851:   q->ghostBdNodes         = PETSC_NULL;
2852:   PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2853:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2854:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2855:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2856:   PetscLogObjectMemory(p, (numProcs+1)*4 * sizeof(int));
2857:   PetscMemzero(p->firstElement, (numProcs+1) * sizeof(int));
2858:   PetscMemzero(q->firstNode,    (numProcs+1) * sizeof(int));
2859:   PetscMemzero(q->firstBdNode,  (numProcs+1) * sizeof(int));
2860:   PetscMemzero(q->firstEdge,    (numProcs+1) * sizeof(int));
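
/* Each first* array has numProcs+1 entries laid out as ownership ranges:
   first*[proc] is the global number of the first item on processor proc, so
   first*[rank+1] - first*[rank] is the local count and first*[numProcs] is
   the global total, as used below. */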

2862:   /* Set up a crude preliminary partition */
2863:   for(proc = 0; proc < numProcs; proc++) {
2864:     rem                   = (mesh->numFaces%numProcs);
2865:     p->firstElement[proc] = (mesh->numFaces/numProcs)*proc + PetscMin(rem, proc);
2866:     rem                   = (mesh->numNodes%numProcs);
2867:     q->firstNode[proc]    = (mesh->numNodes/numProcs)*proc + PetscMin(rem, proc);
2868:     rem                   = (mesh->numEdges%numProcs);
2869:     q->firstEdge[proc]    = (mesh->numEdges/numProcs)*proc + PetscMin(rem, proc);
2870:     rem                   = (mesh->numBdNodes%numProcs);
2871:     q->firstBdNode[proc]  = (mesh->numBdNodes/numProcs)*proc + PetscMin(rem, proc);
2872:   }
2873:   p->firstElement[numProcs] = mesh->numFaces;
2874:   q->firstNode[numProcs]    = mesh->numNodes;
2875:   q->firstEdge[numProcs]    = mesh->numEdges;
2876:   q->firstBdNode[numProcs]  = mesh->numBdNodes;
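
/* The loop above block-distributes N items over P processors, giving the
   first N%P processors one extra item. For example, N = 10 faces on P = 3
   processors gives N/P = 3 and rem = 1, so
     firstElement[0] = 3*0 + min(1,0) = 0
     firstElement[1] = 3*1 + min(1,1) = 4
     firstElement[2] = 3*2 + min(1,2) = 7
     firstElement[3] = 10
   that is, local counts of 4, 3, and 3. */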

2878:   p->numLocElements            = p->firstElement[rank+1] - p->firstElement[rank];
2879:   p->numElements               = p->firstElement[numProcs];
2880:   p->numOverlapElements        = p->numLocElements;
2881:   q->numLocNodes               = q->firstNode[rank+1] - q->firstNode[rank];
2882:   q->numNodes                  = q->firstNode[numProcs];
2883:   q->numOverlapNodes           = q->numLocNodes;
2884:   q->numLocEdges               = q->firstEdge[rank+1] - q->firstEdge[rank];
2885:   q->numEdges                  = q->firstEdge[numProcs];
2886:   q->numLocBdNodes             = q->firstBdNode[rank+1] - q->firstBdNode[rank];
2887:   q->numBdNodes                = q->firstBdNode[numProcs];
2888:   q->numOverlapBdNodes         = q->numLocBdNodes;
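
/* At this point the overlap counts equal the local counts: the ghost arrays
   are still PETSC_NULL, and ghosts are introduced only by the type-specific
   create routine invoked below. */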

2890:   /* Partition the mesh */
2891:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_Create_C", (PetscVoidFunction) &f);
2892:   (*f)(p);
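
/* "PartitionTriangular2D_Create_C" is presumably composed on the object when
   the partition type is selected, and resolves to one of
   PartitionCreate_ElementBased, PartitionCreate_NodeBased, or
   PartitionCreate_Uni above; a defensive caller would check that f is not
   PETSC_NULL before dispatching. */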

2894:   /* Recalculate derived quantities */
2895:   MeshTriangular2DCalcAreas(mesh, PETSC_FALSE);
2896:   MeshTriangular2DCalcAspectRatios(mesh, PETSC_FALSE);

2898:   return(0);
2899: }
2900: EXTERN_C_END
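
/* PartitionCalcCut_Private() is called above but not defined in this file.
   A minimal sketch of one way to measure the cut, assuming a hypothetical
   dual-graph array (neighbors[e*3 + i] is the local number of the element
   across face i of element e, or -1 on the boundary); this is illustrative,
   not the actual implementation: */
#if 0
static int CutExample(Partition p, int *neighbors, int *cut) {
  int localCut = 0; /* dual edges from a local element to a ghost element */
  int e, i, nbr;

  for(e = 0; e < p->numLocElements; e++) {
    for(i = 0; i < 3; i++) {
      nbr = neighbors[e*3 + i];
      /* Ghost elements are numbered after the local ones; -1 (boundary) is skipped */
      if (nbr >= p->numLocElements) localCut++;
    }
  }
  /* Each crossing edge is counted once by each of its two owners */
  MPI_Allreduce(&localCut, cut, 1, MPI_INT, MPI_SUM, p->comm);
  *cut /= 2;
  return(0);
}
#endif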