Actual source code: part2d.c

  1: #ifdef PETSC_RCS_HEADER
  2: static char vcid[] = "$Id: part2d.c,v 1.14 2000/01/31 17:40:21 knepley Exp $";
  3: #endif

  5: #include "src/mesh/impls/triangular/2d/2dimpl.h"         /*I "mesh.h" I*/
  6: #ifdef PETSC_HAVE_PARMETIS
  7: #include "parmetis.h"
  8: #endif
  9: #include "part2d.h"

 11: static int PartitionDestroy_Triangular_2D(Partition p)
 12: {
 13:   Partition_Triangular_2D *s = (Partition_Triangular_2D *) p->data;
 14:   int                      ierr;

 17:   PetscFree(p->firstElement);
 18:   if (p->ordering != PETSC_NULL)
 19:     {AODestroy(p->ordering);                                                                      }
 20:   if (p->ghostElements != PETSC_NULL)
 21:     {PetscFree(p->ghostElements);                                                                 }
 22:   if (p->ghostElementProcs != PETSC_NULL)
 23:     {PetscFree(p->ghostElementProcs);                                                             }
 24:   PetscFree(s->firstNode);
 25:   PetscFree(s->firstBdNode);
 26:   if (s->nodeOrdering != PETSC_NULL)
 27:     {AODestroy(s->nodeOrdering);                                                                  }
 28:   if (s->ghostNodes != PETSC_NULL)
 29:     {PetscFree(s->ghostNodes);                                                                    }
 30:   if (s->ghostNodeProcs != PETSC_NULL)
 31:     {PetscFree(s->ghostNodeProcs);                                                                }
 32:   if (s->ghostBdNodes != PETSC_NULL)
 33:     {PetscFree(s->ghostBdNodes);                                                                  }
 34:   PetscFree(s->firstEdge);
 35:   if (s->edgeOrdering != PETSC_NULL)
 36:     {AODestroy(s->edgeOrdering);                                                                  }
 37:   PetscFree(s);

 39:   return(0);
 40: }

 42: static int PartitionView_Triangular_2D_File(Partition p, PetscViewer viewer)
 43: {
 44:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
 45:   FILE                    *fd;
 46:   int                      numLocElements = p->numLocElements;
 47:   int                      numLocNodes    = q->numLocNodes;
 48:   int                      numLocEdges    = q->numLocEdges;
 49:   int                      i;
 50:   int                      ierr;

 53:   PetscViewerASCIIPrintf(viewer, "Partition Object:\n");
 54:   PetscViewerASCIIPrintf(viewer, "  Partition of triangular 2D grid with %d elements and %d nodes\n", p->numElements, q->numNodes);
 55:   PetscViewerASCIIGetPointer(viewer, &fd);
 56:   PetscSynchronizedFPrintf(p->comm, fd, "    Proc %d: %d elements %d nodes %d edges\n",
 57:                            p->rank, numLocElements, numLocNodes, numLocEdges);
 58:   PetscSynchronizedFlush(p->comm);
 59:   if (p->ordering != PETSC_NULL) {
 60:     PetscViewerASCIIPrintf(viewer, "  Global element renumbering:\n");
 61:     AOView(p->ordering, viewer);
 62:   }
 63:   if (q->nodeOrdering != PETSC_NULL) {
 64:     PetscViewerASCIIPrintf(viewer, "  Global node renumbering:\n");
 65:     AOView(q->nodeOrdering, viewer);
 66:   }
 67:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost elements on proc %d\n", p->numOverlapElements - numLocElements, p->rank);
 68:   for(i = 0; i < p->numOverlapElements - numLocElements; i++)
 69:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d %d\n", i, p->ghostElements[i], p->ghostElementProcs[i]);
 70:   PetscSynchronizedFlush(p->comm);
 71:   PetscSynchronizedFPrintf(p->comm, fd, "  %d ghost nodes on proc %d\n", q->numOverlapNodes - numLocNodes, p->rank);
 72:   for(i = 0; i < q->numOverlapNodes - numLocNodes; i++)
 73:     PetscSynchronizedFPrintf(p->comm, fd, "  %d %d\n", i, q->ghostNodes[i]);
 74:   PetscSynchronizedFlush(p->comm);

 76:   return(0);
 77: }

 79: static int PartitionView_Triangular_2D_Draw(Partition p, PetscViewer v)
 80: {
 82:   return(0);
 83: }

 85: static int PartitionView_Triangular_2D(Partition p, PetscViewer viewer)
 86: {
 87:   PetscTruth isascii, isdraw;
 88:   int        ierr;

 91:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
 92:   PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_DRAW,  &isdraw);
 93:   if (isascii == PETSC_TRUE) {
 94:     PartitionView_Triangular_2D_File(p, viewer);
 95:   } else if (isdraw == PETSC_TRUE) {
 96:     PartitionView_Triangular_2D_Draw(p, viewer);
 97:   }
 98:   return(0);
 99: }

101: static int PartitionViewFromOptions_Private(Partition part, char *title)
102: {
103:   PetscViewer viewer;
104:   PetscDraw   draw;
105:   PetscTruth  opt;
106:   int         ierr;

109:   PetscOptionsHasName(part->prefix, "-part_view", &opt);
110:   if (opt == PETSC_TRUE) {
111:     PartitionView(part, PETSC_NULL);
112:   }
113:   PetscOptionsHasName(part->prefix, "-part_view_draw", &opt);
114:   if (opt == PETSC_TRUE) {
115:     PetscViewerDrawOpen(part->comm, 0, 0, 0, 0, 300, 300, &viewer);
116:     PetscViewerDrawGetDraw(viewer, 0, &draw);
117:     if (title != PETSC_NULL) {
118:       PetscDrawSetTitle(draw, title);
119:     }
120:     PartitionView(part, viewer);
121:     PetscViewerFlush(viewer);
122:     PetscDrawPause(draw);
123:     PetscViewerDestroy(viewer);
124:   }
125:   return(0);
126: }

128: int PartitionGetTotalNodes_Triangular_2D(Partition p, int *size)
129: {
130:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

133:   *size = q->numNodes;
134:   return(0);
135: }

137: int PartitionGetStartNode_Triangular_2D(Partition p, int *node)
138: {
139:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

142:   *node = q->firstNode[p->rank];
143:   return(0);
144: }

146: int PartitionGetEndNode_Triangular_2D(Partition p, int *node)
147: {
148:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

151:   *node = q->firstNode[p->rank+1];
152:   return(0);
153: }

155: int PartitionGetNumNodes_Triangular_2D(Partition p, int *size)
156: {
157:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

160:   *size = q->numLocNodes;
161:   return(0);
162: }

164: int PartitionGetNumOverlapNodes_Triangular_2D(Partition p, int *size)
165: {
166:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

169:   *size = q->numOverlapNodes;
170:   return(0);
171: }

173: int PartitionGetNumOverlapEdges_Triangular_2D(Partition p, int *size)
174: {
175:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

178:   /* We do not maintain ghost edges */
179:   *size = q->numLocEdges;
180:   return(0);
181: }

183: int PartitionGhostNodeIndex_Private(Partition p, int node, int *gNode)
184: {
185:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
186:   int                      low, high, mid;

189:   /* Use bisection since the array is assumed to be sorted */
190:   low  = 0;
191:   high = q->numOverlapNodes - (q->firstNode[p->rank+1] - q->firstNode[p->rank]) - 1;
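       /* The locally owned node count (firstNode[rank+1] - firstNode[rank]) subtracted from numOverlapNodes
          gives the number of ghost nodes, so high starts at the last ghost index */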
192:   while (low <= high) {
193:     mid = (low + high)/2;
194:     if (node == q->ghostNodes[mid]) {
195:       *gNode = mid;
196:       return(0);
197:     } else if (node < q->ghostNodes[mid]) {
198:       high = mid - 1;
199:     } else {
200:       low  = mid + 1;
201:     }
202:   }
203:   *gNode = -1;
204:   /* Flag for ghost node not present */
205:   PetscFunctionReturn(1);
206: }

208: int PartitionGlobalToLocalNodeIndex_Triangular_2D(Partition p, int node, int *locNode)
209: {
210:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
211:   int                      numLocNodes = q->numLocNodes;
212:   int                      gNode; /* Local ghost node number */
213:   int                      ierr;

216:   if (node < 0) {
217:     *locNode = node;
218:     return(0);
219:   }
220:   /* Check for ghost node */
221:   if ((node < q->firstNode[p->rank]) || (node >= q->firstNode[p->rank+1])) {
222:     /* Search for canonical number */
223:     PartitionGhostNodeIndex_Private(p, node, &gNode);
224:     *locNode = numLocNodes + gNode;
225:   } else {
226:     *locNode = node - q->firstNode[p->rank];
227:   }
228:   return(0);
229: }

231: int PartitionLocalToGlobalNodeIndex_Triangular_2D(Partition p, int locNode, int *node)
232: {
233:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
234:   int                      numLocNodes = q->numLocNodes;

237:   if (locNode < 0) {
238:     *node = locNode;
239:     return(0);
240:   }
241:   /* Check for ghost node */
242:   if (locNode >= numLocNodes) {
243:     *node = q->ghostNodes[locNode - numLocNodes];
244:   } else {
245:     *node = locNode + q->firstNode[p->rank];
246:   }
247:   return(0);
248: }

250: int PartitionGetNodeOrdering_Triangular_2D(Partition p, AO *order)
251: {
252:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

255:   *order = q->nodeOrdering;
256:   return(0);
257: }

259: int PartitionGhostNodeExchange_Triangular_2D(Partition part, InsertMode addv, ScatterMode mode, int *locVars, int *ghostVars)
260: {
261:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) part->data;
262:   Mesh                     mesh = part->mesh;
263:   int                      ierr;

266:   PetscGhostExchange(part->comm, q->numOverlapNodes - mesh->numNodes, q->ghostNodeProcs, q->ghostNodes, PETSC_INT,
267:                             q->firstNode, addv, mode, locVars, ghostVars);
268: 
269:   return(0);
270: }

272: int PartitionGetEdgeOrdering_Triangular_2D(Partition p, AO *order)
273: {
274:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;

277:   *order = q->edgeOrdering;
278:   return(0);
279: }

281: int PartitionCalcCut_Private(Partition p, int *cut)
282: {
283:   Mesh_Triangular *tri            = (Mesh_Triangular *) p->mesh->data;
284:   int              numLocElements = p->numLocElements;
285:   int             *neighbors      = tri->neighbors;
286:   int              locCut;        /* The number of edges of the dual crossing the partition from this domain */
287:   int              elem, neighbor;
288:   int              ierr;

291:   for(elem = 0, locCut = 0; elem < numLocElements; elem++) {
292:     for(neighbor = 0; neighbor < 3; neighbor++) {
293:       if (neighbors[elem*3+neighbor] >= numLocElements)
294:         locCut++;
295:     }
296:   }
297:   ierr  = MPI_Allreduce(&locCut, cut, 1, MPI_INT, MPI_SUM, p->comm);
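        /* Each cut edge of the dual is counted once by each of the two domains it joins, so halve the global sum */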
298:   *cut /= 2;
299:   return(0);
300: }

302: static struct _PartitionOps POps = {/* Generic Operations */
303:                                     PETSC_NULL/* PartitionSetup */,
304:                                     PETSC_NULL/* PartitionSetFromOptions */,
305:                                     PartitionView_Triangular_2D,
306:                                     PETSC_NULL/* PartitionCopy */,
307:                                     PETSC_NULL/* PartitionDuplicate */,
308:                                     PartitionDestroy_Triangular_2D,
309:                                     /* Partition-Specific Operations */
310:                                     PartitionGhostNodeExchange_Triangular_2D,
311:                                     /* Node Query Functions */
312:                                     PartitionGetTotalNodes_Triangular_2D,
313:                                     PartitionGetStartNode_Triangular_2D,
314:                                     PartitionGetEndNode_Triangular_2D,
315:                                     PartitionGetNumNodes_Triangular_2D,
316:                                     PartitionGetNumOverlapNodes_Triangular_2D,
317:                                     PartitionGlobalToLocalNodeIndex_Triangular_2D,
318:                                     PartitionLocalToGlobalNodeIndex_Triangular_2D,
319:                                     PartitionGetNodeOrdering_Triangular_2D,
320:                                     /* Face Query Functions */
321:                                     PETSC_NULL/* PartitionGetTotalFaces */,
322:                                     PETSC_NULL/* PartitionGetStartFace */,
323:                                     PETSC_NULL/* PartitionGetEndFace */,
324:                                     PETSC_NULL/* PartitionGetNumFaces */,
325:                                     PETSC_NULL/* PartitionGetNumOverlapFaces */,
326:                                     PETSC_NULL/* PartitionGlobalToLocalFaceIndex */,
327:                                     PETSC_NULL/* PartitionLocalToGlobalFaceIndex */,
328:                                     PETSC_NULL/* PartitionGetFaceOrdering */,
329:                                     /* Edge Query Functions */
330:                                     PETSC_NULL/* PartitionGetTotalEdges */,
331:                                     PETSC_NULL/* PartitionGetStartEdge */,
332:                                     PETSC_NULL/* PartitionGetEndEdge */,
333:                                     PETSC_NULL/* PartitionGetNumEdges */,
334:                                     PartitionGetNumOverlapEdges_Triangular_2D,
335:                                     PETSC_NULL/* PartitionGlobalToLocalEdgeIndex */,
336:                                     PETSC_NULL/* PartitionLocalToGlobalEdgeIndex */,
337:                                     PartitionGetEdgeOrdering_Triangular_2D};

339: int PartitionDebugAO_Private(Partition p, int *nodeProcs)
340: {
341:   Mesh                     mesh        = p->mesh;
342:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
343:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
344:   int                      numCorners  = mesh->numCorners;
345:   int                      numElements = mesh->numFaces;
346:   int                     *elements    = tri->faces;
347:   int                      numNodes    = q->numNodes;
348:   int                      numProcs    = p->numProcs;
349:   int                      rank        = p->rank;
350:   int                     *support;
351:   int                     *temp;
352:   int                      proc, nProc, elem, nElem, sElem, corner, nCorner, node, degree, index;
353:   int                      ierr;

356:   PetscMalloc(numProcs * sizeof(int), &temp);
357:   for(node = 0; node < numNodes; node++) {
358:     PetscSynchronizedPrintf(p->comm, " %d", nodeProcs[node]);
359:     PetscSynchronizedFlush(p->comm);
 360:     PetscPrintf(p->comm, "\n");
361:     MPI_Allgather(&nodeProcs[node], 1, MPI_INT, temp, 1, MPI_INT, p->comm);
362:     for(proc = 0, index = 0; proc < numProcs; proc++) {
363:       if (temp[proc] == proc) index++;
364:     }

366:     /* If a node is not scheduled for a unique domain */
367:     if (index != 1) {
368:       for(elem = 0; elem < numElements; elem++) {
369:         for(corner = 0; corner < numCorners; corner++) {
370:           /* Locate an element containing the node */
371:           if (node != elements[elem*numCorners+corner])
372:             continue;
373: 
374:           /* Check the support of the node */
 375:           PetscPrintf(PETSC_COMM_SELF, "[%d]elem: %d corner: %d node: %d\n", rank, elem, corner, node);
376:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
377:           for(sElem = 0; sElem < degree; sElem++) {
378:             nElem = support[sElem];
 379:             PetscPrintf(PETSC_COMM_SELF, "[%d]support[%d] = %d\n", rank, sElem, nElem);
380:             /* See if neighbor is in another domain */
381:             if (nElem >= numElements) {
382:               /* Check to see if node is contained in the neighboring element */
383:               for(nCorner = 0; nCorner < numCorners; nCorner++)
384:                 if (elements[nElem*numCorners+nCorner] == node) {
385:                   nProc = p->ghostElementProcs[nElem-numElements];
 386:                   PetscPrintf(PETSC_COMM_SELF, "[%d]Found in corner %d proc: %d\n", rank, nCorner, nProc);
387:                   break;
388:                 }
389:             }
390:           }
391:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
392:           if (nodeProcs[node] < 0)
393:             nodeProcs[node] = rank;
 394:           PetscPrintf(PETSC_COMM_SELF, "[%d]nodeProcs[%d]: %d\n", rank, node, nodeProcs[node]);
395:         }
396:       }
397:     }
398:   }
399:   PetscFree(temp);
400:   PetscBarrier((PetscObject) p);
401:   PetscFunctionReturn(1);
402: }

404: /*
405:   PartitionSortGhosts_Private - This function sorts the ghost array and
406:   removes any duplicates.

408:   Input Parameters:
409: . p          - The Partition
410: . numGhosts  - The number of ghosts
411: . ghostArray - The ghost indices

 413:   Output Parameters:
414: . numGhosts  - The new size of the ghost array
415: . ghostArray - The sorted ghost indices

417: .seealso:
418: */
419: int PartitionSortGhosts_Private(Partition p, int *numGhosts, int *ghostArray, int **ghostPerm)
420: {
421:   int *perm, *temp;
422:   int  size;
423:   int  ghost, newGhost;
424:   int  ierr;

427:   size = *numGhosts;
428:   PetscMalloc(size * sizeof(int), &perm);
429:   PetscMalloc(size * sizeof(int), &temp);

431:   /* Sort ghosts */
432:   for(ghost = 0; ghost < size; ghost++) perm[ghost] = ghost;
433:   PetscSortIntWithPermutation(size, ghostArray, perm);

 435:   /* Permute ghosts and eliminate duplicates */
436:   for(ghost = 0, newGhost = 0; ghost < size; ghost++) {
437:     if ((newGhost == 0) || (temp[newGhost-1] != ghostArray[perm[ghost]])) {
438:       /* Keep ghost */
439:       temp[newGhost++] = ghostArray[perm[ghost]];
440:     } else {
441:       /* Eliminate redundant ghost */
442:       PetscMemmove(&perm[ghost], &perm[ghost+1], (size - (ghost+1)) * sizeof(int));
443:       ghost--;
444:       size--;
445:     }
446:   }
447:   for(ghost = 0; ghost < size; ghost++) {
448:     ghostArray[ghost] = temp[ghost];
449:   }
450:   PetscFree(temp);

452:   *numGhosts = size;
453:   *ghostPerm = perm;
454:   return(0);
455: }
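The contract of PartitionSortGhosts_Private() -- sort the ghost list through a permutation, drop duplicates, and hand the permutation back so that companion arrays (such as the ghost owner lists) can be reordered the same way -- can be illustrated with a small standalone sketch. The code below is not part of part2d.c; it uses a plain insertion sort in place of PetscSortIntWithPermutation() and hypothetical names.

#include <stdio.h>
#include <stdlib.h>

/* Standalone sketch: sort ghosts[] via a permutation, remove duplicates,
   and return both the new length and the permutation. */
static int SortAndCompactGhosts(int n, int *ghosts, int *perm)
{
  int *temp = (int *) malloc(n * sizeof(int));
  int  i, j, newN;

  for (i = 0; i < n; i++) perm[i] = i;
  /* insertion sort on the permutation; stand-in for PetscSortIntWithPermutation() */
  for (i = 1; i < n; i++) {
    int p = perm[i];
    for (j = i; j > 0 && ghosts[perm[j-1]] > ghosts[p]; j--) perm[j] = perm[j-1];
    perm[j] = p;
  }
  /* copy out in sorted order, keeping only the first occurrence of each value */
  for (i = 0, newN = 0; i < n; i++) {
    if (newN == 0 || temp[newN-1] != ghosts[perm[i]]) {
      perm[newN]   = perm[i];
      temp[newN++] = ghosts[perm[i]];
    }
  }
  for (i = 0; i < newN; i++) ghosts[i] = temp[i];
  free(temp);
  return newN;
}

int main(void)
{
  int ghosts[] = {7, 3, 7, 12, 3, 5};
  int perm[6];
  int n = SortAndCompactGhosts(6, ghosts, perm);
  int i;
  for (i = 0; i < n; i++) printf("%d (was in slot %d)\n", ghosts[i], perm[i]);
  return 0;   /* prints 3, 5, 7, 12 together with their original positions */
}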

457: int PartitionGetNewGhostNodes_Serial(Partition p, int *newProcNodes, int *newNodes)
458: {
459:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
460:   int                      numLocNodes   = q->numLocNodes;
461:   int                      numGhostNodes = q->numOverlapNodes - numLocNodes;
462:   int                      numProcs      = p->numProcs;
463:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
464:   int                      numNewNodes;    /* Total number of new ghost nodes to add */
465:   int                     *temp;
466:   int                      proc, node, i;
467:   int                      ierr;

470:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++)
471:     numNewNodes += newProcNodes[proc];

473:   /* Add in new ghost nodes */
474:   if (numNewNodes > 0) {
475:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
476:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
477:     for(node = 0; node < numNewNodes; node++) {
478:       temp[numGhostNodes+node] = newNodes[node];
479:     }
480:     if (q->ghostNodes != PETSC_NULL) {
481:       PetscFree(q->ghostNodes);
482:     }
483:     q->ghostNodes = temp;

485:     PetscMalloc((numGhostNodes + numNewNodes) * sizeof(int), &temp);
486:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
487:     for(proc = 0, node = 0; proc < numProcs; proc++) {
488:       for(i = 0; i < newProcNodes[proc]; i++)
489:         temp[numGhostNodes+(node++)] = proc;
490:     }
491:     if (q->ghostNodeProcs != PETSC_NULL) {
492:       PetscFree(q->ghostNodeProcs);
493:     }
494:     q->ghostNodeProcs = temp;

496:     /* Resort ghost nodes and remove duplicates */
497:     numGhostNodes += numNewNodes;
498:     PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
499:     q->numOverlapNodes = numLocNodes + numGhostNodes;
500:     PetscMalloc(numGhostNodes * sizeof(int), &temp);
501:     for(node = 0; node < numGhostNodes; node++) {
502:       temp[node] = q->ghostNodeProcs[nodePerm[node]];
503:     }
504:     PetscFree(q->ghostNodeProcs);
505:     q->ghostNodeProcs = temp;
506:     PetscFree(nodePerm);
507:   }
508: #ifdef PETSC_USE_BOPT_g
509:   /* Consistency check for ghost nodes */
510:   for(node = 0; node < numGhostNodes; node++) {
511:     if ((q->ghostNodes[node] <  q->firstNode[q->ghostNodeProcs[node]]) ||
512:         (q->ghostNodes[node] >= q->firstNode[q->ghostNodeProcs[node]+1])) {
513:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
514:     }
515:   }
516: #endif
517:   return(0);
518: }

520: int PartitionGetNewGhostNodes_Parallel(Partition p, int *sendGhostNodes, int *sendNodes, int *recvGhostNodes, int *recvNodes)
521: {
522:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
523:   Mesh                     mesh            = p->mesh;
524:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
525:   int                      numLocNodes     = q->numLocNodes;
526:   int                     *firstNode       = q->firstNode;
527:   double                  *nodes           = tri->nodes;
528:   int                     *markers         = tri->markers;
529:   int                     *degrees         = tri->degrees;
530:   int                      numProcs        = p->numProcs;
531:   int                      rank            = p->rank;
532:   int                      numGhostNodes;
533:   int                     *nodePerm;        /* The new permutation for the sorted ghost nodes */
534:   int                      numSendNodes;    /* Total number of new ghost nodes to receive */
535:   int                      numRecvNodes;    /* Total number of new ghost nodes to send */
536:   int                     *sumSendNodes;    /* Prefix sums of sendGhostNodes */
537:   int                     *sumRecvNodes;    /* Prefix sums of recvGhostNodes */
 538:   int                     *sendGhostCoords; /* Number of coordinate values (2 per node) needed from a given processor */
 539:   int                     *recvGhostCoords; /* Number of coordinate values (2 per node) needed by a given processor */
540:   int                     *sumSendCoords;   /* Prefix sums of sendGhostCoords */
541:   int                     *sumRecvCoords;   /* Prefix sums of recvGhostCoords */
542:   double                  *sendCoords;      /* Coordinates of ghost nodes for other domains */
 543:   double                  *recvCoords;      /* Coordinates of ghost nodes for this domain */
544:   int                     *sendMarkers;     /* Markers of ghost nodes for other domains */
 545:   int                     *recvMarkers;     /* Markers of ghost nodes for this domain */
546:   int                     *sendDegrees;     /* Degrees of ghost nodes for other domains */
 547:   int                     *recvDegrees;     /* Degrees of ghost nodes for this domain */
548:   int                     *offsets;         /* Offsets into the send array for each destination proc */
549:   int                     *temp;
550:   double                  *temp2;
551:   int                      proc, node, locNode, i;
552:   int                      ierr;

555:   PetscMalloc(numProcs * sizeof(int), &sumSendNodes);
556:   PetscMalloc(numProcs * sizeof(int), &sumRecvNodes);
557:   PetscMalloc(numProcs * sizeof(int), &sendGhostCoords);
558:   PetscMalloc(numProcs * sizeof(int), &recvGhostCoords);
559:   PetscMalloc(numProcs * sizeof(int), &sumSendCoords);
560:   PetscMalloc(numProcs * sizeof(int), &sumRecvCoords);
561:   PetscMalloc(numProcs * sizeof(int), &offsets);
562:   PetscMemzero(sumSendNodes, numProcs * sizeof(int));
563:   PetscMemzero(sumRecvNodes, numProcs * sizeof(int));
564:   PetscMemzero(offsets,      numProcs * sizeof(int));

566:   /* Compute new ghost node offsets */
567:   for(proc = 1; proc < numProcs; proc++) {
568:     sumSendNodes[proc] = sumSendNodes[proc-1] + sendGhostNodes[proc-1];
569:     sumRecvNodes[proc] = sumRecvNodes[proc-1] + recvGhostNodes[proc-1];
570:   }
571:   numSendNodes = sumSendNodes[numProcs-1] + sendGhostNodes[numProcs-1];
572:   numRecvNodes = sumRecvNodes[numProcs-1] + recvGhostNodes[numProcs-1];

574:   /* Get numbers of ghost nodes to provide */
575:   MPI_Alltoallv(sendNodes, sendGhostNodes, sumSendNodes, MPI_INT,
576:                        recvNodes, recvGhostNodes, sumRecvNodes, MPI_INT, p->comm);
577: 

579:   /* Get node coordinates, markers, and degrees */
580:   for(proc = 0; proc < numProcs; proc++) {
581:     sendGhostCoords[proc] = sendGhostNodes[proc]*2;
582:     recvGhostCoords[proc] = recvGhostNodes[proc]*2;
583:     sumSendCoords[proc]   = sumSendNodes[proc]*2;
584:     sumRecvCoords[proc]   = sumRecvNodes[proc]*2;
585:   }
586:   if (numSendNodes) {
587:     PetscMalloc(numSendNodes*2 * sizeof(double), &recvCoords);
588:     PetscMalloc(numSendNodes   * sizeof(int),    &recvMarkers);
589:     PetscMalloc(numSendNodes   * sizeof(int),    &recvDegrees);
590:   }
591:   if (numRecvNodes) {
592:     PetscMalloc(numRecvNodes*2 * sizeof(double), &sendCoords);
593:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendMarkers);
594:     PetscMalloc(numRecvNodes   * sizeof(int),    &sendDegrees);
595:     for(node = 0; node < numRecvNodes; node++) {
596:       locNode = recvNodes[node] - firstNode[rank];
597: #ifdef PETSC_USE_BOPT_g
598:       if ((locNode < 0) || (locNode >= numLocNodes)) {
599:         SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d should be in [0,%d)", locNode, numLocNodes);
600:       }
601: #endif
602:       for(i = 0; i < 2; i++)
603:         sendCoords[node*2+i] = nodes[locNode*2+i];
604:       sendMarkers[node] = markers[locNode];
605:       sendDegrees[node] = degrees[locNode];
606:     }
607:   }

609:   /* Communicate node coordinates and markers and degrees */
610:   MPI_Alltoallv(sendCoords,  recvGhostCoords, sumRecvCoords, MPI_DOUBLE,
611:                        recvCoords,  sendGhostCoords, sumSendCoords, MPI_DOUBLE, p->comm);
612: 
613:   MPI_Alltoallv(sendMarkers, recvGhostNodes,  sumRecvNodes,  MPI_INT,
614:                        recvMarkers, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
615: 
616:   MPI_Alltoallv(sendDegrees, recvGhostNodes,  sumRecvNodes,  MPI_INT,
617:                        recvDegrees, sendGhostNodes,  sumSendNodes,  MPI_INT, p->comm);
618: 

620:   /* Add in new ghost nodes */
621:   numGhostNodes = q->numOverlapNodes - numLocNodes;
622:   if (numSendNodes > 0) {
623:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
624:     PetscMemcpy(temp, q->ghostNodes, numGhostNodes * sizeof(int));
625:     for(node = 0; node < numSendNodes; node++)
626:       temp[numGhostNodes+node] = sendNodes[node];
627:     if (q->ghostNodes != PETSC_NULL) {
628:       PetscFree(q->ghostNodes);
629:     }
630:     q->ghostNodes = temp;

632:     PetscMalloc((numGhostNodes + numSendNodes) * sizeof(int), &temp);
633:     PetscMemcpy(temp, q->ghostNodeProcs, numGhostNodes * sizeof(int));
634:     for(proc = 0, node = 0; proc < numProcs; proc++) {
635:       for(i = 0; i < sendGhostNodes[proc]; i++)
636:         temp[numGhostNodes+(node++)] = proc;
637:     }
638:     if (q->ghostNodeProcs != PETSC_NULL) {
639:       PetscFree(q->ghostNodeProcs);
640:     }
641:     q->ghostNodeProcs = temp;

643:     PetscMalloc((q->numOverlapNodes + numSendNodes)*2 * sizeof(double), &temp2);
644:     PetscMemcpy(temp2, nodes, q->numOverlapNodes*2 * sizeof(double));
645:     for(node = 0; node < numSendNodes*2; node++)
646:       temp2[q->numOverlapNodes*2+node] = recvCoords[node];
647:     PetscFree(nodes);
648:     tri->nodes = temp2;

650:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
651:     PetscMemcpy(temp, markers, q->numOverlapNodes * sizeof(int));
652:     for(node = 0; node < numSendNodes; node++)
653:       temp[q->numOverlapNodes+node] = recvMarkers[node];
654:     PetscFree(markers);
655:     tri->markers = temp;

657:     PetscMalloc((q->numOverlapNodes + numSendNodes) * sizeof(int), &temp);
658:     PetscMemcpy(temp, degrees, q->numOverlapNodes * sizeof(int));
659:     for(node = 0; node < numSendNodes; node++)
660:       temp[q->numOverlapNodes+node] = recvDegrees[node];
661:     PetscFree(degrees);
662:     tri->degrees = temp;
663:   }

665:   /* Resort ghost nodes and remove duplicates */
666:   numGhostNodes     += numSendNodes;
667:   PartitionSortGhosts_Private(p, &numGhostNodes, q->ghostNodes, &nodePerm);
668:   q->numOverlapNodes = numLocNodes + numGhostNodes;

670:   PetscMalloc(numGhostNodes * sizeof(int), &temp);
671:   for(node = 0; node < numGhostNodes; node++)
672:     temp[node] = q->ghostNodeProcs[nodePerm[node]];
673:   for(node = 0; node < numGhostNodes; node++)
674:     q->ghostNodeProcs[node] = temp[node];

676:   for(node = 0; node < numGhostNodes; node++)
677:     temp[node] = tri->markers[mesh->numNodes+nodePerm[node]];
678:   for(node = 0; node < numGhostNodes; node++)
679:     tri->markers[mesh->numNodes+node] = temp[node];

681:   for(node = 0; node < numGhostNodes; node++)
682:     temp[node] = tri->degrees[mesh->numNodes+nodePerm[node]];
683:   for(node = 0; node < numGhostNodes; node++)
684:     tri->degrees[mesh->numNodes+node] = temp[node];
685:   PetscFree(temp);

687:   PetscMalloc(numGhostNodes*2 * sizeof(double), &temp2);
688:   for(node = 0; node < numGhostNodes; node++) {
689:     temp2[node*2]   = tri->nodes[(mesh->numNodes+nodePerm[node])*2];
690:     temp2[node*2+1] = tri->nodes[(mesh->numNodes+nodePerm[node])*2+1];
691:   }
692:   for(node = 0; node < numGhostNodes; node++) {
693:     tri->nodes[(mesh->numNodes+node)*2]   = temp2[node*2];
694:     tri->nodes[(mesh->numNodes+node)*2+1] = temp2[node*2+1];
695:   }
696:   PetscFree(temp2);
697:   PetscFree(nodePerm);

699: #ifdef PETSC_USE_BOPT_g
700:   /* Consistency check for ghost nodes */
701:   for(node = 0; node < numGhostNodes; node++) {
702:     if ((q->ghostNodes[node] <  firstNode[q->ghostNodeProcs[node]]) ||
703:         (q->ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1])) {
704:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node source processor");
705:     }
706:   }
707: #endif

709:   /* Cleanup */
710:   PetscFree(sumSendNodes);
711:   PetscFree(sendGhostCoords);
712:   PetscFree(sumSendCoords);
713:   PetscFree(offsets);
714:   if (numSendNodes) {
715:     PetscFree(recvCoords);
716:     PetscFree(recvMarkers);
717:     PetscFree(recvDegrees);
718:   }
719:   PetscFree(sumRecvNodes);
720:   PetscFree(recvGhostCoords);
721:   PetscFree(sumRecvCoords);
722:   if (numRecvNodes) {
723:     PetscFree(sendCoords);
724:     PetscFree(sendMarkers);
725:     PetscFree(sendDegrees);
726:   }
727:   return(0);
728: }

730: int PartitionGetNewGhostElements_Serial(Partition p, int *newProcElements, int *newElements)
731: {
732:   int  numLocElements   = p->numLocElements;
733:   int  numGhostElements = p->numOverlapElements - numLocElements;
734:   int  numProcs         = p->numProcs;
735:   int *elemPerm;        /* The new permutation for the sorted ghost elements */
736:   int  numNewElements;  /* Total number of new ghost elements to add */
737:   int *temp;
738:   int  proc, elem, i;
739:   int  ierr;

742:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++)
743:     numNewElements += newProcElements[proc];

745:   /* Add in new ghost elements */
746:   if (numNewElements > 0) {
747:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
748:     PetscMemcpy(temp, p->ghostElements, numGhostElements * sizeof(int));
749:     for(elem = 0; elem < numNewElements; elem++)
750:       temp[numGhostElements+elem] = newElements[elem];
751:     if (p->ghostElements != PETSC_NULL) {
752:       PetscFree(p->ghostElements);
753:     }
754:     p->ghostElements = temp;

756:     PetscMalloc((numGhostElements + numNewElements) * sizeof(int), &temp);
757:     PetscMemcpy(temp, p->ghostElementProcs, numGhostElements * sizeof(int));
758:     for(proc = 0, elem = 0; proc < numProcs; proc++) {
759:       for(i = 0; i < newProcElements[proc]; i++)
760:         temp[numGhostElements+(elem++)] = proc;
761:     }
762:     if (p->ghostElementProcs != PETSC_NULL) {
763:       PetscFree(p->ghostElementProcs);
764:     }
765:     p->ghostElementProcs = temp;

767:     /* Resort ghost elements and remove duplicates */
768:     numGhostElements += numNewElements;
769:     PartitionSortGhosts_Private(p, &numGhostElements, p->ghostElements, &elemPerm);
770:     p->numOverlapElements = numLocElements + numGhostElements;
771:     PetscMalloc(numGhostElements * sizeof(int), &temp);
772:     for(elem = 0; elem < numGhostElements; elem++)
773:       temp[elem] = p->ghostElementProcs[elemPerm[elem]];
774:     PetscFree(p->ghostElementProcs);
775:     p->ghostElementProcs = temp;
776:     PetscFree(elemPerm);
777:   }
778: #ifdef PETSC_USE_BOPT_g
779:   /* Consistency check for ghost elements */
780:   for(elem = 0; elem < numGhostElements; elem++) {
781:     if ((p->ghostElements[elem] <  p->firstElement[p->ghostElementProcs[elem]]) ||
782:         (p->ghostElements[elem] >= p->firstElement[p->ghostElementProcs[elem]+1])) {
783:       SETERRQ(PETSC_ERR_PLIB, "Invalid ghost element source processor");
784:     }
785:   }
786: #endif
787:   return(0);
788: }

790: /*
791:   PartitionCreateElementMap_METIS - This function creates a map from elements to domains,
792:   using METIS to minimize the cut and approximately balance the sizes.

794:   Input Parameters:
795: + p           - The Partition
796: - numElements - The local number of elements

798:   Output Parameter:
 799: . elementMap  - The map from elements to domains

801: .seealso: PartitionNodes_Private()
802: */
803: int PartitionCreateElementMap_METIS(Partition p, int numElements, int **elementMap)
804: {
805: #ifdef PETSC_HAVE_PARMETIS
806:   Mesh mesh = p->mesh;
807:   int       *elemProcs;     /* The processor assigned to each element */
808:   int       *elemOffsets;   /* The offsets into elemNeighbors of each element row for dual in CSR format */
809:   int       *elemNeighbors; /* The list of element neighbors for dual in CSR format */
810:   int       *edgeWeights;   /* The list of edge weights for dual in CSR format */
811:   int        weight;        /* A weight for constrained nodes */
812:   int        options[5];    /* The option flags for METIS */
813:   PetscTruth opt;
814:   int        ierr;

817:   /* Create the dual graph in distributed CSR format */
818:   weight = 0;
819:   ierr   = PetscOptionsGetInt(mesh->prefix, "-mesh_partition_weight", &weight, &opt);
820:   ierr   = MeshCreateDualCSR(mesh, &elemOffsets, &elemNeighbors, &edgeWeights, weight);

822:   /* Partition graph */
823:   if (numElements != p->numLocElements) {
824:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d for ParMETIS, should be %d", numElements, p->numLocElements);
825:   }
826:   PetscMalloc(numElements * sizeof(int), &elemProcs);
827:   options[0] = 0;   /* Returns the edge cut */
828:   options[1] = 150; /* The folding factor, 0 = no folding */
829:   options[2] = 1;   /* Serial initial partition */
830:   options[3] = 0;   /* C style numbering */
831:   options[4] = 0;   /* No timing information */
832:   PARKMETIS(p->firstElement, elemOffsets, PETSC_NULL, elemNeighbors, PETSC_NULL, elemProcs, options, p->comm);

834:   /* Destroy dual graph */
835:   MeshDestroyDualCSR(mesh, elemOffsets, elemNeighbors, edgeWeights);

837:   *elementMap = elemProcs;
838:   return(0);
839: #else
840:   SETERRQ(PETSC_ERR_SUP, "You must obtain George Karypis' ParMETIS software")
841: #endif
842: }
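For orientation, the dual graph handed to the partitioner above is stored in distributed CSR form: row i of elemOffsets/elemNeighbors lists the elements sharing an edge with local element i, and p->firstElement[] describes which contiguous block of global element numbers each process owns (playing the role of ParMETIS's vertex-distribution array). A minimal single-process illustration, with purely hypothetical data:

#include <stdio.h>

/* Dual graph of four triangles in a strip (0-1-2-3), on one process. */
static int firstElement[]  = {0, 4};             /* per-process element ranges (one entry per proc, plus 1) */
static int elemOffsets[]   = {0, 1, 3, 5, 6};    /* row i is elemNeighbors[elemOffsets[i] .. elemOffsets[i+1]) */
static int elemNeighbors[] = {1, 0, 2, 1, 3, 2}; /* neighbors of elements 0, 1, 2, 3 */

int main(void)
{
  int e, k;
  for (e = 0; e < firstElement[1]; e++) {
    printf("element %d:", e);
    for (k = elemOffsets[e]; k < elemOffsets[e+1]; k++) printf(" %d", elemNeighbors[k]);
    printf("\n");
  }
  return 0;
}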

844: /*
845:   PartitionCreateElementMap_NodeBased - This function creates a map from elements to domains,
846:   using a previous partition of the nodes.

848:   Input Parameters:
849: + p           - The Partition
 850: - numElements - The local number of elements

852:   Output Parameter:
 853: . elementMap  - The map from elements to domains

855: .seealso: PartitionNodes_Private()
856: */
857: int PartitionCreateElementMap_NodeBased(Partition p, int numElements, int **elementMap)
858: {
859:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
860:   Mesh                     mesh         = p->mesh;
861:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
862:   int                      numCorners   = mesh->numCorners;
863:   int                     *elements     = tri->faces;
864:   int                     *firstElement = p->firstElement;
865:   int                     *firstNode    = q->firstNode;
866:   int                      numProcs     = p->numProcs;
867:   int                      rank         = p->rank;
868:   int                      startElement = firstElement[rank];
869:   int                      endElement   = firstElement[rank+1];
870:   int                     *elemProcs;     /* The processor assigned to each element */
871:   int                      proc, elem, corner, node;
872:   int                      ierr;

875:   if (numElements != p->numLocElements) {
876:     SETERRQ2(PETSC_ERR_ARG_WRONG, "Incorrect input size %d should be %d", numElements, p->numLocElements);
877:   }
878:   /* Count elements on this partition -- keep element if you are the lower numbered domain */
879:   PetscMalloc(numElements * sizeof(int), &elemProcs);
880:   for(elem = 0; elem < numElements; elem++) {
881:     elemProcs[elem] = -1;
882:   }

884:   for(elem = startElement; elem < endElement; elem++) {
885:     for(corner = 0; corner < numCorners; corner++) {
886:       node = elements[elem*numCorners+corner];
887:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
888:         /* Get the domain of the node */
889:         for(proc = 0; proc < numProcs; proc++) {
890:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
891:         }
892:         if ((elemProcs[elem-startElement] < 0) || (proc < elemProcs[elem-startElement]))
893:           elemProcs[elem-startElement] = proc;
894:       }
895:     }
896:     /* If no one else claims it, take the element */
897:     if (elemProcs[elem-startElement] < 0) {
898:       elemProcs[elem-startElement] = rank;
899:     }
900:   }

902:   *elementMap = elemProcs;
903:   return(0);
904: }
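A small worked example of the claiming rule used above, with hypothetical node ownership: the element is mapped to the lowest-numbered domain that owns one of its corner nodes off-process, and stays with the current rank only when every corner is locally owned.

#include <stdio.h>

int main(void)
{
  int ownerOfCorner[3] = {2, 0, 4};  /* hypothetical owning domain of each corner node */
  int rank = 2;                      /* the process that currently owns the element */
  int proc = -1, c;

  for (c = 0; c < 3; c++) {
    if (ownerOfCorner[c] != rank && (proc < 0 || ownerOfCorner[c] < proc)) proc = ownerOfCorner[c];
  }
  if (proc < 0) proc = rank;         /* no off-process corner: keep the element */
  printf("element mapped to domain %d\n", proc);  /* prints 0 */
  return 0;
}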

906: /*
907:   PartitionCreateElementPartition_Private - This function uses the element map to create
908:   partition structures for element-based data.

910:   Input Parameters:
911: + p              - The Partition
912: - elementMap     - The map from elements to domains

914:   Output Parameters:
915: . ordering       - The new element ordering

 917:   Output Parameters in Partition:
 918: + numLocElements - The number of local elements
 919: - firstElement   - The first element in each domain

921: .seealso: PartitionElements_Private()
922: */
923: int PartitionCreateElementPartition_Private(Partition p, int *elementMap, AO *ordering)
924: {
925:   int              numLocElements = p->numLocElements; /* Number of local elements before partitioning */
926:   int             *firstElement   = p->firstElement;
927:   int              numProcs       = p->numProcs;
928:   int              rank           = p->rank;
929:   int             *partSendElements;     /* The number of elements sent to each processor for partitioning */
930:   int             *sumSendElements;      /* The prefix sums of partSendElements */
931:   int             *partRecvElements;     /* The number of elements received from each processor for partitioning */
932:   int             *sumRecvElements;      /* The prefix sums of partRecvElements */
933:   int             *offsets;              /* The offsets into the send and receive arrays */
934:   int             *sendBuffer;
935:   int             *AppOrdering, *PetscOrdering;
936:   int              proc, elem;
937:   int              ierr;

940:   /* Initialize communication */
941:   PetscMalloc(numProcs * sizeof(int), &partSendElements);
942:   PetscMalloc(numProcs * sizeof(int), &sumSendElements);
943:   PetscMalloc(numProcs * sizeof(int), &partRecvElements);
944:   PetscMalloc(numProcs * sizeof(int), &sumRecvElements);
945:   PetscMalloc(numProcs * sizeof(int), &offsets);
946:   PetscMemzero(partSendElements,  numProcs * sizeof(int));
947:   PetscMemzero(sumSendElements,   numProcs * sizeof(int));
948:   PetscMemzero(partRecvElements,  numProcs * sizeof(int));
949:   PetscMemzero(sumRecvElements,   numProcs * sizeof(int));
950:   PetscMemzero(offsets,           numProcs * sizeof(int));

952:   /* Get sizes of interior element number blocks to send to each processor */
953:   for(elem = 0; elem < numLocElements; elem++) {
954:     partSendElements[elementMap[elem]]++;
955:   }

957:   /* Get sizes of interior element number blocks to receive from each processor */
958:   MPI_Alltoall(partSendElements, 1, MPI_INT, partRecvElements, 1, MPI_INT, p->comm);

960:   /* Calculate offsets into the interior element number send array */
961:   for(proc = 1; proc < numProcs; proc++) {
962:     sumSendElements[proc] = sumSendElements[proc-1] + partSendElements[proc-1];
963:     offsets[proc]         = sumSendElements[proc];
964:   }

966:   /* Calculate offsets into the interior element number receive array */
967:   for(proc = 1; proc < numProcs; proc++) {
968:     sumRecvElements[proc] = sumRecvElements[proc-1] + partRecvElements[proc-1];
969:   }

971:   /* Send interior element numbers to each processor -- could prevent copying elements already there I think */
972:   p->numLocElements = sumRecvElements[numProcs-1] + partRecvElements[numProcs-1];
973:   PetscMalloc(numLocElements    * sizeof(int), &sendBuffer);
974:   PetscMalloc(p->numLocElements * sizeof(int), &AppOrdering);
975:   PetscMalloc(p->numLocElements * sizeof(int), &PetscOrdering);
976:   for(elem = 0; elem < numLocElements; elem++) {
977:     sendBuffer[offsets[elementMap[elem]]++] = elem + firstElement[rank];
978:   }
979:   MPI_Alltoallv(sendBuffer,  partSendElements, sumSendElements, MPI_INT,
980:                        AppOrdering, partRecvElements, sumRecvElements, MPI_INT, p->comm);
981: 

 983:   /* If the mesh was initially distributed, we would need to send the elements themselves here */

985:   /* Recompute size and offset of each domain */
986:   MPI_Allgather(&p->numLocElements, 1, MPI_INT, &firstElement[1], 1, MPI_INT, p->comm);
987:   for(proc = 1, firstElement[0] = 0; proc <= numProcs; proc++) {
988:     firstElement[proc] = firstElement[proc] + firstElement[proc-1];
989:   }

991:   /* Create the global element reordering */
992:   for(elem = 0; elem < p->numLocElements; elem++) {
993:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
994:     PetscOrdering[elem] = elem + firstElement[rank];
995:   }

997:   /* Cleanup */
998:   PetscFree(partSendElements);
999:   PetscFree(sumSendElements);
1000:   PetscFree(partRecvElements);
1001:   PetscFree(sumRecvElements);
1002:   PetscFree(offsets);
1003:   PetscFree(sendBuffer);

1005:   /* Create the global element reordering */
1006:   AOCreateBasic(p->comm, p->numLocElements, AppOrdering, PetscOrdering, ordering);
1007:   PetscLogObjectParent(p, p->ordering);

1009:   PetscFree(AppOrdering);
1010:   PetscFree(PetscOrdering);
1011:   return(0);
1012: }
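The communication bookkeeping above is the usual counts-then-displacements pattern: the per-destination send counts go through MPI_Alltoall to obtain the receive counts, and exclusive prefix sums of both give the displacement arrays for MPI_Alltoallv. A minimal sketch of that one step (no MPI calls, hypothetical name):

/* Exclusive prefix sums turn per-processor message counts into the displacement
   arrays expected by MPI_Alltoallv, as sumSendElements/sumRecvElements are built above. */
static void CountsToDispls(int numProcs, const int counts[], int displs[])
{
  int proc;
  displs[0] = 0;
  for (proc = 1; proc < numProcs; proc++) displs[proc] = displs[proc-1] + counts[proc-1];
  /* the total length of the packed buffer is displs[numProcs-1] + counts[numProcs-1] */
}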

1014: /*
1015:   PartitionPermuteElements_Private - This function permutes the data which is implicitly
1016:   indexed by element number

1018:   Input Parameter:
1019: . p         - The Partition

 1021:   Output Parameters in Mesh_Triangular:
1022: + faces     - The nodes on each element
1023: - neighbors - The neighbors of each element

1025: .seealso: PartitionElements_Private()
1026: */
1027: int PartitionPermuteElements_Private(Partition p)
1028: {
1029:   Mesh             mesh = p->mesh;
1030:   Mesh_Triangular *tri  = (Mesh_Triangular *) mesh->data;
1031:   int              ierr;

1034:   AOApplicationToPetscPermuteInt(p->ordering, mesh->numCorners, tri->faces);
1035:   AOApplicationToPetscPermuteInt(p->ordering, 3,                tri->neighbors);
1036:   return(0);
1037: }
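Both arrays permuted above are implicitly indexed by element number with a fixed block size (numCorners entries per element for faces, 3 for neighbors), so the reordering moves whole blocks at a time. A standalone sketch of such a blocked permutation, under its own convention for the direction of the renumbering (the AO routines fix their own):

/* Permute an array holding 'bs' values per element into a new element order.
   Here newpos[e] is the destination slot of old element e; only the blocked
   permutation idea is taken from the function above. */
static void PermuteBlocked(int numElements, int bs, const int newpos[], const int in[], int out[])
{
  int e, k;
  for (e = 0; e < numElements; e++) {
    for (k = 0; k < bs; k++) out[newpos[e]*bs + k] = in[e*bs + k];
  }
}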

1039: /*
1040:   PartitionRenumberElements_Private - This function renumbers the element-based data globally in
1041:   order to make the canonical numbers sequential in each domain.

1043:   Input Parameter:
1044: . p         - The Partition

1046:   Output Parameter in Mesh_Triangular:
1047: . neighbors - The neighbors of each element

1049: .seealso: PartitionElements_Private()
1050: */
1051: int PartitionRenumberElements_Private(Partition p)
1052: {
1053:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
1054:   int                      numElements = p->numElements;
1055:   int                     *neighbors   = tri->neighbors;
1056:   int                      ierr;

1059:   AOApplicationToPetsc(p->ordering, numElements*3, neighbors);
1060:   return(0);
1061: }

1063: /*
1064:   PartitionCalcGhostElements_Private - This function calculates the ghost elements.

1066:   Input Parameters:
1067: . p         - The Partition

 1069:   Output Parameters in Partition: the ghost element arrays (ghostElements, ghostElementProcs) and numOverlapElements are updated

1071: .seealso: PartitionElements_Private()
1072: */
1073: int PartitionCalcGhostElements_Private(Partition p)
1074: {
1075:   Partition_Triangular_2D *q            = (Partition_Triangular_2D *) p->data;
1076:   Mesh                     mesh         = p->mesh;
1077:   Mesh_Triangular         *tri          = (Mesh_Triangular *) mesh->data;
1078:   int                      numCorners   = mesh->numCorners;
1079:   int                      numElements  = p->numElements;
1080:   int                     *elements     = tri->faces;
1081:   int                     *neighbors    = tri->neighbors;
1082:   int                     *firstElement = p->firstElement;
1083:   int                      numProcs     = p->numProcs;
1084:   int                      rank         = p->rank;
1085:   int                      numLocNodes  = q->numLocNodes;
1086:   int                      startNode    = q->firstNode[rank];
1087:   PetscTruth               isNodePart   = q->isNodePartitioned;
1088:   int                     *newProcElements; /* The number of new ghost elements from each processor */
1089:   int                      numNewElements;  /* The number of new ghost elements */
1090:   int                     *newElements;     /* The new ghost elements */
1091:   int                     *offsets;         /* The offsets into the send and receive arrays */
1092:   int                      degree;          /* The degree of a vertex */
1093:   int                     *support;         /* The list of elements in the support of a basis function */
1094:   int                     *elemMap;
1095:   int                      proc, elem, bElem, sElem, nElem, corner, neighbor, node;
1096:   int                      ierr;

1099:   PetscMalloc(numElements  * sizeof(int), &elemMap);
1100:   PetscMalloc(numProcs     * sizeof(int), &newProcElements);
1101:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1102:   PetscMemzero(newProcElements,  numProcs     * sizeof(int));
1103:   PetscMemzero(offsets,          (numProcs+1) * sizeof(int));
1104:   for(elem = 0; elem < numElements; elem++) {
1105:     elemMap[elem] = -1;
1106:   }
1107:   for(elem = 0; elem < numElements; elem++) {
1108:     if ((elem >= firstElement[rank]) && (elem < firstElement[rank+1])) {
1109:       /* Find a boundary element */
1110:       for(neighbor = 0; neighbor < 3; neighbor++) {
1111:         bElem = neighbors[elem*3+neighbor];
1112:         if ((bElem >= 0) && ((bElem < firstElement[rank]) || (bElem >= firstElement[rank+1])))
1113:           break;
1114:       }

1116:       if (neighbor < 3) {
1117:         /* Check the support of each vertex for off-processor elements */
1118:         for(corner = 0; corner < numCorners; corner++) {
1119:           node = elements[elem*numCorners+corner];
1120:           MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1121:           for(sElem = 0; sElem < degree; sElem++) {
1122:             nElem = support[sElem];
1123:             if (elemMap[nElem] >= 0) continue;
1124:             for(proc = 0; proc < numProcs; proc++) {
1125:               if ((proc != rank) && (nElem >= firstElement[proc]) && (nElem < firstElement[proc+1])) {
1126:                 elemMap[nElem] = proc;
1127:                 break;
1128:               }
1129:             }
1130:           }
1131:           MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
1132:         }
1133:       }
1134:     } else if (isNodePart == PETSC_TRUE) {
1135:       if (elemMap[elem] >= 0) continue;
 1136:       /* We may also need elements which contain nodes we own, even though we are not attached to them */
1137:       for(corner = 0; corner < numCorners; corner++) {
1138:         node = elements[elem*numCorners+corner] - startNode;
1139:         if ((node >= 0) && (node < numLocNodes)) {
1140:           for(proc = 0; proc < numProcs; proc++) {
1141:             if ((elem >= firstElement[proc]) && (elem < firstElement[proc+1])) {
1142:               elemMap[elem] = proc;
1143:               break;
1144:             }
1145:           }
1146:         }
1147:       }
1148:     }
1149:   }

1151:   /* Compute new ghost element offsets */
1152:   for(elem = 0; elem < numElements; elem++) {
1153:     if (elemMap[elem] >= 0) {
1154:       newProcElements[elemMap[elem]]++;
1155:     }
1156:   }
1157:   for(proc = 0, numNewElements = 0; proc < numProcs; proc++) {
1158:     numNewElements  += newProcElements[proc];
1159:     offsets[proc+1]  = offsets[proc] + newProcElements[proc];
1160:   }

1162:   /* Get ghost nodes */
1163:   if (numNewElements > 0) {
1164:     PetscMalloc(numNewElements * sizeof(int), &newElements);
1165:     for(elem = 0; elem < numElements; elem++) {
1166:       if (elemMap[elem] >= 0) {
1167:         newElements[offsets[elemMap[elem]]++] = elem;
1168:       }
1169:     }
1170:   }
1171:   for(proc = 1; proc < numProcs-1; proc++) {
1172:     if (offsets[proc] - offsets[proc-1]  != newProcElements[proc]) {
1173:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc %d should be %d",
1174:                offsets[proc] - offsets[proc-1], proc, newProcElements[proc]);
1175:     }
1176:   }
1177:   if (offsets[0] != newProcElements[0]) {
1178:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost elements sent %d to proc 0 should be %d",
1179:              offsets[0], newProcElements[0]);
1180:   }

1182:   /* Add new ghosts */
1183:   p->numOverlapElements = p->numLocElements;
1184:   PartitionGetNewGhostElements_Serial(p, newProcElements, newElements);

1186:   /* Cleanup */
1187:   PetscFree(elemMap);
1188:   PetscFree(newProcElements);
1189:   PetscFree(offsets);
1190:   if (numNewElements > 0) {
1191:     PetscFree(newElements);
1192:   }
1193:   return(0);
1194: }

1196: /*
1197:   PartitionDistributeElements_Private - This function distributes the element-based data, and
1198:   permutes arrays which are implicitly indexed by element number.

1200:   Input Parameters:
1201: . p         - The Partition

1203:   Output Parameters in Mesh_Triangular:
1204: + faces     - The nodes on each element
1205: - neighbors - The element neighbors

1207: .seealso: PartitionElements_Private()
1208: */
1209: int PartitionDistributeElements_Private(Partition p)
1210: {
1211:   Mesh             mesh               = p->mesh;
1212:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1213:   int              numLocElements     = p->numLocElements;
1214:   int              numOverlapElements = p->numOverlapElements;
1215:   int              numGhostElements   = numOverlapElements - numLocElements;
1216:   int              numCorners         = mesh->numCorners;
1217:   int             *firstElement       = p->firstElement;
1218:   int             *ghostElements      = p->ghostElements;
1219:   int              rank               = p->rank;
1220:   int             *temp;
1221:   int              elem, corner, neighbor;
1222:   int              ierr;

1224:   /* Note here that we can use PetscMemcpy() for the interior variables because we already permuted the
1225:      arrays so that ghost elements could be computed.
1226:   */
1228:   mesh->numFaces = numLocElements;
1229:   PetscMalloc(numOverlapElements*numCorners * sizeof(int), &temp);
1230:   /* Interior faces */
1231:   PetscMemcpy(temp, &tri->faces[firstElement[rank]*numCorners], numLocElements*numCorners * sizeof(int));
1232:   /* Ghost faces */
1233:   for(elem = 0; elem < numGhostElements; elem++) {
1234:     for(corner = 0; corner < numCorners; corner++) {
1235:       temp[(numLocElements+elem)*numCorners+corner] = tri->faces[ghostElements[elem]*numCorners+corner];
1236:     }
1237:   }
1238:   PetscFree(tri->faces);
1239:   tri->faces = temp;
1240:   PetscLogObjectMemory(p, numGhostElements*numCorners * sizeof(int));

1242:   PetscMalloc(numOverlapElements*3 * sizeof(int), &temp);
1243:   /* Interior neighbors */
1244:   PetscMemcpy(temp, &tri->neighbors[firstElement[rank]*3], numLocElements*3 * sizeof(int));
1245:   /* Ghost neighbors */
1246:   for(elem = 0; elem < numGhostElements; elem++) {
1247:     for(neighbor = 0; neighbor < 3; neighbor++) {
1248:       temp[(numLocElements+elem)*3+neighbor] = tri->neighbors[ghostElements[elem]*3+neighbor];
1249:     }
1250:   }
1251:   PetscFree(tri->neighbors);
1252:   tri->neighbors = temp;
1253:   PetscLogObjectMemory(p, numGhostElements*3 * sizeof(int));

1255:   return(0);
1256: }
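After this redistribution every element-indexed array uses the same local layout: rows 0 .. numLocElements-1 hold the interior elements in their new global order, and rows numLocElements .. numOverlapElements-1 hold the ghost copies in the order of p->ghostElements. A hypothetical accessor, only to make the addressing explicit:

/* A local element index addresses the [interior | ghost] layout directly:
   valid for 0 <= localElem < numOverlapElements. */
static const int *ElementCorners(const int *faces, int numCorners, int localElem)
{
  return &faces[localElem*numCorners];
}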

1258: int PartitionElementGlobalToLocal_Private(Partition p)
1259: {
1260:   Mesh_Triangular *tri                = (Mesh_Triangular *) p->mesh->data;
1261:   int              numOverlapElements = p->numOverlapElements;
1262:   int             *neighbors          = tri->neighbors;
1263:   int              neighbor;
1264:   int              ierr;

1267:   /* We indicate neighbors which are not interior or ghost by -2 since boundaries are -1 */
1268:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
1269:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
1270:   }
1271:   return(0);
1272: }

1274: int PartitionGetNewGhostNodes_Element(Partition p)
1275: {
1276:   Partition_Triangular_2D *q                = (Partition_Triangular_2D *) p->data;
1277:   Mesh                     mesh             = p->mesh;
1278:   Mesh_Triangular         *tri              = (Mesh_Triangular *) mesh->data;
1279:   int                      numCorners       = mesh->numCorners;
1280:   int                     *elements         = tri->faces;
1281:   int                     *firstElement     = p->firstElement;
1282:   int                      numGhostElements = p->numOverlapElements - p->numLocElements;
1283:   int                     *ghostElements    = p->ghostElements;
1284:   int                      numNodes         = q->numNodes;
1285:   int                     *firstNode        = q->firstNode;
1286:   int                      numProcs         = p->numProcs;
1287:   int                      rank             = p->rank;
1288:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1289:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1290:   int                     *newNodes;     /* New ghost nodes for this domain */
1291:   int                     *offsets;      /* The offsets into newNodes[] */
1292:   int                     *nodeMap;      /* The map of nodes to processors */
1293:   int                      proc, elem, corner, node, gNode;
1294:   int                      ierr;

1297:   if (q->isNodePartitioned == PETSC_FALSE)
1298:     return(0);
1299:   /* Initialize communication */
1300:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1301:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1302:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1303:   PetscMemzero(newProcNodes, numProcs      * sizeof(int));
1304:   PetscMemzero(offsets,     (numProcs+1)   * sizeof(int));
1305:   for(node = 0; node < numNodes; node++) {
1306:     nodeMap[node] = -1;
1307:   }

1309:   /* Check for new ghost nodes */
1310:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1311:     for(corner = 0; corner < numCorners; corner++) {
1312:       node = elements[elem*numCorners+corner];
1313:       if (nodeMap[node] >= 0) continue;

1315:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1316:         /* Get the domain of the node */
1317:         for(proc = 0; proc < numProcs; proc++) {
1318:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1319:         }
1320:         /* Check for the presence of this node */
1321:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1322:           nodeMap[node] = proc;
1323:         }
1324:       }
1325:     }
1326:   }
1327:   for(elem = 0; elem < numGhostElements; elem++) {
1328:     for(corner = 0; corner < numCorners; corner++) {
1329:       node = elements[ghostElements[elem]*numCorners+corner];
1330:       if (nodeMap[node] >= 0) continue;

1332:       if ((node < firstNode[rank]) || (node >= firstNode[rank+1])) {
1333:         /* Get the domain of the node */
1334:         for(proc = 0; proc < numProcs; proc++) {
1335:           if ((node >= firstNode[proc]) && (node < firstNode[proc+1])) break;
1336:         }
1337:         /* Check for the presence of this node */
1338:         if (PartitionGhostNodeIndex_Private(p, node, &gNode) && ((nodeMap[node] < 0) || (proc < nodeMap[node]))) {
1339:           nodeMap[node] = proc;
1340:         }
1341:       }
1342:     }
1343:   }

1345:   /* Compute new ghost node offsets */
1346:   for(node = 0; node < numNodes; node++) {
1347:     if (nodeMap[node] >= 0) {
1348:       newProcNodes[nodeMap[node]]++;
1349:     }
1350:   }
1351:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1352:     numNewNodes   += newProcNodes[proc];
1353:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1354:   }

1356:   /* Get ghost nodes */
1357:   if (numNewNodes > 0) {
1358:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1359:     for(node = 0; node < numNodes; node++) {
1360:       if (nodeMap[node] >= 0) {
1361:         newNodes[offsets[nodeMap[node]]++] = node;
1362:       }
1363:     }
1364:   }
1365:   for(proc = 1; proc < numProcs-1; proc++) {
1366:     if (offsets[proc] - offsets[proc-1]  != newProcNodes[proc]) {
1367:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1368:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1369:     }
1370:   }
1371:   if (offsets[0] != newProcNodes[0]) {
1372:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1373:   }

1375:   /* Add new ghosts */
1376:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1378:   /* Cleanup */
1379:   PetscFree(nodeMap);
1380:   PetscFree(newProcNodes);
1381:   PetscFree(offsets);
1382:   if (numNewNodes) {
1383:     PetscFree(newNodes);
1384:   }
1385:   return(0);
1386: }
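
  The newProcNodes/offsets/newNodes triple above is the usual count, prefix-sum,
  scatter pattern.  A small self-contained sketch of it (with invented variable
  names and data) is shown below; note that the scatter advances offsets[proc] to
  the start of the next bucket, which is what the consistency checks above compare
  against newProcNodes[proc]:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      int numProcs = 3, numNodes = 8;
      int nodeMap[8] = {-1, 2, 0, -1, 2, 1, -1, 0};  /* owning proc of each needed node, or -1 */
      int counts[3]  = {0, 0, 0};
      int offsets[4] = {0, 0, 0, 0};
      int *packed;
      int node, proc, total;

      for (node = 0; node < numNodes; node++)        /* count per owner */
        if (nodeMap[node] >= 0) counts[nodeMap[node]]++;
      for (proc = 0, total = 0; proc < numProcs; proc++) {
        total           += counts[proc];
        offsets[proc+1]  = offsets[proc] + counts[proc];
      }
      packed = (int *) malloc(total * sizeof(int));
      for (node = 0; node < numNodes; node++)        /* scatter into per-proc buckets */
        if (nodeMap[node] >= 0) packed[offsets[nodeMap[node]]++] = node;

      for (node = 0; node < total; node++) printf("%d ", packed[node]);
      printf("\n");                                  /* prints: 2 7 5 1 4 */
      free(packed);
      return 0;
    }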

1388: int PartitionGetNewGhostNodes_Edge(Partition p)
1389: {
1390:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1391:   Mesh_Triangular         *tri       = (Mesh_Triangular *) p->mesh->data;
1392:   int                     *edges     = tri->edges;
1393:   int                     *firstEdge = q->firstEdge;
1394:   int                      numNodes  = q->numNodes;
1395:   int                     *firstNode = q->firstNode;
1396:   int                      numProcs  = p->numProcs;
1397:   int                      rank      = p->rank;
1398:   int                     *newProcNodes; /* Number of new ghost nodes needed from a given processor */
1399:   int                      numNewNodes;  /* Total number of new ghost nodes to receive */
1400:   int                     *newNodes;     /* New ghost nodes for this domain */
1401:   int                     *offsets;      /* The offsets into newNodes[] */
1402:   int                     *nodeMap;      /* The map of nodes to processors */
1403:   int                      proc, edge, node, startNode, endNode, ghostNode, gNode;
1404:   int                      ierr;

1407:   /* Initialize communication */
1408:   PetscMalloc(numProcs     * sizeof(int), &newProcNodes);
1409:   PetscMalloc((numProcs+1) * sizeof(int), &offsets);
1410:   PetscMalloc(numNodes     * sizeof(int), &nodeMap);
1411:   PetscMemzero(newProcNodes, numProcs    * sizeof(int));
1412:   PetscMemzero(offsets,     (numProcs+1) * sizeof(int));
1413:   for(node = 0; node < numNodes; node++) {
1414:     nodeMap[node] = -1;
1415:   }

1417:   /* Check for new ghost nodes */
1418:   for(edge = firstEdge[rank]; edge < firstEdge[rank+1]; edge++) {
1419:     /* Check for new ghost node */
1420:     startNode = edges[edge*2];
1421:     endNode   = edges[edge*2+1];
1422:     ghostNode = -1;
1423:     if ((startNode < firstNode[rank]) || (startNode >= firstNode[rank+1])) {
1424:       ghostNode = startNode;
1425:     } else if ((endNode < firstNode[rank]) || (endNode >= firstNode[rank+1])) {
1426:       ghostNode = endNode;
1427:     }
1428:     if (ghostNode >= 0) {
1429:       /* Get the domain of the node */
1430:       for(proc = 0; proc < numProcs; proc++) {
1431:         if ((ghostNode >= firstNode[proc]) && (ghostNode < firstNode[proc+1])) break;
1432:       }
1433:       /* Check for the presence of this ghost node */
1434:       if (PartitionGhostNodeIndex_Private(p, ghostNode, &gNode) && ((nodeMap[ghostNode] < 0) || (proc < nodeMap[ghostNode]))) {
1435:         /* We must add this node as a ghost node */
1436:         nodeMap[ghostNode] = proc;
1437:       }
1438:     }
1439:   }

1441:   /* Compute new ghost node offsets */
1442:   for(node = 0; node < numNodes; node++) {
1443:     if (nodeMap[node] >= 0) {
1444:       newProcNodes[nodeMap[node]]++;
1445:     }
1446:   }
1447:   for(proc = 0, numNewNodes = 0; proc < numProcs; proc++) {
1448:     numNewNodes   += newProcNodes[proc];
1449:     offsets[proc+1] = offsets[proc] + newProcNodes[proc];
1450:   }

1452:   /* Get ghost nodes */
1453:   if (numNewNodes > 0) {
1454:     PetscMalloc(numNewNodes * sizeof(int), &newNodes);
1455:     for(node = 0; node < numNodes; node++) {
1456:       if (nodeMap[node] >= 0) {
1457:         newNodes[offsets[nodeMap[node]]++] = node;
1458:       }
1459:     }
1460:   }
1461:   for(proc = 1; proc < numProcs-1; proc++) {
1462:     if (offsets[proc] - offsets[proc-1]  != newProcNodes[proc]) {
1463:       SETERRQ3(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc %d should be %d",
1464:                offsets[proc] - offsets[proc-1], proc, newProcNodes[proc]);
1465:     }
1466:   }
1467:   if (offsets[0] != newProcNodes[0]) {
1468:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of ghost nodes sent %d to proc 0 should be %d", offsets[0], newProcNodes[0]);
1469:   }

1471:   /* Add new ghosts */
1472:   PartitionGetNewGhostNodes_Serial(p, newProcNodes, newNodes);

1474:   /* Cleanup */
1475:   PetscFree(nodeMap);
1476:   PetscFree(newProcNodes);
1477:   PetscFree(offsets);
1478:   if (numNewNodes) {
1479:     PetscFree(newNodes);
1480:   }
1481:   return(0);
1482: }

1484: int PartitionElements_Private(Partition p)
1485: {
1486:   int (*f)(Partition, int, int **);
1487:   int  *elementMap; /* The map from elements to domains */
1488:   int   ierr;

1491:   /* Create a new map of elements to domains */
1492:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap", (void (**)(void)) &f);
1493:   (*f)(p, p->numLocElements, &elementMap);

1495: #if 0
1496:   /* Communicate interior elements */
1497:   GridInteriorExchange(numLocElements, elementMap, p->firstElement);
1498: #endif
1499:   /* Create the element partition */
1500:   PartitionCreateElementPartition_Private(p, elementMap, &p->ordering);
1501:   PetscFree(elementMap);

1503:   /* Permute arrays implicitly indexed by element number */
1504:   PartitionPermuteElements_Private(p);

1506:   /* Globally renumber the elements to make canonical numbers sequential in each domain */
1507:   PartitionRenumberElements_Private(p);

1509:   /* Calculate ghosts */
1510:   PartitionCalcGhostElements_Private(p);
1511: 
1512:   /* Check for new ghost nodes created by the element partition */
1513:   PartitionGetNewGhostNodes_Element(p);

1515:   p->isElementPartitioned = PETSC_TRUE;
1516:   return(0);
1517: }
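
  PartitionElements_Private(), PartitionNodes_Private(), and PartitionEdges_Private()
  all use the same dispatch: the map-creation strategy is attached to the object by
  name and later retrieved with PetscObjectQueryFunction() and called through a
  pointer.  A stripped-down, standalone sketch of that style of dispatch (CreateMapFn
  and CreateMapTrivial are invented for the illustration) is:

    #include <stdio.h>
    #include <stdlib.h>

    typedef int (*CreateMapFn)(int numLocal, int **map);

    /* The simplest possible strategy: put everything on processor 0 */
    static int CreateMapTrivial(int numLocal, int **map)
    {
      int i, *m = (int *) malloc(numLocal * sizeof(int));
      for (i = 0; i < numLocal; i++) m[i] = 0;
      *map = m;
      return 0;
    }

    int main(void)
    {
      CreateMapFn f = CreateMapTrivial;  /* stands in for the queried function */
      int        *map, i;
      (*f)(4, &map);
      for (i = 0; i < 4; i++) printf("%d ", map[i]);
      printf("\n");
      free(map);
      return 0;
    }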

1519: /*
1520:   PartitionCreateNodeMap_Simple_Seq - This function creates a map from nodes to domains,
1521:   by keeping the contiguous node blocks of the existing serial ordering (for example a bandwidth-reducing reordering).

1523:   Input Parameters:
1524: + p        - The Partition
1525: - numNodes - The global number of nodes

1527:   Output Parameter:
1528: . nodeMap  - The map from nodes to domains

1530: .seealso: PartitionNodes_Private()
1531: */
1532: int PartitionCreateNodeMap_Simple_Seq(Partition p, int numNodes, int **nodeMap)
1533: {
1534:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1535:   int                     *firstNode = q->firstNode;
1536:   int                      rank      = p->rank;
1537:   int                     *nodeProcs; /* The processor which each node will lie on */
1538:   int                      node;
1539:   int                      ierr;

1542:   /* Use the existing interior nodes */
1543:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1544:   for(node = 0; node < numNodes; node++) {
1545:     nodeProcs[node] = -1;
1546:   }
1547:   for(node = firstNode[rank]; node < firstNode[rank+1]; node++) {
1548:     nodeProcs[node] = rank;
1549:   }

1551:   *nodeMap = nodeProcs;
1552:   return(0);
1553: }

1555: /*
1556:   PartitionCreateNodeMap_ElementBased - This function creates a map from nodes to domains,
1557:   based upon a prior partition of the elements.

1559:   Input Parameters:
1560: + p        - The Partition
1561: - numNodes - The global number of nodes

1563:   Output Parameter:
1564: . nodeMap  - The map from nodes to domains

1566: .seealso: PartitionNodes_Private()
1567: */
1568: int PartitionCreateNodeMap_ElementBased(Partition p, int numNodes, int **nodeMap)
1569: {
1570:   Mesh             mesh               = p->mesh;
1571:   Mesh_Triangular *tri                = (Mesh_Triangular *) mesh->data;
1572:   int              numLocElements     = p->numLocElements;
1573:   int              numGhostElements   = p->numOverlapElements - p->numLocElements;
1574:   int              numCorners         = mesh->numCorners;
1575:   int             *elements           = tri->faces;
1576:   int             *firstElement       = p->firstElement;
1577:   int             *ghostElements      = p->ghostElements;
1578:   int             *ghostElementProcs  = p->ghostElementProcs;
1579:   int              rank               = p->rank;
1580:   int             *nodeProcs;     /* The processor which each node will lie on */
1581:   int             *support;
1582:   int              nProc, elem, sElem, nElem, nLocElem, gElem, corner, nCorner, node, degree;
1583:   int              ierr;

1586:   /* Count nodes on this partition -- keep node if you are the lower numbered domain */
1587:   PetscMalloc(numNodes * sizeof(int), &nodeProcs);
1588:   for(node = 0; node < numNodes; node++) {
1589:     nodeProcs[node] = -1;
1590:   }

1592:   for(elem = firstElement[rank]; elem < firstElement[rank+1]; elem++) {
1593:     for(corner = 0; corner < numCorners; corner++) {
1594:       node = elements[elem*numCorners+corner];

1596:       /* Check the support of the node */
1597:       MeshGetNodeSupport(mesh, node, elem, &degree, &support);
1598:       for(sElem = 0; sElem < degree; sElem++) {
1599:         nElem = support[sElem];
1600:         /* See if neighbor is in another domain */
1601:         if ((nElem < firstElement[rank]) || (nElem >= firstElement[rank+1])) {
1602:           /* Check to see if node is contained in the neighboring element */
1603:           for(nCorner = 0; nCorner < numCorners; nCorner++) {
1604:             if (elements[nElem*numCorners+nCorner] == node) {
1605:               ierr  = PartitionGlobalToLocalElementIndex(p, nElem, &nLocElem);
1606:               nProc = ghostElementProcs[nLocElem-numLocElements];
1607:               /* Give the node to the lowest numbered domain */
1608:               if ((nProc < rank) && ((nodeProcs[node] < 0) || (nProc < nodeProcs[node]))) {
1609:                 nodeProcs[node] = nProc;
1610:               }
1611:               break;
1612:             }
1613:           }
1614:         }
1615:       }
1616:       MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);

1618:       /* If no one else claims it, take the node */
1619:       if (nodeProcs[node] < 0) {
1620:         nodeProcs[node] = rank;
1621:       }
1622:     }
1623:   }

1625:   /* Now assign the ghost nodes from ghost elements (which we can never own) */
1626:   for(gElem = 0; gElem < numGhostElements; gElem++) {
1627:     for(corner = 0; corner < numCorners; corner++) {
1628:       node = elements[ghostElements[gElem]*numCorners+corner];
1629:       if (nodeProcs[node] < 0)
1630:         nodeProcs[node] = ghostElementProcs[gElem];
1631:     }
1632:   }

1634:   *nodeMap = nodeProcs;
1635:   return(0);
1636: }
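
  The ownership rule above is simply "lowest rank wins", applied over the element
  support of each node.  A tiny standalone sketch of the tie-breaking (claims[]
  stands in for the ranks found in a node's support; the data is made up):

    #include <stdio.h>

    int main(void)
    {
      int rank      = 2;
      int claims[3] = {3, 1, 2};  /* ranks of neighboring elements sharing the node */
      int owner     = -1;
      int i;

      for (i = 0; i < 3; i++) {
        if ((claims[i] < rank) && ((owner < 0) || (claims[i] < owner))) owner = claims[i];
      }
      if (owner < 0) owner = rank;    /* unclaimed by a lower rank: keep it */
      printf("owner = %d\n", owner);  /* prints: owner = 1 */
      return 0;
    }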

1638: /*
1639:   PartitionCreateNodePartition_Private - This function uses the node map to create
1640:   partition structures for node-based data.

1642:   Input Parameters:
1643: + p               - The Partition
1644: - nodeMap         - The map from nodes to domains

1646:   Output Parameter:
1647: . ordering        - The new node ordering

1649:   Output Parameters in Partition_Triangular_2D:
1650: + numLocNodes     - The number of local nodes
1651: . numOverlapNodes - The number of local + ghost nodes
1652: . firstNode       - The first node in each domain
1653: - ghostNodes      - The global number for each ghost node

1655: .seealso: PartitionNodes_Private()
1656: */
1657: int PartitionCreateNodePartition_Private(Partition p, int *nodeMap, AO *ordering)
1658: {
1659:   Partition_Triangular_2D *q        = (Partition_Triangular_2D *) p->data;
1660:   int                      numNodes = q->numNodes;
1661:   int                      numProcs = p->numProcs;
1662:   int                      rank     = p->rank;
1663:   int                      numGhostNodes; /* Number of ghost nodes for this domain */
1664:   int                     *AppOrdering, *PetscOrdering;
1665:   int                      proc, node, index, index2;
1666:   int                      ierr;

1669:   /* Determine local and ghost sizes */
1670:   for(node = 0, q->numLocNodes = 0, numGhostNodes = 0; node < numNodes; node++) {
1671:     if (nodeMap[node] == rank) {
1672:       q->numLocNodes++;
1673:     } else if (nodeMap[node] >= 0) {
1674:       numGhostNodes++;
1675:     }
1676:   }

1678:   /* Recompute size and offset of each domain */
1679:   MPI_Allgather(&q->numLocNodes, 1, MPI_INT, &q->firstNode[1], 1, MPI_INT, p->comm);
1680:   for(proc = 1, q->firstNode[0] = 0; proc <= numProcs; proc++) {
1681:     q->firstNode[proc] = q->firstNode[proc] + q->firstNode[proc-1];
1682:   }
1683:   if (q->firstNode[numProcs] != numNodes) {
1684:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of nodes %d should be %d", q->firstNode[numProcs], numNodes);
1685:   }

1687:   /* Setup ghost node structures */
1688:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;
1689:   if (numGhostNodes > 0) {
1690:     PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
1691:   }

1693:   /* Get indices for reordering */
1694:   PetscMalloc(q->numLocNodes * sizeof(int), &AppOrdering);
1695:   PetscMalloc(q->numLocNodes * sizeof(int), &PetscOrdering);
1696:   for(node = 0; node < q->numLocNodes; node++) {
1697:     /* This would be the time to do RCM on the local graph by reordering PetscOrdering[] */
1698:     PetscOrdering[node] = q->firstNode[rank] + node;
1699:   }
1700:   for(node = 0, index = 0, index2 = 0; node < numNodes; node++) {
1701:     if (nodeMap[node] == rank) {
1702:       AppOrdering[index++]    = node;
1703:     } else if (nodeMap[node] >= 0) {
1704:       q->ghostNodes[index2++] = node;
1705:     }
1706:   }
1707:   if (index  != q->numLocNodes) SETERRQ(PETSC_ERR_PLIB, "Invalid node renumbering");
1708:   if (index2 != numGhostNodes)  SETERRQ(PETSC_ERR_PLIB, "Invalid ghost node renumbering");

1710:   /* Create the global node reordering */
1711:   AOCreateBasic(p->comm, q->numLocNodes, AppOrdering, PetscOrdering, ordering);
1712:   if (ierr) {
1713:     PartitionDebugAO_Private(p, nodeMap);
1714:   }

1716:   PetscFree(AppOrdering);
1717:   PetscFree(PetscOrdering);
1718:   return(0);
1719: }
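
  The ordering created above pairs two index lists: the old global numbers of the
  nodes this domain keeps (AppOrdering) and the new, contiguous numbers they receive
  starting at firstNode[rank] (PetscOrdering).  A serial sketch of how those two
  lists line up on one rank (made-up data, not the AO machinery itself):

    #include <stdio.h>

    int main(void)
    {
      int nodeMap[6] = {1, 0, 1, 0, 1, -1};  /* owner of each node; -1: unused here */
      int rank       = 1;
      int firstNode  = 2;                    /* new numbers on this rank start at 2 */
      int app[6], petsc[6];
      int node, n = 0;

      for (node = 0; node < 6; node++)
        if (nodeMap[node] == rank) app[n++] = node;  /* old global numbers   */
      for (node = 0; node < n; node++)
        petsc[node] = firstNode + node;              /* new contiguous range */

      for (node = 0; node < n; node++) printf("%d -> %d\n", app[node], petsc[node]);
      return 0;                                      /* prints 0->2, 2->3, 4->4 */
    }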

1721: /*
1722:   PartitionPermuteNodes_Private - This function permutes the data which is implicitly
1723:   indexed by node number

1725:   Input Parameter:
1726: . p       - The Partition

1728:   Output Parameter in Mesh_Triangular:
1729: + nodes   - The coordinates on each node
1730: . markers - The node markers
1731: - degrees - The degree of each node

1733: .seealso: PartitionNodes_Private()
1734: */
1735: int PartitionPermuteNodes_Private(Partition p)
1736: {
1737:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
1738:   Mesh                     mesh = p->mesh;
1739:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
1740:   int                      ierr;

1743:   AOApplicationToPetscPermuteReal(q->nodeOrdering, mesh->dim, tri->nodes);
1744:   AOApplicationToPetscPermuteInt(q->nodeOrdering,    1,         tri->markers);
1745:   AOApplicationToPetscPermuteInt(q->nodeOrdering,    1,         tri->degrees);
1746:   return(0);
1747: }

1749: /*
1750:   PartitionRenumberNodes_Private - This function renumbers the node-based data globally in
1751:   order to make the canonical numbers sequential in each domain.

1753:   Input Parameter:
1754: . p          - The Partition

1756:   Output Parameters in Mesh_Triangular:
1757: + faces      - The nodes in each element
1758: . edges      - The nodes on each edge
1759: - bdNodes    - The nodes on each boundary

1761:   Output Parameter in Partition_Triangular_2D:
1762: . ghostNodes - The global number of each ghost node

1764: .seealso: PartitionNodes_Private()
1765: */
1766: int PartitionRenumberNodes_Private(Partition p)
1767: {
1768:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
1769:   Mesh                     mesh          = p->mesh;
1770:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
1771:   int                      numElements   = p->numElements;
1772:   int                      numCorners    = mesh->numCorners;
1773:   int                     *faces         = tri->faces;
1774:   int                      numEdges      = q->numEdges;
1775:   int                     *edges         = tri->edges;
1776:   int                      numBdNodes    = q->numBdNodes;
1777:   int                     *bdNodes       = tri->bdNodes;
1778:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
1779:   int                     *ghostNodes    = q->ghostNodes;
1780:   int                      ierr;

1783:   AOApplicationToPetsc(q->nodeOrdering, numEdges*2,             edges);
1784:   AOApplicationToPetsc(q->nodeOrdering, numElements*numCorners, faces);
1785:   AOApplicationToPetsc(q->nodeOrdering, numBdNodes,             bdNodes);
1786:   AOApplicationToPetsc(q->nodeOrdering, numGhostNodes,          ghostNodes);
1787:   return(0);
1788: }

1790: /*
1791:   PartitionDistributeNodes_Private - This function distributes the node-based data, and
1792:   permutes arrays which are implicitly indexed by node number.

1794:   Input Parameter:
1795: . p       - The Partition

1797:   Output Parameters in Mesh_Triangular:
1798: + nodes   - The node coordinates
1799: . markers - The node markers
1800: - degrees - The node degrees

1802: .seealso: PartitionNodes_Private()
1803: */
1804: int PartitionDistributeNodes_Private(Partition p)
1805: {
1806:   Partition_Triangular_2D *q               = (Partition_Triangular_2D *) p->data;
1807:   Mesh                     mesh            = p->mesh;
1808:   Mesh_Triangular         *tri             = (Mesh_Triangular *) mesh->data;
1809:   int                      dim             = mesh->dim;
1810:   int                      numLocNodes     = q->numLocNodes;
1811:   int                      numOverlapNodes = q->numOverlapNodes;
1812:   int                      numGhostNodes   = numOverlapNodes - numLocNodes;
1813:   int                     *firstNode       = q->firstNode;
1814:   int                     *ghostNodes      = q->ghostNodes;
1815:   int                      rank            = p->rank;
1816:   int                     *temp;
1817:   double                  *temp2;
1818:   int                      node, c;
1819:   int                      ierr;

1822:   mesh->numNodes = numLocNodes;
1823:   PetscMalloc(numOverlapNodes*dim * sizeof(double), &temp2);
1824:   /* Interior nodes */
1825:   PetscMemcpy(temp2, &tri->nodes[firstNode[rank]*dim], numLocNodes*dim * sizeof(double));
1826:   /* Ghost nodes */
1827:   for(node = 0; node < numGhostNodes; node++) {
1828:     for(c = 0; c < dim; c++) {
1829:       temp2[(numLocNodes+node)*dim+c] = tri->nodes[ghostNodes[node]*dim+c];
1830:     }
1831:   }
1832:   PetscFree(tri->nodes);
1833:   tri->nodes = temp2;
1834:   PetscLogObjectMemory(p, numGhostNodes*dim * sizeof(double));

1836:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
1837:   /* Interior markers */
1838:   PetscMemcpy(temp, &tri->markers[firstNode[rank]], numLocNodes * sizeof(int));
1839:   /* Ghost markers */
1840:   for(node = 0; node < numGhostNodes; node++) {
1841:     temp[numLocNodes+node] = tri->markers[ghostNodes[node]];
1842:   }
1843:   PetscFree(tri->markers);
1844:   tri->markers = temp;
1845:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

1847:   PetscMalloc(numOverlapNodes * sizeof(int), &temp);
1848:   /* Interior degrees */
1849:   PetscMemcpy(temp, &tri->degrees[firstNode[rank]], numLocNodes * sizeof(int));
1850:   /* Ghost degrees */
1851:   for(node = 0; node < numGhostNodes; node++) {
1852:     temp[numLocNodes+node] = tri->degrees[ghostNodes[node]];
1853:   }
1854:   PetscFree(tri->degrees);
1855:   tri->degrees = temp;
1856:   PetscLogObjectMemory(p, numGhostNodes * sizeof(int));

1858:   return(0);
1859: }
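
  Each of the three copies above follows the same pattern: copy the contiguous slab
  of owned entries, then append the ghost entries by indirect addressing so that
  ghost data sits directly behind the local data.  A standalone sketch with made-up
  data:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
      double global[8] = {10, 11, 12, 13, 14, 15, 16, 17};  /* serial, globally numbered */
      int    first     = 3, numLoc = 2;                     /* this rank owns 3..4       */
      int    ghosts[2] = {1, 6};                            /* global numbers of ghosts  */
      int    numGhost  = 2, i;
      double *local    = (double *) malloc((numLoc+numGhost) * sizeof(double));

      memcpy(local, &global[first], numLoc * sizeof(double));  /* owned slab   */
      for (i = 0; i < numGhost; i++)
        local[numLoc+i] = global[ghosts[i]];                   /* ghost gather */

      for (i = 0; i < numLoc+numGhost; i++) printf("%g ", local[i]);
      printf("\n");                                            /* prints: 13 14 11 16 */
      free(local);
      return 0;
    }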

1861: /*
1862:   PartitionNodeCalcGhostProcs_Private - This function determines the processor from
1863:   which each ghost node comes.

1865:   Input Parameter:
1866: . p              - The Partition

1868:   Output Parameter in Partition_Triangular_2D:
1869: . ghostNodeProcs - The domain of each ghost node

1871: .seealso: PartitionNodes_Private()
1872: */
1873: int PartitionNodeCalcGhostProcs_Private(Partition p)
1874: {
1875:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
1876:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
1877:   int                     *ghostNodes    = q->ghostNodes;
1878:   int                     *firstNode     = q->firstNode;
1879:   int                      numProcs      = p->numProcs;
1880:   int                     *nodePerm;
1881:   int                      proc, node;
1882:   int                      ierr;

1885:   if (numGhostNodes == 0)
1886:     return(0);

1888:   /* Re-sort the ghost nodes after renumbering */
1889:   PartitionSortGhosts_Private(p, &numGhostNodes, ghostNodes, &nodePerm);
1890:   PetscFree(nodePerm);
1891:   q->numOverlapNodes = q->numLocNodes + numGhostNodes;

1893:   /* calculate ghost node domains */
1894:   PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
1895:   for(node = 0; node < numGhostNodes; node++) {
1896:     for(proc = 0; proc < numProcs; proc++) {
1897:       if ((ghostNodes[node] >= firstNode[proc]) && (ghostNodes[node] <  firstNode[proc+1])) {
1898:         q->ghostNodeProcs[node] = proc;
1899:         break;
1900:       }
1901:     }
1902:     if (proc == numProcs) SETERRQ2(PETSC_ERR_PLIB, "Invalid ghost node %d, global number %d", node, ghostNodes[node]);
1903:   }
1904: #ifdef PETSC_USE_BOPT_g
1905:   for(node = 0; node < numGhostNodes; node++) {
1906:     if ((ghostNodes[node] < firstNode[q->ghostNodeProcs[node]]) || (ghostNodes[node] >= firstNode[q->ghostNodeProcs[node]+1]))
1907:       SETERRQ2(PETSC_ERR_LIB, "Invalid source processor %d on ghost node %d", q->ghostNodeProcs[node], node);
1908:   }
1909: #endif
1910:   return(0);
1911: }
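
  The owner of a ghost node is found above by scanning the firstNode[] ranges.  The
  same lookup can be pictured (or, for many processors, replaced) by a binary search
  over those ranges; the sketch below is illustrative, not the code used here:

    #include <stdio.h>

    /* firstNode[p] .. firstNode[p+1]-1 are owned by processor p, so the owner of a
       node is the largest p with firstNode[p] <= node */
    static int owner(const int *firstNode, int numProcs, int node)
    {
      int lo = 0, hi = numProcs;  /* invariant: firstNode[lo] <= node < firstNode[hi] */
      while (lo + 1 < hi) {
        int mid = (lo + hi)/2;
        if (firstNode[mid] <= node) lo = mid; else hi = mid;
      }
      return lo;
    }

    int main(void)
    {
      int firstNode[4] = {0, 4, 7, 10};  /* 3 processors, 10 nodes */
      printf("%d %d %d\n", owner(firstNode, 3, 0), owner(firstNode, 3, 5), owner(firstNode, 3, 9));
      return 0;                          /* prints: 0 1 2 */
    }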

1913: int PartitionNodes_Private(Partition p)
1914: {
1915:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
1916:   int                    (*f)(Partition, int, int **);
1917:   int                     *nodeMap; /* The map from nodes to domains */
1918:   int                      ierr;

1921:   /* Create a new map of nodes to domains */
1922:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap", (void (**)(void)) &f);
1923:   (*f)(p, q->numNodes, &nodeMap);

1925:   /* Create the node partition */
1926:   PartitionCreateNodePartition_Private(p, nodeMap, &q->nodeOrdering);
1927:   PetscFree(nodeMap);

1929:   /* Permute arrays implicitly indexed by node number */
1930:   PartitionPermuteNodes_Private(p);

1932:   /* Globally renumber the nodes to make canonical numbers sequential in each domain */
1933:   /* WARNING: We must re-sort the ghost nodes after renumbering, but this is done anyway in edge partitioning */
1934:   PartitionRenumberNodes_Private(p);

1936:   /* Assign ghost node source processors */
1937:   PartitionNodeCalcGhostProcs_Private(p);

1939:   q->isNodePartitioned = PETSC_TRUE;
1940:   return(0);
1941: }

1943: /*
1944:   PartitionCreateEdgeMap_NodeBased - This function creates a map from edges to domains,
1945:   using a previous partition of the nodes.

1947:   Input Parameters:
1948: + p        - The Partition
1949: - numEdges - The global number of edges

1951:   Output Parameter:
1952: . edgeMap  - The map from edges to domains

1954: .seealso: PartitionEdges_Private()
1955: */
1956: int PartitionCreateEdgeMap_NodeBased(Partition p, int numEdges, int **edgeMap)
1957: {
1958:   Partition_Triangular_2D *q         = (Partition_Triangular_2D *) p->data;
1959:   Mesh                     mesh      = p->mesh;
1960:   Mesh_Triangular         *tri       = (Mesh_Triangular *) mesh->data;
1961:   int                     *edges     = tri->edges;
1962:   int                     *firstNode = q->firstNode;
1963:   int                      numProcs  = p->numProcs;
1964:   int                      rank      = p->rank;
1965:   int                      startProc = -1;
1966:   int                      endProc   = -1;
1967:   int                     *edgeProcs;     /* The processor assigned to each edge */
1968:   int                      proc, edge, startNode, endNode;
1969:   int                      ierr;

1972:   PetscMalloc(numEdges * sizeof(int), &edgeProcs);
1973:   for(edge = 0; edge < numEdges; edge++) {
1974:     edgeProcs[edge] = -1;
1975:   }

1977:   /* Count edges on this partition -- keep edge if you are the lower numbered domain */
1978:   for(edge = 0; edge < numEdges; edge++) {
1979:     startNode = edges[edge*2];
1980:     endNode   = edges[edge*2+1];

1982:     if ((startNode >= firstNode[rank]) && (startNode < firstNode[rank+1])) {
1983:       /* startNode is local */
1984:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
1985:         /* endNode is local */
1986:         edgeProcs[edge] = rank;
1987:       } else {
1988:         /* endNode is not local */
1989:         for(proc = 0; proc < numProcs; proc++) {
1990:           if ((endNode >= firstNode[proc]) && (endNode < firstNode[proc+1])) {
1991:             endProc = proc;
1992:             break;
1993:           }
1994:         }
1995:         if (rank < endProc) {
1996:           edgeProcs[edge] = rank;
1997:         }
1998:       }
1999:     } else {
2000:       /* startNode is not local */
2001:       if ((endNode >= firstNode[rank]) && (endNode < firstNode[rank+1])) {
2002:         /* endNode is local */
2003:         for(proc = 0; proc < numProcs; proc++) {
2004:           if ((startNode >= firstNode[proc]) && (startNode < firstNode[proc+1])) {
2005:             startProc = proc;
2006:             break;
2007:           }
2008:         }
2009:         if (rank < startProc) {
2010:           edgeProcs[edge] = rank;
2011:         }
2012:       }
2013:     }
2014:   }

2016:   *edgeMap = edgeProcs;
2017:   return(0);
2018: }

2020: int PartitionCreateEdgePartition_Private(Partition p, int *edgeMap, AO *ordering)
2021: {
2022:   Mesh                     mesh          = p->mesh;
2023:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2024:   int                      numEdges      = mesh->numEdges;
2025:   int                      numProcs      = p->numProcs;
2026:   int                      rank          = p->rank;
2027:   int                     *AppOrdering   = PETSC_NULL;
2028:   int                     *PetscOrdering = PETSC_NULL;
2029:   int                      proc, edge, index;
2030:   int                      ierr;

2033:   /* Determine local edges and new ghost nodes */
2034:   for(edge = 0, q->numLocEdges = 0; edge < numEdges; edge++) {
2035:     if (edgeMap[edge] == rank) {
2036:       q->numLocEdges++;
2037:     }
2038:   }

2040:   /* Recompute size and offset of each domain */
2041:   MPI_Allgather(&q->numLocEdges, 1, MPI_INT, &q->firstEdge[1], 1, MPI_INT, p->comm);
2042:   for(proc = 1, q->firstEdge[0] = 0; proc <= numProcs; proc++)
2043:     q->firstEdge[proc] = q->firstEdge[proc] + q->firstEdge[proc-1];
2044:   if (q->firstEdge[numProcs] != q->numEdges) {
2045:     SETERRQ2(PETSC_ERR_PLIB, "Invalid global number of edges %d should be %d", q->firstEdge[numProcs], q->numEdges);
2046:   }

2048:   /* Get indices for reordering */
2049:   if (q->numLocEdges > 0) {
2050:     PetscMalloc(q->numLocEdges * sizeof(int), &AppOrdering);
2051:     PetscMalloc(q->numLocEdges * sizeof(int), &PetscOrdering);
2052:   }
2053:   for(edge = 0; edge < q->numLocEdges; edge++) {
2054:     PetscOrdering[edge] = q->firstEdge[rank] + edge;
2055:   }
2056:   for(edge = 0, index = 0; edge < numEdges; edge++) {
2057:     if (edgeMap[edge] == rank) {
2058:       AppOrdering[index++] = edge;
2059:     }
2060:   }
2061:   if (index != q->numLocEdges) {
2062:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of local edges %d should be %d", index, q->numLocEdges);
2063:   }

2065:   /* Create the global edge reordering */
2066:   AOCreateBasic(p->comm, q->numLocEdges, AppOrdering, PetscOrdering, ordering);

2068:   if (q->numLocEdges > 0) {
2069:     PetscFree(AppOrdering);
2070:     PetscFree(PetscOrdering);
2071:   }
2072:   return(0);
2073: }

2075: /*
2076:   PartitionDistributeEdges_Private - This function distributes the edge-based data, and
2077:   permutes arrays which are implicitly indexed by edge number.

2079:   Input Parameter:
2080: . p           - The Partition

2082:   Output Parameters in Mesh_Triangular:
2083: + edges       - The nodes on each edge
2084: - edgemarkers - The edge markers

2086: .seealso: PartitionEdges_Private()
2087: */
2088: int PartitionDistributeEdges_Private(Partition p) {
2089:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2090:   Mesh                     mesh        = p->mesh;
2091:   Mesh_Triangular         *tri         = (Mesh_Triangular *) mesh->data;
2092:   int                      numLocEdges = q->numLocEdges;
2093:   int                     *firstEdge   = q->firstEdge;
2094:   int                      rank        = p->rank;
2095:   int                     *temp        = PETSC_NULL;
2096:   int                      ierr;

2099:   mesh->numEdges = numLocEdges;
2100:   if (numLocEdges > 0) {
2101:     PetscMalloc(numLocEdges*2 * sizeof(int), &temp);
2102:     PetscMemcpy(temp, &tri->edges[firstEdge[rank]*2], numLocEdges*2 * sizeof(int));
2103:     PetscFree(tri->edges);
2104:   }
2105:   tri->edges = temp;

2107:   if (numLocEdges > 0) {
2108:     PetscMalloc(numLocEdges * sizeof(int), &temp);
2109:     PetscMemcpy(temp, &tri->edgemarkers[firstEdge[rank]], numLocEdges * sizeof(int));
2110:     PetscFree(tri->edgemarkers);
2111:   }
2112:   tri->edgemarkers = temp;

2114:   return(0);
2115: }

2117: /*
2118:   PartitionPermuteEdges_Private - This function permutes the data which is implicitly
2119:   indexed by edge number

2121:   Input Parameter:
2122: . p           - The Partition

2124:   Output Parameter in Mesh_Triangular:
2125: + edges       - The nodes on each edge
2126: - edgemarkers - The edge markers

2128: .seealso: PartitionEdges_Private()
2129: */
2130: int PartitionPermuteEdges_Private(Partition p)
2131: {
2132:   Partition_Triangular_2D *q   = (Partition_Triangular_2D *) p->data;
2133:   Mesh_Triangular         *tri = (Mesh_Triangular *) p->mesh->data;
2134:   int                      ierr;

2137:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 2, tri->edges);
2138:   AOApplicationToPetscPermuteInt(q->edgeOrdering, 1, tri->edgemarkers);
2139:   return(0);
2140: }

2142: /*
2143:   PartitionRenumberEdges_Private - This function renumbers the edge-based data globally in
2144:   order to make the canonical numbers sequential in each domain.

2146:   Input Parameter:
2147: . p       - The Partition

2149:   Output Parameter in Mesh_Triangular:
2150: . bdEdges - The edges on each boundary

2152: .seealso: PartitionEdges_Private()
2153: */
2154: int PartitionRenumberEdges_Private(Partition p)
2155: {
2156:   Partition_Triangular_2D *q          = (Partition_Triangular_2D *) p->data;
2157:   Mesh                     mesh       = p->mesh;
2158:   Mesh_Triangular         *tri        = (Mesh_Triangular *) mesh->data;
2159:   int                      numBdEdges = mesh->numBdEdges;
2160:   int                     *bdEdges    = tri->bdEdges;
2161:   int                      ierr;

2164:   AOApplicationToPetsc(q->edgeOrdering, numBdEdges, bdEdges);
2165:   return(0);
2166: }

2168: int PartitionEdgeGlobalToLocal_Private(Partition p)
2169: {
2170:   Partition_Triangular_2D *q           = (Partition_Triangular_2D *) p->data;
2171:   Mesh_Triangular         *tri         = (Mesh_Triangular *) p->mesh->data;
2172:   int                      numLocEdges = q->numLocEdges;
2173:   int                     *edges       = tri->edges;
2174:   int                      node;
2175:   int                      ierr;

2178:   for(node = 0; node < numLocEdges*2; node++) {
2179:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2180:   }
2181:   return(0);
2182: }

2184: int PartitionEdges_Private(Partition p)
2185: {
2186:   Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
2187:   int                    (*f)(Partition, int, int **);
2188:   int                     *edgeMap; /* The map from edges to domains */
2189:   int                      ierr;

2192:   /* Create a new map of edges to domains */
2193:   PetscObjectQueryFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap", (void (**)(void)) &f);
2194:   (*f)(p, q->numEdges, &edgeMap);

2196:   /* Create the edge partition */
2197:   PartitionCreateEdgePartition_Private(p, edgeMap, &q->edgeOrdering);
2198:   PetscFree(edgeMap);

2200:   /* Permute arrays implicitly indexed by edge number */
2201:   PartitionPermuteEdges_Private(p);

2203:   /* Globally renumber the edges to make canonical numbers sequential in each domain */
2204:   PartitionRenumberEdges_Private(p);

2206:   /* Check for new ghost nodes created by the edge partition */
2207:   PartitionGetNewGhostNodes_Edge(p);

2209:   q->isEdgePartitioned = PETSC_TRUE;
2210:   return(0);
2211: }

2213: int PartitionBoundaryNodes_Private(Partition p)
2214: {
2215:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2216:   Mesh                     mesh          = p->mesh;
2217:   Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
2218:   int                      numBdNodes    = mesh->numBdNodes;
2219:   int                     *bdNodes       = tri->bdNodes;
2220:   int                     *firstNode     = q->firstNode;
2221:   int                      numProcs      = p->numProcs;
2222:   int                      rank          = p->rank;
2223:   int                      proc, node;
2224:   int                      ierr;

2227:   q->numLocBdNodes = 0;
2228:   for(node = 0; node < numBdNodes; node++) {
2229:     if ((bdNodes[node] >= firstNode[rank]) && (bdNodes[node] < firstNode[rank+1]))
2230:       q->numLocBdNodes++;
2231:   }
2232:   MPI_Allgather(&q->numLocBdNodes, 1, MPI_INT, &q->firstBdNode[1], 1, MPI_INT, p->comm);
2233:   q->firstBdNode[0] = 0;
2234:   for(proc = 1; proc <= numProcs; proc++) {
2235:     q->firstBdNode[proc] = q->firstBdNode[proc] + q->firstBdNode[proc-1];
2236:   }
2237:   if (q->firstBdNode[numProcs] != q->numBdNodes) {
2238:     SETERRQ2(PETSC_ERR_PLIB, "Invalid number of boundary nodes %d should be %d", q->firstBdNode[numProcs], q->numBdNodes);
2239:   }
2240:   return(0);
2241: }

2243: /*
2244:   PartitionDistributeBdNodes_Private - This function identifies the ghost boundary nodes,
2245:   that is, the ghost nodes which carry a nonzero marker.

2247:   Input Parameter:
2248: . p                 - The Partition

2250:   Output Parameters in Partition_Triangular_2D:
2251: + numOverlapBdNodes - The number of local + ghost boundary nodes
2252: - ghostBdNodes      - The ghost node index of each ghost boundary node

2254: .seealso: PartitionBoundaryNodes_Private()
2255: */
2256: int PartitionDistributeBdNodes_Private(Partition p)
2257: {
2258:   Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
2259:   Mesh_Triangular         *tri           = (Mesh_Triangular *) p->mesh->data;
2260:   int                      numLocNodes   = q->numLocNodes;
2261:   int                      numGhostNodes = q->numOverlapNodes - q->numLocNodes;
2262:   int                     *markers       = tri->markers;
2263:   int                      numLocBdNodes = q->numLocBdNodes;
2264:   int                      node, bdNode;
2265:   int                      ierr;

2268:   /* Process ghost boundary nodes */
2269:   q->numOverlapBdNodes = numLocBdNodes;
2270:   for(node = 0; node < numGhostNodes; node++) {
2271:     if (markers[numLocNodes+node] != 0)
2272:       q->numOverlapBdNodes++;
2273:   }
2274:   if (q->numOverlapBdNodes > numLocBdNodes) {
2275:     PetscMalloc((q->numOverlapBdNodes - numLocBdNodes) * sizeof(int), &q->ghostBdNodes);
2276:     for(node = 0, bdNode = 0; node < numGhostNodes; node++) {
2277:       if (markers[numLocNodes+node] != 0)
2278:         q->ghostBdNodes[bdNode++] = node;
2279:     }
2280:   }
2281:   return(0);
2282: }

2284: int PartitionDistribute_Private(Partition p)
2285: {

2289:   /* Redistribute the elements and arrays implicitly numbered by element numbers */
2290:   PartitionDistributeElements_Private(p);

2292:   /* Redistribute the nodes and permute arrays implicitly numbered by node numbers */
2293:   PartitionDistributeNodes_Private(p);

2295:   /* Redistribute the edges and permute arrays implicitly numbered by edge numbers */
2296:   PartitionDistributeEdges_Private(p);

2298:   /* Store ghost boundary nodes */
2299:   PartitionDistributeBdNodes_Private(p);
2300:   return(0);
2301: }

2303: int PartitionGlobalToLocal_Private(Partition p)
2304: {
2305:   Partition_Triangular_2D *q                  = (Partition_Triangular_2D *) p->data;
2306:   Mesh                     mesh               = p->mesh;
2307:   Mesh_Triangular         *tri                = (Mesh_Triangular *) mesh->data;
2308:   int                      numOverlapElements = p->numOverlapElements;
2309:   int                      numCorners         = mesh->numCorners;
2310:   int                     *faces              = tri->faces;
2311:   int                     *neighbors          = tri->neighbors;
2312:   int                      numLocEdges        = q->numLocEdges;
2313:   int                     *edges              = tri->edges;
2314:   int                      corner, neighbor, node;
2315:   int                      ierr;

2318:   for(corner = 0; corner < numOverlapElements*numCorners; corner++) {
2319:     PartitionGlobalToLocalNodeIndex(p, faces[corner], &faces[corner]);
2320:   }
2321:   /* Neighbors which are neither interior nor ghost are marked -2, since boundaries are already -1 */
2322:   for(neighbor = 0; neighbor < numOverlapElements*3; neighbor++) {
2323:     PartitionGlobalToLocalElementIndex(p, neighbors[neighbor], &neighbors[neighbor]);
2324:   }
2325:   for(node = 0; node < numLocEdges*2; node++) {
2326:     PartitionGlobalToLocalNodeIndex(p, edges[node], &edges[node]);
2327:   }
2328:   return(0);
2329: }

2331: /*@
2332:   PartitionCreateTriangular2D - Creates a partition of a two dimensional unstructured grid.

2334:   Collective on Mesh

2336:   Input Parameter:
2337: . mesh      - The mesh to be partitioned

2339:   Output Parameter:
2340: . partition - The partition

2342:   Level: beginner

2344: .keywords: unstructured mesh, partition
2345: .seealso: MeshCreateTriangular2D()
2346: @*/
2347: int PartitionCreateTriangular2D(Mesh mesh, Partition *part)
2348: {
2349:   int        numProcs;
2350:   PetscTruth opt;
2351:   int        ierr;

2354:   MPI_Comm_size(mesh->comm, &numProcs);
2355:   PartitionCreate(mesh, part);
2356:   if (numProcs == 1) {
2357:     PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_Uni",
2358:                                       (void (*)(void)) PartitionCreate_Uni);
2359: 
2360:   } else {
2361:     PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_ElementBased",
2362:                                       (void (*)(void)) PartitionCreate_ElementBased);
2363: 
2364:     PetscOptionsHasName(mesh->prefix, "-part_node_based", &opt);
2365:     if (opt == PETSC_TRUE) {
2366:       PetscObjectComposeFunction((PetscObject) *part, "PartitionTriangular2D_Create_C", "PartitionCreate_NodeBased",
2367:                                         (void (*)(void)) PartitionCreate_NodeBased);
2368: 
2369:     }
2370:   }
2371:   PartitionSetType(*part, PARTITION_TRIANGULAR_2D);

2373:   PartitionViewFromOptions_Private(*part, "Partition");
2374:   return(0);
2375: }
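
  A minimal usage sketch for the routine above, assuming the Mesh has already been
  created (for example with MeshCreateTriangular2D(), whose signature is not shown in
  this file); the fragment is not complete on its own:

    Mesh      mesh;   /* assumed to have been created already */
    Partition part;
    int       ierr;

    ierr = PartitionCreateTriangular2D(mesh, &part); CHKERRQ(ierr);
    /* On more than one process the element-based strategy is the default;
       the -part_node_based option selects the node-based strategy instead */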

2377: EXTERN_C_BEGIN
2378: int PartitionSerialize_Triangular_2D(Mesh mesh, Partition *part, PetscViewer viewer, PetscTruth store)
2379: {
2380:   Partition                p;
2381:   Partition_Triangular_2D *q;
2382:   int                      fd;
2383:   int                      numGhostElements, numGhostNodes, numGhostBdNodes, hasOrdering;
2384:   int                      numProcs, rank;
2385:   int                      one  = 1;
2386:   int                      zero = 0;
2387:   int                      ierr;

2390:   PetscViewerBinaryGetDescriptor(viewer, &fd);
2391:   if (store == PETSC_TRUE) {
2392:     p = *part;
2393:     numProcs = p->numProcs;
2394:     numGhostElements = p->numOverlapElements - p->numLocElements;
2395:     PetscBinaryWrite(fd, &p->numProcs,           1,                PETSC_INT, 0);
2396:     PetscBinaryWrite(fd, &p->rank,               1,                PETSC_INT, 0);
2397:     PetscBinaryWrite(fd, &p->numLocElements,     1,                PETSC_INT, 0);
2398:     PetscBinaryWrite(fd, &p->numElements,        1,                PETSC_INT, 0);
2399:     PetscBinaryWrite(fd, &p->numOverlapElements, 1,                PETSC_INT, 0);
2400:     PetscBinaryWrite(fd,  p->firstElement,       numProcs+1,       PETSC_INT, 0);
2401:     PetscBinaryWrite(fd,  p->ghostElements,      numGhostElements, PETSC_INT, 0);
2402:     PetscBinaryWrite(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT, 0);
2403:     if (p->ordering != PETSC_NULL) {
2404:       PetscBinaryWrite(fd, &one,                 1,                PETSC_INT, 0);
2405:       AOSerialize(p->comm, &p->ordering, viewer, store);
2406:     } else {
2407:       PetscBinaryWrite(fd, &zero,                1,                PETSC_INT, 0);
2408:     }

2410:     q    = (Partition_Triangular_2D *) (*part)->data;
2411:     numGhostNodes   = q->numOverlapNodes   - q->numLocNodes;
2412:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2413:     PetscBinaryWrite(fd, &q->numLocNodes,        1,                PETSC_INT, 0);
2414:     PetscBinaryWrite(fd, &q->numNodes,           1,                PETSC_INT, 0);
2415:     PetscBinaryWrite(fd, &q->numOverlapNodes,    1,                PETSC_INT, 0);
2416:     PetscBinaryWrite(fd,  q->firstNode,          numProcs+1,       PETSC_INT, 0);
2417:     PetscBinaryWrite(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT, 0);
2418:     PetscBinaryWrite(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT, 0);
2419:     PetscBinaryWrite(fd, &q->numLocEdges,        1,                PETSC_INT, 0);
2420:     PetscBinaryWrite(fd, &q->numEdges,           1,                PETSC_INT, 0);
2421:     PetscBinaryWrite(fd,  q->firstEdge,          numProcs+1,       PETSC_INT, 0);
2422:     PetscBinaryWrite(fd, &q->numLocBdNodes,      1,                PETSC_INT, 0);
2423:     PetscBinaryWrite(fd, &q->numBdNodes,         1,                PETSC_INT, 0);
2424:     PetscBinaryWrite(fd, &q->numOverlapBdNodes,  1,                PETSC_INT, 0);
2425:     PetscBinaryWrite(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT, 0);
2426:     PetscBinaryWrite(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT, 0);
2427:   } else {
2428:     /* Create the partition context */
2429:     PartitionCreate(mesh, &p);
2430:     PetscNew(Partition_Triangular_2D, &q);
2431:     PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2432:     PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2433:     PetscStrallocpy(PARTITION_TRIANGULAR_2D, &p->type_name);
2434:     p->data = (void *) q;

2436:     MPI_Comm_size(p->comm, &numProcs);
2437:     MPI_Comm_rank(p->comm, &rank);
2438:     PetscBinaryRead(fd, &p->numProcs,           1,                PETSC_INT);
2439:     PetscBinaryRead(fd, &p->rank,               1,                PETSC_INT);
2440:     if (p->numProcs != numProcs) {
2441:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid number of processors %d should be %d", numProcs, p->numProcs);
2442:     }
2443:     if (p->rank != rank) {
2444:       SETERRQ2(PETSC_ERR_FILE_UNEXPECTED, "Invalid processor rank %d should be %d", rank, p->rank);
2445:     }
2446:     PetscBinaryRead(fd, &p->numLocElements,     1,                PETSC_INT);
2447:     PetscBinaryRead(fd, &p->numElements,        1,                PETSC_INT);
2448:     PetscBinaryRead(fd, &p->numOverlapElements, 1,                PETSC_INT);
2449:     PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2450:     PetscBinaryRead(fd,  p->firstElement,       numProcs+1,       PETSC_INT);
2451:     numGhostElements = p->numOverlapElements - p->numLocElements;
2452:     if (numGhostElements > 0) {
2453:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElements);
2454:       PetscMalloc(numGhostElements * sizeof(int), &p->ghostElementProcs);
2455:     }
2456:     PetscBinaryRead(fd,  p->ghostElements,      numGhostElements, PETSC_INT);
2457:     PetscBinaryRead(fd,  p->ghostElementProcs,  numGhostElements, PETSC_INT);
2458:     PetscBinaryRead(fd, &hasOrdering,           1,                PETSC_INT);
2459:     if (hasOrdering) {
2460:       AOSerialize(p->comm, &p->ordering, viewer, store);
2461:     }

2463:     q->ghostNodes        = PETSC_NULL;
2464:     q->ghostNodeProcs    = PETSC_NULL;
2465:     q->ghostBdNodes      = PETSC_NULL;
2466:     q->nodeOrdering      = PETSC_NULL;
2467:     q->edgeOrdering      = PETSC_NULL;

2469:     PetscBinaryRead(fd, &q->numLocNodes,        1,                PETSC_INT);
2470:     PetscBinaryRead(fd, &q->numNodes,           1,                PETSC_INT);
2471:     PetscBinaryRead(fd, &q->numOverlapNodes,    1,                PETSC_INT);
2472:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2473:     PetscBinaryRead(fd,  q->firstNode,          numProcs+1,       PETSC_INT);
2474:     numGhostNodes   = q->numOverlapNodes - q->numLocNodes;
2475:     if (numGhostNodes > 0) {
2476:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodes);
2477:       PetscMalloc(numGhostNodes * sizeof(int), &q->ghostNodeProcs);
2478:     }
2479:     PetscBinaryRead(fd,  q->ghostNodes,         numGhostNodes,    PETSC_INT);
2480:     PetscBinaryRead(fd,  q->ghostNodeProcs,     numGhostNodes,    PETSC_INT);
2481:     PetscBinaryRead(fd, &q->numLocEdges,        1,                PETSC_INT);
2482:     PetscBinaryRead(fd, &q->numEdges,           1,                PETSC_INT);
2483:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2484:     PetscBinaryRead(fd,  q->firstEdge,          numProcs+1,       PETSC_INT);
2485:     PetscBinaryRead(fd, &q->numLocBdNodes,      1,                PETSC_INT);
2486:     PetscBinaryRead(fd, &q->numBdNodes,         1,                PETSC_INT);
2487:     PetscBinaryRead(fd, &q->numOverlapBdNodes,  1,                PETSC_INT);
2488:     PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2489:     PetscBinaryRead(fd,  q->firstBdNode,        numProcs+1,       PETSC_INT);
2490:     numGhostBdNodes = q->numOverlapBdNodes - q->numLocBdNodes;
2491:     if (numGhostBdNodes) {
2492:       PetscMalloc(numGhostBdNodes * sizeof(int), &q->ghostBdNodes);
2493:     }
2494:     PetscBinaryRead(fd,  q->ghostBdNodes,       numGhostBdNodes,  PETSC_INT);
2495:     PetscLogObjectMemory(p, ((numProcs+1)*4 + numGhostElements*2 + numGhostNodes*2 + numGhostBdNodes)* sizeof(int));
2496:     *part = p;
2497:   }
2498:   if (p->numProcs > 1) {
2499:     AOSerialize(p->comm, &q->nodeOrdering, viewer, store);
2500:     AOSerialize(p->comm, &q->edgeOrdering, viewer, store);
2501:   }

2503:   return(0);
2504: }
2505: EXTERN_C_END
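
  The store and load branches above are symmetric: every variable-length array is
  preceded by the counts needed to allocate it before reading.  A generic standalone
  sketch of that discipline, using plain stdio instead of the PETSc binary viewer:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      const char *name = "part.bin";  /* throw-away file for the example */
      int  n = 3, data[3] = {5, 8, 13};
      FILE *fp;

      fp = fopen(name, "wb");         /* store: count first, then the array */
      fwrite(&n,   sizeof(int), 1, fp);
      fwrite(data, sizeof(int), n, fp);
      fclose(fp);

      fp = fopen(name, "rb");         /* load: read count, allocate, read array */
      {
        int m, *loaded, i;
        fread(&m, sizeof(int), 1, fp);
        loaded = (int *) malloc(m * sizeof(int));
        fread(loaded, sizeof(int), m, fp);
        for (i = 0; i < m; i++) printf("%d ", loaded[i]);
        printf("\n");                 /* prints: 5 8 13 */
        free(loaded);
      }
      fclose(fp);
      remove(name);
      return 0;
    }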

2507: EXTERN_C_BEGIN
2508: int PartitionCreate_ElementBased(Partition p)
2509: {
2510: #ifdef PETSC_USE_BOPT_g
2511:   int cut; /* The number of edges of the dual crossing the partition */
2512: #endif


2517:   /* Partition elements */
2518:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2519:                                     "PartitionCreateElementMap_METIS", (void (*)(void)) PartitionCreateElementMap_METIS);
2520: 
2521:   PartitionElements_Private(p);

2523:   /* Partition the nodes */
2524:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2525:                                     "PartitionCreateNodeMap_ElementBased", (void (*)(void)) PartitionCreateNodeMap_ElementBased);
2526: 
2527:   PartitionNodes_Private(p);

2529:   /* Partition the edges -- Changes the ghost nodes */
2530:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2531:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2532: 
2533:   PartitionEdges_Private(p);

2535:   /* Partition boundary nodes */
2536:   PartitionBoundaryNodes_Private(p);

2538:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2539:   PartitionDistribute_Private(p);

2541:   /* Change to local node numbers */
2542:   PartitionGlobalToLocal_Private(p);

2544: #ifdef PETSC_USE_BOPT_g
2545:   /* Compute the size of the cut */
2546:   PartitionCalcCut_Private(p, &cut);
2547:   PetscLogInfo(p, "Size of cut: %d\n", cut);
2548: #endif

2550:   return(0);
2551: }
2552: EXTERN_C_END

2554: EXTERN_C_BEGIN
2555: int PartitionCreate_Uni(Partition p)
2556: {
2557:   Partition_Triangular_2D *q    = (Partition_Triangular_2D *) p->data;
2558:   Mesh                     mesh = p->mesh;
2559:   Mesh_Triangular         *tri  = (Mesh_Triangular *) mesh->data;
2560:   PetscTruth               opt;
2561:   int                      ierr;

2564:   PetscOptionsHasName(p->prefix, "-part_mesh_reorder", &opt);
2565:   if (opt == PETSC_TRUE) {
2566:     MeshGetOrdering(mesh, MESH_ORDER_TRIANGULAR_2D_RCM, &q->nodeOrdering);
2567:     PetscLogObjectParent(p, q->nodeOrdering);

2569:     /* Permute arrays implicitly numbered by node numbers */
2570:     AOApplicationToPetscPermuteReal(q->nodeOrdering, 2, tri->nodes);
2571:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->markers);
2572:     AOApplicationToPetscPermuteInt(q->nodeOrdering, 1, tri->degrees);
2573:     /* Renumber arrays dependent on the canonical node numbering */
2574:     AOApplicationToPetsc(q->nodeOrdering, mesh->numEdges*2,                       tri->edges);
2575:     AOApplicationToPetsc(q->nodeOrdering, p->numOverlapElements*mesh->numCorners, tri->faces);
2576:     AOApplicationToPetsc(q->nodeOrdering, mesh->numBdNodes,                       tri->bdNodes);
2577:   }
2578:   return(0);
2579: }
2580: EXTERN_C_END

2582: EXTERN_C_BEGIN
2583: int PartitionCreate_NodeBased(Partition p) {
2584: #ifdef PETSC_USE_BOPT_g
2585:   int cut; /* The number of edges of the dual crossing the partition */
2586: #endif

2590:   /* Partition Nodes */
2591:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateNodeMap",
2592:                                     "PartitionCreateNodeMap_Simple_Seq", (void (*)(void)) PartitionCreateNodeMap_Simple_Seq);
2593: 
2594:   PartitionNodes_Private(p);

2596:   /* Partition elements */
2597:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateElementMap",
2598:                                     "PartitionCreateElementMap_NodeBased", (void (*)(void)) PartitionCreateElementMap_NodeBased);
2599: 
2600:   PartitionElements_Private(p);

2602:   /* Partition edges */
2603:   PetscObjectComposeFunction((PetscObject) p, "PartitionTriangular2D_CreateEdgeMap",
2604:                                     "PartitionCreateEdgeMap_NodeBased", (void (*)(void)) PartitionCreateEdgeMap_NodeBased);
2605: 
2606:   PartitionEdges_Private(p);

2608:   /* Partition boundary nodes */
2609:   PartitionBoundaryNodes_Private(p);

2611:   /* Redistribute structures and arrays implicitly numbered by canonical numbers */
2612:   PartitionDistribute_Private(p);

2614:   /* Change to local node numbers */
2615:   PartitionGlobalToLocal_Private(p);

2617: #ifdef PETSC_USE_BOPT_g
2618:   /* Compute the size of the cut */
2619:   PartitionCalcCut_Private(p, &cut);
2620:   PetscLogInfo(p, "Size of cut: %d\n", cut);
2621: #endif

2623:   return(0);
2624: }
2625: EXTERN_C_END


2628: EXTERN_C_BEGIN
2629: int PartitionCreate_Triangular_2D(Partition p) {
2630:   Partition_Triangular_2D *q;
2631:   Mesh                     mesh = p->mesh;
2632:   int                    (*f)(Partition);
2633:   int                      numProcs, rank, rem;
2634:   int                      proc;
2635:   int                      ierr;

2638:   MPI_Comm_size(p->comm, &numProcs);
2639:   MPI_Comm_rank(p->comm, &rank);

2641:   PetscNew(Partition_Triangular_2D, &q);
2642:   PetscLogObjectMemory(p, sizeof(Partition_Triangular_2D));
2643:   PetscMemcpy(p->ops, &POps, sizeof(struct _PartitionOps));
2644:   p->data = (void *) q;
2645:   PetscStrallocpy(PARTITION_SER_TRIANGULAR_2D_BINARY, &p->serialize_name);
2646:   PetscLogObjectParent(mesh, p);

2648:   /* Initialize structure */
2649:   p->numProcs             = numProcs;
2650:   p->rank                 = rank;
2651:   p->isElementPartitioned = PETSC_FALSE;
2652:   p->ordering             = PETSC_NULL;
2653:   p->ghostElements        = PETSC_NULL;
2654:   p->ghostElementProcs    = PETSC_NULL;
2655:   q->isNodePartitioned    = PETSC_FALSE;
2656:   q->isEdgePartitioned    = PETSC_FALSE;
2657:   q->nodeOrdering         = PETSC_NULL;
2658:   q->ghostNodes           = PETSC_NULL;
2659:   q->ghostNodeProcs       = PETSC_NULL;
2660:   q->edgeOrdering         = PETSC_NULL;
2661:   q->ghostBdNodes         = PETSC_NULL;
2662:   PetscMalloc((numProcs+1) * sizeof(int), &p->firstElement);
2663:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstNode);
2664:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstBdNode);
2665:   PetscMalloc((numProcs+1) * sizeof(int), &q->firstEdge);
2666:   PetscLogObjectMemory(p, (numProcs+1)*4 * sizeof(int));
2667:   PetscMemzero(p->firstElement, (numProcs+1) * sizeof(int));
2668:   PetscMemzero(q->firstNode,    (numProcs+1) * sizeof(int));
2669:   PetscMemzero(q->firstBdNode,  (numProcs+1) * sizeof(int));
2670:   PetscMemzero(q->firstEdge,    (numProcs+1) * sizeof(int));

2672:   /* Set up a crude preliminary partition */
2673:   for(proc = 0; proc < numProcs; proc++) {
2674:     rem                   = (mesh->numFaces%numProcs);
2675:     p->firstElement[proc] = (mesh->numFaces/numProcs)*proc + PetscMin(rem, proc);
2676:     rem                   = (mesh->numNodes%numProcs);
2677:     q->firstNode[proc]    = (mesh->numNodes/numProcs)*proc + PetscMin(rem, proc);
2678:     rem                   = (mesh->numEdges%numProcs);
2679:     q->firstEdge[proc]    = (mesh->numEdges/numProcs)*proc + PetscMin(rem, proc);
2680:     rem                   = (mesh->numBdNodes%numProcs);
2681:     q->firstBdNode[proc]  = (mesh->numBdNodes/numProcs)*proc + PetscMin(rem, proc);
2682:   }
2683:   p->firstElement[numProcs] = mesh->numFaces;
2684:   q->firstNode[numProcs]    = mesh->numNodes;
2685:   q->firstEdge[numProcs]    = mesh->numEdges;
2686:   q->firstBdNode[numProcs]  = mesh->numBdNodes;

2688:   p->numLocElements            = p->firstElement[rank+1] - p->firstElement[rank];
2689:   p->numElements               = p->firstElement[numProcs];
2690:   p->numOverlapElements        = p->numLocElements;
2691:   q->numLocNodes               = q->firstNode[rank+1] - q->firstNode[rank];
2692:   q->numNodes                  = q->firstNode[numProcs];
2693:   q->numOverlapNodes           = q->numLocNodes;
2694:   q->numLocEdges               = q->firstEdge[rank+1] - q->firstEdge[rank];
2695:   q->numEdges                  = q->firstEdge[numProcs];
2696:   q->numLocBdNodes             = q->firstBdNode[rank+1] - q->firstBdNode[rank];
2697:   q->numBdNodes                = q->firstBdNode[numProcs];
2698:   q->numOverlapBdNodes         = q->numLocBdNodes;

2700:   /* Partition the mesh */
2701:   PetscObjectQueryFunction((PetscObject) p,"PartitionTriangular2D_Create_C",(PetscVoidFunction) &f);
2702:   (*f)(p);

2704:   /* Recalculate derived quantities */
2705:   MeshTriangular2DCalcAreas(mesh, PETSC_FALSE);
2706:   MeshTriangular2DCalcAspectRatios(mesh, PETSC_FALSE);

2708:   return(0);
2709: }
2710: EXTERN_C_END
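
  The crude preliminary partition in PartitionCreate_Triangular_2D() gives each
  processor N/P entities and hands the N%P leftovers, one apiece, to the
  lowest-numbered processors.  A small worked example of the formula:

    #include <stdio.h>

    int main(void)
    {
      int N = 10, P = 3, rem = N % P;
      int first[4], proc;

      for (proc = 0; proc < P; proc++)
        first[proc] = (N/P)*proc + ((rem < proc) ? rem : proc);  /* same as PetscMin(rem, proc) */
      first[P] = N;

      for (proc = 0; proc < P; proc++)
        printf("proc %d: [%d, %d)  %d entities\n",
               proc, first[proc], first[proc+1], first[proc+1]-first[proc]);
      return 0;  /* 4, 3, and 3 entities on processors 0, 1, and 2 */
    }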