Actual source code: varorder2d.c

#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: varorder2d.c,v 1.8 2000/01/30 18:31:33 huangp Exp $";
#endif

#include "src/grid/gridimpl.h"         /*I "grid.h" I*/
#include "src/mesh/impls/triangular/2d/2dimpl.h"
#include "varorder2d.h"
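
/* This file builds the variable orderings for 2D triangular grids: a global ordering over the
   nodes of a field class map (GridCreateVarOrdering_Triangular_2D), a constrained version of an
   existing ordering (GridVarOrderingConstrain_Triangular_2D), a scatter from a full ordering onto
   an embedded one (GridCreateVarScatter_Triangular_2D), and the element-level ordering
   (GridCreateLocalVarOrdering_Triangular_2D). */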

int GridCreateVarOrdering_Triangular_2D(Grid grid, FieldClassMap map, VarOrdering *order)
{
  Partition_Triangular_2D *q               = (Partition_Triangular_2D *) grid->mesh->part->data;
  PetscConstraintObject    constCtx        = grid->constraintCtx;
  int                      numFields       = map->numFields;
  int                     *fields          = map->fields;
  int                      numNodes        = map->numNodes;
  int                      numOverlapNodes = map->numOverlapNodes;
  int                      numGhostNodes   = map->numGhostNodes;
  int                      numClasses      = map->numClasses;
  int                    **fieldClasses    = map->fieldClasses;
  int                     *classes         = map->classes;
  int                     *classSizes      = map->classSizes;
  int                     *localOffsets;
  int                      numNewVars;
  VarOrdering              o;
  /* Ghost variable communication */
  int                     *ghostSendVars;    /* Number of ghost variables on a given processor that are interior to this domain */
  int                     *sumSendVars;      /* Prefix sums of ghostSendVars */
  int                     *ghostRecvVars;    /* Number of ghost variables on a given processor */
  int                     *sumRecvVars;      /* Prefix sums of ghostRecvVars */
  int                     *displs;           /* Offsets into ghostRecvVars */
  int                      numSendGhostVars; /* The number of ghost variable offsets to send to other processors */
  int                     *sendGhostBuffer;  /* Recv: global node numbers; Send: offsets of these nodes */
  int                      numProcs, rank;
  int                      proc, f, field, comp, node, locNode, gNode, nclass, var;
  int                      ierr;

  /* Create the ordering */
  PetscHeaderCreate(o, _VarOrdering, int, VAR_ORDER_COOKIE, 0, "VarOrdering", grid->comm, VarOrderingDestroy, 0);
  PetscLogObjectCreate(o);
  PetscObjectCompose((PetscObject) o, "ClassMap", (PetscObject) map);
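  /* The class map is attached to the ordering here, presumably so that it can be recovered
     later with VarOrderingGetClassMap(), as is done in the functions below. */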

  /* Allocate memory */
  MPI_Comm_size(grid->comm, &numProcs);
  MPI_Comm_rank(grid->comm, &rank);
  GridGetNumFields(grid, &o->numTotalFields);
  PetscMalloc((numProcs+1)      * sizeof(int),   &o->firstVar);
  PetscMalloc(numOverlapNodes   * sizeof(int),   &o->offsets);
  PetscMalloc(o->numTotalFields * sizeof(int *), &o->localStart);
  PetscLogObjectMemory(o, (numProcs+1 + numOverlapNodes + o->numTotalFields*numClasses)*sizeof(int) + o->numTotalFields*sizeof(int *));
  PetscMemzero(o->localStart, o->numTotalFields * sizeof(int *));
  o->numLocNewVars = 0;
  o->numNewVars    = 0;

  /* Set up the domain variable numbering */
  o->offsets[0] = 0;
  for(node = 1; node < numNodes; node++)
    o->offsets[node] = o->offsets[node-1] + classSizes[classes[node-1]];
  o->numLocVars = o->offsets[numNodes-1] + classSizes[classes[numNodes-1]];
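  /* At this point o->offsets[] is an exclusive prefix sum of the per-node variable counts, so
     o->offsets[node] is the local index of the first variable on that node.  For example, if
     the first three nodes have class sizes 2, 3, and 2, their offsets are 0, 2, and 5, and
     they contribute 7 variables to o->numLocVars. */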
  if (map->isConstrained == PETSC_TRUE) {
    (*constCtx->ops->getsize)(constCtx, &o->numLocNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
    o->numLocVars += o->numLocNewVars;
  }
  MPI_Allgather(&o->numLocVars, 1, MPI_INT, &o->firstVar[1], 1, MPI_INT, o->comm);
  o->firstVar[0] = 0;
  for(proc = 1; proc <= numProcs; proc++)
    o->firstVar[proc] += o->firstVar[proc-1];
  o->numVars = o->firstVar[numProcs];
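  /* Each process gathers every other process's local variable count, and the running sum turns
     the counts into ranges: o->firstVar[p] is the global index of the first variable owned by
     process p, and o->firstVar[numProcs] is the total number of variables. */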
  if (map->isConstrained == PETSC_TRUE) {
    (*constCtx->ops->getsize)(constCtx, PETSC_NULL, &o->numNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
    MPI_Allreduce(&o->numLocNewVars, &numNewVars, 1, MPI_INT, MPI_SUM, o->comm);
    if (o->numNewVars != numNewVars)
      SETERRQ(PETSC_ERR_PLIB, "Invalid partition of new variables");
  }

  /* Initialize overlap size */
  o->numOverlapVars    = o->numLocVars;
  o->numOverlapNewVars = o->numLocNewVars;

  if (numProcs > 1) {
    /* Map local to global variable numbers */
    for(node = 0; node < numNodes; node++)
      o->offsets[node] += o->firstVar[rank];

#if 0
    GridGhostExchange(o->comm, numGhostNodes, q->ghostNodeProcs, q->ghostNodes, PETSC_INT,
                      q->firstNode, INSERT_VALUES, SCATTER_FORWARD, o->offsets, &o->offsets[numNodes]);
#else
    /* Initialize communication */
    PetscMalloc(numProcs * sizeof(int), &ghostSendVars);
    PetscMalloc(numProcs * sizeof(int), &sumSendVars);
    PetscMalloc(numProcs * sizeof(int), &ghostRecvVars);
    PetscMalloc(numProcs * sizeof(int), &sumRecvVars);
    PetscMalloc(numProcs * sizeof(int), &displs);
    PetscMemzero(ghostSendVars, numProcs * sizeof(int));
    PetscMemzero(sumSendVars,   numProcs * sizeof(int));
    PetscMemzero(ghostRecvVars, numProcs * sizeof(int));
    PetscMemzero(sumRecvVars,   numProcs * sizeof(int));
    PetscMemzero(displs,        numProcs * sizeof(int));
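    /* The disabled GridGhostExchange() call above would perform this exchange in one step; the
       code below does it by hand: count the ghost variables needed from each processor, exchange
       the counts with MPI_Alltoall(), then trade node numbers for variable offsets with two
       calls to MPI_Alltoallv(). */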

    /* Get the number of ghost variables to receive from each processor and the size of each block --
       we assume here that classes[] already contains the ghost node classes */
    for(node = 0; node < numGhostNodes; node++) {
      gNode  = q->ghostNodes[node];
      proc   = q->ghostNodeProcs[node];
      nclass = classes[numNodes+node];
      ghostRecvVars[proc]++;
      o->numOverlapVars += classSizes[nclass];
    }

    /* Get number of constrained ghost variables to receive from each processor and size of blocks */
    if (map->isConstrained == PETSC_TRUE) {
      (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, &o->numOverlapNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
    }
    o->numOverlapVars += o->numOverlapNewVars - o->numLocNewVars;

    /* Get sizes of ghost variable blocks to send to each processor */
    MPI_Alltoall(ghostRecvVars, 1, MPI_INT, ghostSendVars, 1, MPI_INT, o->comm);
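    /* The all-to-all transposes the counts: ghostSendVars[proc] now holds the number of locally
       owned nodes that processor proc needs as ghosts, i.e. the number of offset requests that
       will arrive from proc. */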

    /* Calculate offsets into the ghost variable receive array */
    for(proc = 1; proc < numProcs; proc++) {
      sumRecvVars[proc] = sumRecvVars[proc-1] + ghostRecvVars[proc-1];
      displs[proc]      = sumRecvVars[proc];
    }

    /* Calculate offsets into the ghost variable send array */
    for(proc = 1; proc < numProcs; proc++)
      sumSendVars[proc] = sumSendVars[proc-1] + ghostSendVars[proc-1];

    /* Send requests for ghost variable offsets to each processor */
    numSendGhostVars = sumSendVars[numProcs-1] + ghostSendVars[numProcs-1];
    PetscMalloc(numSendGhostVars * sizeof(int), &sendGhostBuffer);
    for(node = 0; node < numGhostNodes; node++) {
      gNode  = q->ghostNodes[node];
      proc   = q->ghostNodeProcs[node];
      o->offsets[numNodes+(displs[proc]++)] = gNode;
    }
    MPI_Alltoallv(&o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT,
                  sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT, o->comm);
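    /* After this exchange sendGhostBuffer[] holds the global numbers of the nodes whose offsets
       the other processors requested; every entry should refer to a node owned by this process,
       which the debugging check below verifies. */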

    /* Send the ghost variable offsets back to each processor */
    for(node = 0; node < numSendGhostVars; node++) {
#ifdef PETSC_USE_BOPT_g
      if ((sendGhostBuffer[node] < q->firstNode[rank]) || (sendGhostBuffer[node] >= q->firstNode[rank+1]))
        SETERRQ3(PETSC_ERR_PLIB, "Invalid request for variable offset of local node %d: should be in [%d,%d)",
                 sendGhostBuffer[node], q->firstNode[rank], q->firstNode[rank+1])
#endif
      locNode = sendGhostBuffer[node] - q->firstNode[rank];
      sendGhostBuffer[node] = o->offsets[locNode];
    }
    MPI_Alltoallv(sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT,
                  &o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT, o->comm);

    /* Cleanup */
    PetscFree(ghostSendVars);
    PetscFree(sumSendVars);
    PetscFree(ghostRecvVars);
    PetscFree(sumRecvVars);
    PetscFree(displs);
    PetscFree(sendGhostBuffer);
#endif

    /* We maintain local offsets for ghost variables, meaning the offsets after the last
       interior variable, rather than the offset of the given ghost variable in the global
       matrix. */
    PetscMalloc(numGhostNodes * sizeof(int), &o->localOffsets);
    for(node = 0, var = o->numLocVars; node < numGhostNodes; node++) {
      o->localOffsets[node] = var;
      nclass = classes[numNodes+node];
      var   += classSizes[nclass];
    }
  }
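  /* For example, with numLocVars = 10 and two ghost nodes of class sizes 2 and 3, the ghost
     variables occupy local indices 10..14, localOffsets = {10, 12}, and numOverlapVars = 15
     (ignoring any constrained variables). */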

  /* Allocate memory */
  PetscMalloc(numClasses * sizeof(int), &localOffsets);
  PetscMemzero(localOffsets, numClasses * sizeof(int));

  /* Set up the local field offsets */
  for(f = 0; f < numFields; f++) {
    field = fields[f];
    ierr  = PetscMalloc(numClasses * sizeof(int), &o->localStart[field]);CHKERRQ(ierr);
    for(nclass = 0; nclass < numClasses; nclass++) {
      if (fieldClasses[f][nclass]) {
        GridGetFieldComponents(grid, field, &comp);
        o->localStart[field][nclass]  = localOffsets[nclass];
        localOffsets[nclass]         += comp;
      } else {
        o->localStart[field][nclass]  = -1;
      }
    }
  }
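  /* localStart[field][class] is the offset of the field's first component within a node of the
     given class, or -1 if the field is not present on that class.  The global index of an
     unknown is therefore offsets[node] + localStart[field][class] + component, which is exactly
     how GridCreateVarScatter_Triangular_2D() addresses variables below. */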
  *order = o;

  /* Cleanup */
  PetscFree(localOffsets);

  return(0);
}
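
/* GridVarOrderingConstrain_Triangular_2D - Builds the variable ordering for the constrained
   problem from an existing ordering: the numbering is recomputed with the constrained class
   sizes and the new constraint variables, and the field offsets of the old ordering are copied
   onto the new classes with the components of the constrained field removed. */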

int GridVarOrderingConstrain_Triangular_2D(Grid grid, int constField, PetscConstraintObject constCtx,
                                           FieldClassMap constMap, VarOrdering oldOrder, VarOrdering *order)
{
  Partition_Triangular_2D *q                  = (Partition_Triangular_2D *) grid->mesh->part->data;
  int                      numFields          = constMap->numFields;
  int                     *fields             = constMap->fields;
  int                      numNodes           = constMap->numNodes;
  int                      numOverlapNodes    = constMap->numOverlapNodes;
  int                      numGhostNodes      = constMap->numGhostNodes;
  int                      numClasses         = constMap->numClasses;
  int                     *classes            = constMap->classes;
  int                     *classMap           = constMap->classMap[constMap->mapSize-1];
  int                    **localStart         = oldOrder->localStart;
  int                      numClassesOld;
  int                      comp;
  FieldClassMap            map;
  VarOrdering              o;
  /* Ghost variable communication */
  int                     *ghostSendVars;    /* Number of ghost variables on a given processor that are interior to this domain */
  int                     *sumSendVars;      /* Prefix sums of ghostSendVars */
  int                     *ghostRecvVars;    /* Number of ghost variables on a given processor */
  int                     *sumRecvVars;      /* Prefix sums of ghostRecvVars */
  int                     *displs;           /* Offsets into ghostRecvVars */
  int                      numSendGhostVars; /* The number of ghost variable offsets to send to other processors */
  int                     *sendGhostBuffer;  /* Recv: global node numbers; Send: offsets of these nodes */
  int                      numProcs, rank;
  int                      proc, f, field, node, locNode, gNode, nclass, i, var;
  int                      ierr;

  /* Create the ordering */
  PetscHeaderCreate(o, _VarOrdering, int, VAR_ORDER_COOKIE, 0, "VarOrdering", grid->comm, VarOrderingDestroy, 0);
  PetscLogObjectCreate(o);
  PetscObjectCompose((PetscObject) o, "ClassMap", (PetscObject) constMap);

  /* Allocate memory */
  MPI_Comm_size(grid->comm, &numProcs);
  MPI_Comm_rank(grid->comm, &rank);
  GridGetFieldComponents(grid, constField, &comp);
  o->numTotalFields = oldOrder->numTotalFields;
  PetscMalloc((numProcs+1)      * sizeof(int),   &o->firstVar);
  PetscMalloc(numOverlapNodes   * sizeof(int),   &o->offsets);
  PetscMalloc(o->numTotalFields * sizeof(int *), &o->localStart);
  PetscLogObjectMemory(o, (numProcs+1 + numOverlapNodes + o->numTotalFields*numClasses) * sizeof(int) +
                       o->numTotalFields * sizeof(int *));
  PetscMemzero(o->localStart, o->numTotalFields * sizeof(int *));

  /* Set up the domain variable numbering */
  if (numOverlapNodes < numNodes) SETERRQ(PETSC_ERR_PLIB, "Invalid node partition");
  o->offsets[0] = 0;
  for(node = 1; node < numNodes; node++)
    o->offsets[node]    = o->offsets[node-1] + constMap->classSizes[classes[node-1]];
  o->numLocVars = o->offsets[numNodes-1] + constMap->classSizes[classes[numNodes-1]];
  (*constCtx->ops->getsize)(constCtx, &o->numLocNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
  o->numLocVars        += o->numLocNewVars;
  MPI_Allgather(&o->numLocVars, 1, MPI_INT, &o->firstVar[1], 1, MPI_INT, o->comm);
  o->firstVar[0] = 0;
  for(proc = 1; proc <= numProcs; proc++)
    o->firstVar[proc] += o->firstVar[proc-1];
  o->numVars = o->firstVar[numProcs];
#ifdef PETSC_USE_BOPT_g
  (*constCtx->ops->getsize)(constCtx, PETSC_NULL, &o->numNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
  MPI_Allreduce(&o->numLocNewVars, &i, 1, MPI_INT, MPI_SUM, o->comm);
  if (o->numNewVars != i) SETERRQ(PETSC_ERR_PLIB, "Invalid partition of new variables");
#endif

  /* Initialize variable overlap */
  o->numOverlapVars    = o->numLocVars;
  o->numOverlapNewVars = o->numLocNewVars;

  if (numProcs > 1) {
    /* Map local to global variable numbers */
    for(node = 0; node < numNodes; node++)
      o->offsets[node] += o->firstVar[rank];
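    /* The ghost variable exchange below is essentially the same as the one in
       GridCreateVarOrdering_Triangular_2D(), but with the constrained class sizes
       (constMap->classSizes). */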

    /* Initialize communication */
    PetscMalloc(numProcs * sizeof(int), &ghostSendVars);
    PetscMalloc(numProcs * sizeof(int), &sumSendVars);
    PetscMalloc(numProcs * sizeof(int), &ghostRecvVars);
    PetscMalloc(numProcs * sizeof(int), &sumRecvVars);
    PetscMalloc(numProcs * sizeof(int), &displs);
    PetscMemzero(ghostSendVars, numProcs * sizeof(int));
    PetscMemzero(sumSendVars,   numProcs * sizeof(int));
    PetscMemzero(ghostRecvVars, numProcs * sizeof(int));
    PetscMemzero(sumRecvVars,   numProcs * sizeof(int));
    PetscMemzero(displs,        numProcs * sizeof(int));

    /* Get the number of ghost variables to receive from each processor and the size of each block --
       we assume here that classes[] already contains the ghost node classes */
    for(node = 0; node < numGhostNodes; node++) {
      gNode  = q->ghostNodes[node];
      proc   = q->ghostNodeProcs[node];
      nclass = classes[numNodes+node];
      ghostRecvVars[proc]++;
      o->numOverlapVars += constMap->classSizes[nclass];
    }

    /* Get number of constrained ghost variables to receive from each processor and size of blocks */
    (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, &o->numOverlapNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
    o->numOverlapVars += o->numOverlapNewVars - o->numLocNewVars;

    /* Get sizes of ghost variable blocks to send to each processor */
    MPI_Alltoall(ghostRecvVars, 1, MPI_INT, ghostSendVars, 1, MPI_INT, o->comm);

    /* Calculate offsets into the ghost variable receive array */
    for(proc = 1; proc < numProcs; proc++) {
      sumRecvVars[proc] = sumRecvVars[proc-1] + ghostRecvVars[proc-1];
      displs[proc]      = sumRecvVars[proc];
    }

    /* Calculate offsets into the ghost variable send array */
    for(proc = 1; proc < numProcs; proc++)
      sumSendVars[proc] = sumSendVars[proc-1] + ghostSendVars[proc-1];

    /* Send requests for ghost variable offsets to each processor */
    numSendGhostVars = sumSendVars[numProcs-1] + ghostSendVars[numProcs-1];
    PetscMalloc(numSendGhostVars * sizeof(int), &sendGhostBuffer);
    for(node = 0; node < numGhostNodes; node++) {
      gNode  = q->ghostNodes[node];
      proc   = q->ghostNodeProcs[node];
      o->offsets[numNodes+(displs[proc]++)] = gNode;
    }
    MPI_Alltoallv(&o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT,
                  sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT, o->comm);

    /* Send the ghost variable offsets back to each processor */
    for(node = 0; node < numSendGhostVars; node++) {
#ifdef PETSC_USE_BOPT_g
      if ((sendGhostBuffer[node] < q->firstNode[rank]) || (sendGhostBuffer[node] >= q->firstNode[rank+1]))
        SETERRQ3(PETSC_ERR_PLIB, "Invalid request for variable offset of local node %d: should be in [%d,%d)",
                 sendGhostBuffer[node], q->firstNode[rank], q->firstNode[rank+1])
#endif
      locNode = sendGhostBuffer[node] - q->firstNode[rank];
      sendGhostBuffer[node] = o->offsets[locNode];
    }
    MPI_Alltoallv(sendGhostBuffer,       ghostSendVars, sumSendVars, MPI_INT,
                  &o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT, o->comm);

    /* Cleanup */
    PetscFree(ghostSendVars);
    PetscFree(sumSendVars);
    PetscFree(ghostRecvVars);
    PetscFree(sumRecvVars);
    PetscFree(displs);
    PetscFree(sendGhostBuffer);

    /* We maintain local offsets for ghost variables, meaning the offsets after the last
       interior variable, rather than the offset of the given ghost variable in the global
       matrix. */
    PetscMalloc(numGhostNodes * sizeof(int), &o->localOffsets);
    for(node = 0, var = o->numLocVars; node < numGhostNodes; node++) {
      o->localOffsets[node] = var;
      nclass = classes[numNodes+node];
      var   += constMap->classSizes[nclass];
    }
  }

  /* Set up the local field offsets */
  VarOrderingGetClassMap(oldOrder, &map);
  numClassesOld = map->numClasses;
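  /* Classes that already existed keep the field offsets of the old ordering.  Each new class
     introduced by the constraints is traced back, through the class map, to the old class it
     came from; its offsets are copied from that class, and any field laid out after the
     constrained field is shifted down by the constrained field's component count, since that
     field no longer occupies space on the new class. */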
  for(f = 0; f < numFields; f++) {
    field = fields[f];
    ierr  = PetscMalloc(numClasses * sizeof(int), &o->localStart[field]);CHKERRQ(ierr);
    for(nclass = 0; nclass < numClassesOld; nclass++) {
      o->localStart[field][nclass] = localStart[field][nclass];
    }
    for(i = numClassesOld; i < numClasses; i++) {
      /* Invert the class map */
      for(nclass = 0; nclass < numClassesOld; nclass++) {
        if (classMap[nclass] == i) break;
      }
      if (nclass >= numClassesOld) SETERRQ(PETSC_ERR_PLIB, "Invalid class map");

      /* Subtract out the constrained fields */
      o->localStart[field][i]    = localStart[field][nclass];
      if (localStart[field][nclass] > localStart[constField][nclass])
        o->localStart[field][i] -= comp;
    }
  }

  *order = o;
  return(0);
}
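
/* GridCreateVarScatter_Triangular_2D - Builds a VecScatter that pulls the variables of the
   embedded ordering (embedOrder) out of a vector laid out by the full ordering (order) and
   places them contiguously in the embedded vector. */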

int GridCreateVarScatter_Triangular_2D(Grid grid, VarOrdering order, GVec vec, VarOrdering embedOrder, GVec embedVec,
                                       VecScatter *scatter)
{
  PetscConstraintObject constCtx = grid->constraintCtx;
  FieldClassMap         map, embedMap;
  int                   numEmbedFields, numNodes;
  int                  *embedFields, *classes, *embedClassSizes;
  int                   numLocVars, numEmbedLocVars, numNewLocVars;
  int                  *offsets, **localStart;
  IS                    is, embedIS;
  int                  *indices;
  PetscTruth            isConstrained;
  int                   node, nclass, f, field, comp, var, count;
  int                   ierr;

  /* Retrieve orderings */
  VarOrderingGetClassMap(order,      &map);
  VarOrderingGetClassMap(embedOrder, &embedMap);
  numNodes        = map->numNodes;
  classes         = map->classes;
  numLocVars      = order->numLocVars;
  offsets         = order->offsets;
  localStart      = order->localStart;
  numEmbedFields  = embedMap->numFields;
  embedFields     = embedMap->fields;
  embedClassSizes = embedMap->classSizes;
  numEmbedLocVars = embedOrder->numLocVars;

  PetscMalloc(numEmbedLocVars * sizeof(int), &indices);
  for(node = 0, count = 0; node < numNodes; node++) {
    nclass = classes[node];
    if (embedClassSizes[nclass] > 0) {
      for(f = 0; f < numEmbedFields; f++) {
        field = embedFields[f];
        if (localStart[field][nclass] >= 0) {
          GridGetFieldComponents(grid, field, &comp);
          for(var = 0; var < comp; var++)
            indices[count++] = offsets[node] + localStart[field][nclass] + var;
        }
      }
    }
  }
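  /* indices[] now lists, in the order of the embedded variables, the global indices of the
     corresponding variables in the full ordering: for every node and every embedded field
     present on the node's class, one index per component, computed as
     offsets[node] + localStart[field][class] + component. */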
  /* Handle new variables */
  GridIsConstrained(grid, &isConstrained);
  if ((isConstrained == PETSC_TRUE) && (count < numEmbedLocVars)) {
    (*constCtx->ops->getsize)(constCtx, &numNewLocVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
    if (count != numEmbedLocVars - numNewLocVars) SETERRQ(PETSC_ERR_PLIB, "Inconsistent variable orderings");
    /* Fill in the new (constraint) variables */
    for(var = numNewLocVars; var > 0; var--)
      indices[count++] = numLocVars - var;
  }
  if (count != numEmbedLocVars) SETERRQ(PETSC_ERR_PLIB, "Inconsistent variable orderings");

  /* Create mappings */
  ISCreateGeneral(order->comm, numEmbedLocVars, indices, &is);
  ISCreateStride(order->comm, numEmbedLocVars, 0, 1, &embedIS);

  /* Create the scatter */
  VecScatterCreate(vec, is, embedVec, embedIS, scatter);

  /* Cleanup */
  ISDestroy(is);
  ISDestroy(embedIS);
  PetscFree(indices);

  return(0);
}
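
/* GridCreateLocalVarOrdering_Triangular_2D - Builds the element-level ordering: elemStart[field]
   is the offset of each requested field within the element vector, and elemSize is the total
   number of element degrees of freedom, taken from the size of each field's discretization. */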

int GridCreateLocalVarOrdering_Triangular_2D(Grid grid, int numFields, int *fields, LocalVarOrdering *order)
{
  LocalVarOrdering o;
  int              elemOffset;
  int              f, field;
  int              ierr;

  /* Create the ordering */
  PetscHeaderCreate(o, _LocalVarOrdering, int, VAR_ORDER_COOKIE, 0, "LocalVarOrdering", grid->comm, LocalVarOrderingDestroy, 0);
  PetscLogObjectCreate(o);

  /* Allocate memory */
  o->numFields = numFields;
  PetscMalloc(numFields       * sizeof(int), &o->fields);
  PetscMalloc(grid->numFields * sizeof(int), &o->elemStart);
  PetscLogObjectMemory(o, (numFields + grid->numFields) * sizeof(int));
  PetscMemcpy(o->fields, fields, numFields * sizeof(int));

  /* Put in sentinel values */
  for(f = 0; f < grid->numFields; f++) {
    o->elemStart[f] = -1;
  }

  /* Set up the element offsets for each field */
  for(f = 0, elemOffset = 0; f < numFields; f++) {
    field               = fields[f];
    o->elemStart[field] = elemOffset;
    elemOffset         += grid->fields[field].disc->size;
  }
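  /* For example, with two fields whose discretizations have sizes 6 and 3, elemStart for those
     fields is 0 and 6 (with -1 for every field not requested) and elemSize is 9. */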
  o->elemSize = elemOffset;
  *order = o;

  return(0);
}