Actual source code: grid2d.c
1: #ifdef PETSC_RCS_HEADER
2: static char vcid[] = "$Id: grid2d.c,v 1.31 2000/07/16 23:20:01 knepley Exp $";
3: #endif
5: /* Implements 2d triangular grids */
6: #include "petscts.h"
7: #include "gsolver.h"
8: #include "src/grid/gridimpl.h" /*I "grid.h" I*/
9: #include "src/mesh/impls/triangular/2d/2dimpl.h"
10: #include "src/gvec/impls/triangular/2d/gvec2d.h"
11: #include "src/gvec/impls/triangular/2d/gvec2dView.h"
12: #include "src/gvec/impls/triangular/2d/gmat2d.h"
13: #include "elemvec2d.h"
14: #include "varorder2d.h"
16: extern int GridResetConstrainedMultiply_Private(Grid, GMat);
18: static int GridDestroy_Triangular_2D(Grid grid)
19: {
20: int field, bd;
21: int ierr;
24: /* Field variables */
25: for(field = 0; field < grid->numFields; field++) {
26: if (grid->fields[field].name != PETSC_NULL) {
27: PetscFree(grid->fields[field].name);
28: }
29: PetscFree(grid->fields[field].discType);
30: DiscretizationDestroy(grid->fields[field].disc);
31: }
32: PetscFree(grid->fields);
33: /* Class variables */
34: if (grid->cm) {
35: FieldClassMapDestroy(grid->cm);
36: }
37: /* Default variable orderings */
38: if (grid->order) {
39: VarOrderingDestroy(grid->order);
40: }
41: if (grid->locOrder) {
42: LocalVarOrderingDestroy(grid->locOrder);
43: }
44: /* Ghost variable scatter */
45: if (grid->ghostVec) {
46: VecDestroy(grid->ghostVec);
47: }
48: if (grid->ghostScatter) {
49: VecScatterDestroy(grid->ghostScatter);
50: }
51: /* Constraint variables */
52: if (grid->constraintCM) {
53: FieldClassMapDestroy(grid->constraintCM);
54: }
55: if (grid->constraintOrder) {
56: VarOrderingDestroy(grid->constraintOrder);
57: }
58: if (grid->constraintOrdering) {
59: ISDestroy(grid->constraintOrdering);
60: }
61: if (grid->constraintMatrix) {
62: MatDestroy(grid->constraintMatrix);
63: }
64: if (grid->constraintInverse) {
65: MatDestroy(grid->constraintInverse);
66: }
67: /* Problem variables */
68: PetscFree(grid->rhsFuncs);
69: PetscFree(grid->rhsOps);
70: PetscFree(grid->matOps);
71: /* Assembly variables */
72: PetscFree(grid->defaultFields);
73: if (grid->vec) {
74: ElementVecDestroy(grid->vec);
75: }
76: if (grid->mat) {
77: ElementMatDestroy(grid->mat);
78: }
79: if (grid->ghostElementVec) {
80: ElementVecDestroy(grid->ghostElementVec);
81: }
82: /* Boundary condition variables */
83: if (grid->reductionCM) {
84: FieldClassMapDestroy(grid->reductionCM);
85: }
86: if (grid->reduceOrder) {
87: VarOrderingDestroy(grid->reduceOrder);
88: }
89: if (grid->locReduceOrder) {
90: LocalVarOrderingDestroy(grid->locReduceOrder);
91: }
92: PetscFree(grid->bc);
93: PetscFree(grid->pointBC);
94: /* Boundary iteration variables */
95: for(bd = 0; bd < grid->numBd; bd++) {
96: if (grid->bdSize[bd] != PETSC_NULL) {
97: PetscFree(grid->bdSize[bd]);
98: }
99: }
100: PetscFree(grid->bdSize);
101: if (grid->bdOrder) {
102: VarOrderingDestroy(grid->bdOrder);
103: }
104: if (grid->bdLocOrder) {
105: LocalVarOrderingDestroy(grid->bdLocOrder);
106: }
107: /* Subobjects */
108: MeshDestroy(grid->mesh);
109: return(0);
110: }
112: static int GridView_Triangular_2D_File(Grid grid, PetscViewer viewer)
113: {
114: VarOrdering order = grid->order;
115: FILE *fd;
116: int rank, field;
117: int ierr;
120: MPI_Comm_rank(grid->comm, &rank);
121: PetscViewerASCIIGetPointer(viewer, &fd);
122: PetscFPrintf(grid->comm, fd, "Grid Object:\n");
123: if (grid->numFields == 1) {
124: PetscFPrintf(grid->comm, fd, " %d field:\n", grid->numFields);
125: } else {
126: PetscFPrintf(grid->comm, fd, " %d fields:\n", grid->numFields);
127: }
128: for(field = 0; field < grid->numFields; field++) {
129: /* Grid structure */
130: if (grid->fields[field].name != PETSC_NULL) {
131: PetscFPrintf(grid->comm, fd, " %s field", grid->fields[field].name);
132: } else {
133: PetscFPrintf(grid->comm, fd, " field %d", field);
134: }
135: if (grid->fields[field].numComp == 1) {
136: PetscFPrintf(grid->comm, fd, " with %d component is ", grid->fields[field].numComp);
137: } else {
138: PetscFPrintf(grid->comm, fd, " with %d components is ", grid->fields[field].numComp);
139: }
140: if (grid->fields[field].isActive) {
141: PetscFPrintf(grid->comm, fd, "active\n ");
142: } else {
143: PetscFPrintf(grid->comm, fd, "inactive\n ");
144: }
145: DiscretizationView(grid->fields[field].disc, viewer);
146: }
148: /* Problem specific information */
149: if (grid->numActiveFields > 0) {
150: PetscFPrintf(grid->comm, fd, " %d variables in the problem:\n", order->numVars);
151: PetscSynchronizedFPrintf(grid->comm, fd, " %d variables and %d ghost variables in domain %d:\n",
152: order->numLocVars, order->numOverlapVars - order->numLocVars, rank);
153: PetscSynchronizedFlush(grid->comm);
154: }
156: /* Underlying mesh */
157: MeshView(grid->mesh, viewer);
158: return(0);
159: }
161: static int GridView_Triangular_2D(Grid grid, PetscViewer viewer)
162: {
163: PetscTruth isascii;
164: int ierr;
167: PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
168: if (isascii == PETSC_TRUE) {
169: GridView_Triangular_2D_File(grid, viewer);
170: }
171: return(0);
172: }
174: static int GridSetupGhostScatter_Triangular_2D(Grid grid, VarOrdering order, Vec *ghostVec, VecScatter *ghostScatter)
175: {
176: FieldClassMap map;
177: PetscConstraintObject constCtx = grid->constraintCtx;
178: int numOverlapVars = order->numOverlapVars;
179: int numLocVars = order->numLocVars;
180: int numVars = order->numVars;
181: int numLocNewVars = order->numLocNewVars;
182: int numOverlapNewVars = order->numOverlapNewVars;
183: int numGhostNewVars = order->numOverlapNewVars - order->numLocNewVars;
184: int *firstVar = order->firstVar;
185: int *offsets = order->offsets;
186: int numNodes, numGhostNodes;
187: int *classes, *classSizes;
188: IS localIS; /* Local indices for local ghost vector variables */
189: int *indices; /* Global indices for local ghost vector variables */
190: IS globalIS; /* Global indices for local ghost vector variables */
191: Vec dummyVec; /* Dummy global vector used to create the ghost variable scatter */
192: int rank, newComp;
193: int node, nclass, var, startVar, newField, i, c;
194: int ierr;
199: VarOrderingGetClassMap(order, &map);
200: numNodes = map->numNodes;
201: numGhostNodes = map->numGhostNodes;
202: classes = map->classes;
203: classSizes = map->classSizes;
205: /* Create the ghost variable scatter -- Note that localOffsets is not used when there are no ghost variables */
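  /* Layout of the local ghost vector built below: the numLocVars locally owned variables come first
     (in global order, starting at firstVar[rank]), followed by one block per ghost node
     (classSizes[class] entries each), and finally any new variables generated by off-processor
     constraints. */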
206: MPI_Comm_rank(grid->comm, &rank);
207: ISCreateStride(grid->comm, numOverlapVars, 0, 1, &localIS);
208: PetscMalloc(numOverlapVars * sizeof(int), &indices);
209: for(var = 0; var < numLocVars; var++) {
210: indices[var] = var + firstVar[rank];
211: }
212: for(node = 0, var = numLocVars; node < numGhostNodes; node++) {
213: nclass = classes[numNodes+node];
214: for(i = 0; i < classSizes[nclass]; i++) {
215: indices[var++] = offsets[numNodes+node] + i;
216: }
217: }
218: if (numGhostNewVars > 0) {
219: /* Add in constraints that generate off-processor variables */
220: (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, &newComp);
221:
222: for(newField = numLocNewVars/newComp; newField < numOverlapNewVars/newComp; newField++) {
223: (*constCtx->ops->getindices)(constCtx, grid->mesh, order, newField, CONSTRAINT_NEW_INDEX, &startVar);
224:
225: for(c = 0; c < newComp; c++, var++) {
226: indices[var] = startVar+c;
227: }
228: }
229: }
230: if (var != numOverlapVars) SETERRQ(PETSC_ERR_PLIB, "Invalid ghost vector numbering");
231: ISCreateGeneral(grid->comm, numOverlapVars, indices, &globalIS);
232: VecCreateMPI(grid->comm, numLocVars, numVars, &dummyVec);
233: VecCreateSeq(PETSC_COMM_SELF, numOverlapVars, ghostVec);
234: VecScatterCreate(dummyVec, globalIS, *ghostVec, localIS, ghostScatter);
235: PetscLogObjectParent(grid, *ghostVec);
236: PetscLogObjectParent(grid, *ghostScatter);
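  /* A minimal usage sketch (assuming the PETSc 2.x calling sequence used elsewhere in this package,
     e.g. by GridGlobalToLocal): values are pulled from a global vector x into the sequential ghost
     vector with
         VecScatterBegin(x, *ghostVec, INSERT_VALUES, SCATTER_FORWARD, *ghostScatter);
         VecScatterEnd(x, *ghostVec, INSERT_VALUES, SCATTER_FORWARD, *ghostScatter);
     after which *ghostVec holds the local variables followed by the ghost variables in the order
     described above. */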
238: /* Cleanup */
239: VecDestroy(dummyVec);
240: ISDestroy(localIS);
241: ISDestroy(globalIS);
242: PetscFree(indices);
243: return(0);
244: }
246: static int GridSetupBoundarySizes_Triangular_2D(Grid grid)
247: {
248: Mesh_Triangular *tri = (Mesh_Triangular *) grid->mesh->data;
249: Partition p = grid->mesh->part;
250: Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
251: int numFields = grid->cm->numFields;
252: int *fields = grid->cm->fields;
253: int numClasses = grid->cm->numClasses;
254: int *classes = grid->cm->classes;
255: int **fieldClasses = grid->cm->fieldClasses;
256: int *bdCount; /* Number of boundary nodes of a given class */
257: int bd, bdNode, f, field, node, nclass;
258: int ierr;
261: PetscMalloc(numClasses * sizeof(int), &bdCount);
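  /* For each boundary bd, bdSize[bd][field] will hold the number of locally owned boundary nodes
     whose equivalence class carries the given field. */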
263: for(bd = 0; bd < grid->numBd; bd++) {
264: /* Count the number of boundary nodes of each class */
265: PetscMemzero(bdCount, numClasses * sizeof(int));
266: for(bdNode = tri->bdBegin[bd]; bdNode < tri->bdBegin[bd+1]; bdNode++) {
267: node = tri->bdNodes[bdNode] - q->firstNode[p->rank];
268: if ((node >= 0) && (node < grid->mesh->numNodes)) {
269: bdCount[classes[node]]++;
270: }
271: }
272: /* Calculate boundary sizes */
273: PetscMemzero(grid->bdSize[bd], grid->numFields * sizeof(int));
274: for(f = 0; f < numFields; f++) {
275: field = fields[f];
276: for(nclass = 0; nclass < numClasses; nclass++) {
277: if (fieldClasses[f][nclass]) {
278: grid->bdSize[bd][field] += bdCount[nclass];
279: }
280: }
281: }
282: }
284: /* Cleanup */
285: PetscFree(bdCount);
286: return(0);
287: }
289: #if 0
290: /*@C
291: GridExtantExchange
292: This function transfers data between local storage in different domains without a predefined mapping.
294: Input Parameters:
295: . numExtants - The number of extants (interior variables) in this domain
296: . extantProcs - The processor to which to send each extant
297: . firstExtant - The first extant variable in each domain
299: . ghostIndices - The global index for each ghost
300: . dataType - The type of the variables
301: . firstVar - The first variable on each processor
302: . addv - The insert mode, INSERT_VALUES or ADD_VALUES
303: . mode - The direction of the transfer, SCATTER_FORWARD or SCATTER_REVERSE
304: . locVars - The local variable array
306: Output Parameters:
307: . firstExtant - The first extant variable in each domain after repartitioning
309: . ghostVars - The ghost variables
311: Note:
312: The data in ghostVars is assumed contiguous and implicitly indexed by the order of
313: ghostProcs and ghostIndices. The SCATTER_FORWARD mode will take the requested data
314: from locVars and copy it to ghostVars in the order specified by ghostIndices. The
315: SCATTER_REVERSE mode will take data from ghostVars and copy it to locVars.
317: Level: developer
319: .keywords ghost, exchange, grid
320: .seealso GridGlobalToLocal, GridLocalToGlobal
321: @*/
322: int GridExtantExchange(MPI_Comm comm, int numExtants, int *extantProcs, int *firstExtant, PetscDataType dataType, AO *ordering,
324:                        int *firstVar, InsertMode addv, ScatterMode mode, void *locVars, void *ghostVars)
325: {
326: int *numSendExtants; /* The number of extants from each domain */
327: int *numRecvExtants; /* The number of extants in each domain */
328: int *sumSendExtants; /* The prefix sums of numSendExtants */
329: int *sumRecvExtants; /* The prefix sums of numRecvExtants */
330: int *offsets; /* The offset into the send array for each domain */
331: int totSendExtants; /* The number of extants to send to other domains */
332: int totRecvExtants; /* The number of extants to receive from other domains */
333: int *sendIndices; /* The canonical indices of extants in this domain */
334: int *recvIndices; /* The canonical indices of extants to return variables for */
335: int *extantIndices; /* The new canonical indices of extants after reordering */
336: char *tempVars; /* The variables of the requested or submitted extants */
337: int numLocVars;
338: char *locBytes = (char *) locVars;
339: MPI_Datatype MPIType;
340: int typeSize;
341: int numProcs, rank;
342: int proc, extant, locIndex, byte;
343: int ierr;
346: /* Initialize communication */
347: MPI_Comm_size(comm, &numProcs);
348: MPI_Comm_rank(comm, &rank);
349: PetscMalloc(numProcs * sizeof(int), &numSendExtants);
350: PetscMalloc(numProcs * sizeof(int), &numRecvExtants);
351: PetscMalloc(numProcs * sizeof(int), &sumSendExtants);
352: PetscMalloc(numProcs * sizeof(int), &sumRecvExtants);
353: PetscMalloc(numProcs * sizeof(int), &offsets);
354: PetscMemzero(numSendExtants, numProcs * sizeof(int));
355: PetscMemzero(numRecvExtants, numProcs * sizeof(int));
356: PetscMemzero(sumSendExtants, numProcs * sizeof(int));
357: PetscMemzero(sumRecvExtants, numProcs * sizeof(int));
358: PetscMemzero(offsets, numProcs * sizeof(int));
359: numLocVars = firstVar[rank+1] - firstVar[rank];
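  /* Outline of the exchange: count the extants destined for each domain, use MPI_Alltoall to learn
     how many will arrive, exchange the canonical extant indices, rebuild firstExtant[] for the new
     partition and record the renumbering in an AO, then move the actual data forward or backward
     with MPI_Alltoallv. */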
361: /* Get number of extants to send to each processor */
362: for(extant = 0; extant < numExtants; extant++) {
363: numSendExtants[extantProcs[extant]]++;
364: }
366: /* Get number of extants to receive from each processor */
367: MPI_Alltoall(numSendExtants, 1, MPI_INT, numRecvExtants, 1, MPI_INT, comm);
368: for(proc = 1; proc < numProcs; proc++) {
369: sumSendExtants[proc] = sumSendExtants[proc-1] + numSendExtants[proc-1];
370: sumRecvExtants[proc] = sumRecvExtants[proc-1] + numRecvExtants[proc-1];
371: offsets[proc] = sumSendExtants[proc];
372: }
373: totSendExtants = sumSendExtants[numProcs-1] + numSendExtants[numProcs-1];
374: totRecvExtants = sumRecvExtants[numProcs-1] + numRecvExtants[numProcs-1];
375: if (numExtants != totSendExtants) SETERRQ(PETSC_ERR_PLIB, "Invalid number of extants in send");
377: PetscDataTypeGetSize(dataType, &typeSize);
378: if (totSendExtants) {
379: PetscMalloc(totSendExtants * sizeof(int), &sendIndices);
380: }
381: if (totRecvExtants) {
382: PetscMalloc(totRecvExtants * sizeof(int), &recvIndices);
383: PetscMalloc(totRecvExtants * sizeof(int), &extantIndices);
384: PetscMalloc(totRecvExtants * typeSize, &tempVars);
385: }
387: /* Must order extants by processor */
388: for(extant = 0; extant < numExtants; extant++)
389: sendIndices[offsets[extantProcs[extant]]++] = extant + firstExtant[rank];
391: /* Get canonical indices of extants to provide variables for */
392: MPI_Alltoallv(sendIndices, numSendExtants, sumSendExtants, MPI_INT,
393: recvIndices, numRecvExtants, sumRecvExtants, MPI_INT, comm);
394:
396: /* Recompute size and offset of each domain */
397: MPI_Allgather(&totRecvExtants, 1, MPI_INT, &firstExtant[1], 1, MPI_INT, comm);
398: firstExtant[0] = 0;
399: for(proc = 1; proc <= numProcs; proc++)
400: firstExtant[proc] += firstExtant[proc-1];
402: /* Create the global extant reordering */
403: for(extant = 0; extant < totRecvExtants; extant++)
404: /* This would be the time to do RCM on the local graph by reordering extantIndices[] */
405: extantIndices[extant] = extant + firstExtant[rank];
406: AOCreateDebug(comm, totRecvExtants, recvIndices, extantIndices, ordering);
408: switch(mode)
409: {
410: case SCATTER_FORWARD:
411: /* Get extant variables */
412: if (addv == INSERT_VALUES) {
413: for(extant = 0; extant < totRecvExtants; extant++)
414: {
415: locIndex = recvIndices[extant] - firstVar[rank];
416: #ifdef PETSC_USE_BOPT_g
417: if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
418: #endif
419: for(byte = 0; byte < typeSize; byte++)
420: tempVars[extant*typeSize+byte] = locBytes[locIndex*typeSize+byte];
421: }
422: } else {
423: for(extant = 0; extant < totRecvExtants; extant++)
424: {
425: locIndex = recvIndices[extant] - firstVar[rank];
426: #ifdef PETSC_USE_BOPT_g
427: if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
428: #endif
429: for(byte = 0; byte < typeSize; byte++)
430: tempVars[extant*typeSize+byte] += locBytes[locIndex*typeSize+byte];
431: }
432: }
434: /* Communicate local variables to extant storage */
435: PetscDataTypeToMPIDataType(dataType, &MPIType);
436: MPI_Alltoallv(tempVars, numRecvExtants, sumRecvExtants, MPIType,
437: ghostVars, numSendExtants, sumSendExtants, MPIType, comm);
438:
439: break;
440: case SCATTER_REVERSE:
441: /* Communicate extant variables to local storage */
442: PetscDataTypeToMPIDataType(dataType, &MPIType);
443: MPI_Alltoallv(ghostVars, numSendExtants, sumSendExtants, MPIType,
444: tempVars, numRecvExtants, sumRecvExtants, MPIType, comm);
445:
447: /* Get extant variables */
448: if (addv == INSERT_VALUES) {
449: for(extant = 0; extant < totRecvExtants; extant++)
450: {
451: locIndex = recvIndices[extant] - firstVar[rank];
452: #ifdef PETSC_USE_BOPT_g
453: if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
454: #endif
455: for(byte = 0; byte < typeSize; byte++)
456: locBytes[locIndex*typeSize+byte] = tempVars[extant*typeSize+byte];
457: }
458: } else {
459: for(extant = 0; extant < totRecvExtants; extant++)
460: {
461: locIndex = recvIndices[extant] - firstVar[rank];
462: #ifdef PETSC_USE_BOPT_g
463: if ((locIndex < 0) || (locIndex >= numLocVars)) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Invalid extant index received");
464: #endif
465: for(byte = 0; byte < typeSize; byte++)
466: locBytes[locIndex*typeSize+byte] += tempVars[extant*typeSize+byte];
467: }
468: }
469: break;
470: default:
471: SETERRQ(PETSC_ERR_ARG_WRONG, "Invalid scatter mode");
472: }
474: /* Cleanup */
475: PetscFree(numSendExtants);
476: PetscFree(numRecvExtants);
477: PetscFree(sumSendExtants);
478: PetscFree(sumRecvExtants);
479: PetscFree(offsets);
480: if (totSendExtants) {
481: PetscFree(sendIndices);
482: }
483: if (totRecvExtants) {
484: PetscFree(recvIndices);
485: PetscFree(tempVars);
486: }
487: return(0);
488: }
489: #endif
491: int GridSetUp_Triangular_2D(Grid grid)
492: {
493: FieldClassMap newCM;
494: #ifdef NEW_REDUCTION
495: int numReduceFields;
496: int *reduceFields;
497: int bc;
498: #endif
499: int elemSize;
500: int f, field;
501: int ierr;
504: if (grid->numActiveFields <= 0) PetscFunctionReturn(1);
506: /* Create default class map */
507: if (grid->cm != PETSC_NULL) {
508: FieldClassMapDestroy(grid->cm);
509: }
510: FieldClassMapCreateTriangular2D(grid, grid->numActiveFields, grid->defaultFields, &grid->cm);
511: /* Implement system constraints */
512: if (grid->reduceSystem == PETSC_TRUE) {
513: /* Constrain the default class structure */
514: FieldClassMapConstrain(grid->cm, grid, PETSC_TRUE, PETSC_FALSE, &newCM);
515: FieldClassMapDestroy(grid->cm);
516: grid->cm = newCM;
517: /* Create reduction class map */
518: if (grid->reductionCM != PETSC_NULL) {
519: FieldClassMapDestroy(grid->reductionCM);
520: }
521: #ifdef NEW_REDUCTION
522: PetscMalloc((grid->numBC+grid->numPointBC) * sizeof(int), &reduceFields);
523: for(bc = 0, numReduceFields = 0; bc < grid->numBC; bc++) {
524: if (grid->bcReduce[bc] != PETSC_TRUE) continue;
525: for(f = 0; f < numReduceFields; f++) {
526: if (reduceFields[f] == grid->bcField[bc]) break;
527: }
528: if (f == numReduceFields) reduceFields[numReduceFields++] = grid->bcField[bc];
529: }
530: for(bc = 0; bc < grid->numPointBC; bc++) {
531: if (grid->pointBCReduce[bc] != PETSC_TRUE) continue;
532: for(f = 0; f < numReduceFields; f++) {
533: if (reduceFields[f] == grid->pointBCField[bc]) break;
534: }
535: if (f == numReduceFields) reduceFields[numReduceFields++] = grid->pointBCField[bc];
536: }
537: FieldClassMapCreateTriangular2D(grid, numReduceFields, reduceFields, &newCM);
538: FieldClassMapReduce(newCM, grid, &grid->reductionCM);
539: FieldClassMapDestroy(newCM);
540: PetscFree(reduceFields);
541: #else
542: FieldClassMapReduce(grid->cm, grid, &grid->reductionCM);
543: #endif
544: }
545: /* Calculate boundary sizes after reduction */
546: GridSetupBoundarySizes_Triangular_2D(grid);
548: /* Setup default global and local variable orderings */
549: if (grid->order) {
550: VarOrderingDestroy(grid->order);
551: }
552: if (grid->locOrder) {
553: LocalVarOrderingDestroy(grid->locOrder);
554: }
555: VarOrderingCreate(grid, &grid->order);
556: LocalVarOrderingCreate(grid, grid->cm->numFields, grid->cm->fields, &grid->locOrder);
558: /* Setup global and local variable orderings for BC reduction */
559: if (grid->reduceOrder) {
560: VarOrderingDestroy(grid->reduceOrder);
561: }
562: if (grid->locReduceOrder) {
563: LocalVarOrderingDestroy(grid->locReduceOrder);
564: }
565: if (grid->reduceSystem) {
566: VarOrderingCreateReduce(grid, &grid->reduceOrder);
567: LocalVarOrderingCreate(grid, grid->reductionCM->numFields, grid->reductionCM->fields, &grid->locReduceOrder);
568:
569: }
571: /* Setup element vector and matrix */
572: if (grid->vec != PETSC_NULL) {
573: ElementVecDestroy(grid->vec);
574: }
575: if (grid->ghostElementVec != PETSC_NULL) {
576: ElementVecDestroy(grid->ghostElementVec);
577: }
578: if (grid->mat != PETSC_NULL) {
579: ElementMatDestroy(grid->mat);
580: }
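  /* With explicit constraints the element vectors and matrices must also hold the extra constrained
     components, so the element size is enlarged by constraintCompDiff components per shape function
     of each constrained field; the reduceSize fields below keep the unconstrained size. */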
581: elemSize = grid->locOrder->elemSize;
582: if (grid->explicitConstraints == PETSC_TRUE) {
583: for(f = 0; f < grid->cm->numFields; f++) {
584: field = grid->cm->fields[f];
585: if (grid->fields[field].isConstrained == PETSC_TRUE)
586: elemSize += grid->fields[field].disc->funcs*grid->fields[field].constraintCompDiff;
587: }
588: }
589: ElementVecCreate(grid->comm, elemSize, &grid->vec);
590: ElementVecCreate(grid->comm, elemSize, &grid->ghostElementVec);
591: ElementMatCreate(grid->comm, elemSize, elemSize, &grid->mat);
592: grid->vec->reduceSize = grid->locOrder->elemSize;
593: grid->ghostElementVec->reduceSize = grid->locOrder->elemSize;
594: grid->mat->reduceRowSize = grid->locOrder->elemSize;
595: grid->mat->reduceColSize = grid->locOrder->elemSize;
597: return(0);
598: }
600: int GridSetupConstraints_Triangular_2D(Grid grid, PetscConstraintObject ctx) {
601: Mesh_Triangular *tri = (Mesh_Triangular *) grid->mesh->data;
602: int *markers = tri->markers;
603: Field *fields = grid->fields;
604: FieldClassMap cm = grid->cm;
605: int numFields = grid->cm->numFields;
606: int numNodes = grid->cm->numNodes;
607: int **fieldClasses = grid->cm->fieldClasses;
608: int *classes = grid->cm->classes;
609: int *classSizes = grid->cm->classSizes;
610: int numVars = grid->order->numVars;
611: int numLocVars = grid->order->numLocVars;
612: int *firstVar = grid->order->firstVar;
613: int *offsets = grid->order->offsets;
614: int numTotalFields = grid->order->numTotalFields;
615: int **localStart = grid->order->localStart;
616: int rank = grid->mesh->part->rank;
617: int constField = -1; /* The field which is constrained */
618: int *ordering; /* Gives a mapping between the two variable numberings */
619: int *diagRows; /* Allocation for the projector P */
620: int *offdiagRows; /* Allocation for the projector P */
621: int numConstrainedFields;
622: int rowStartVar, colStartVar, locColStart, locColEnd, numLocConstraintVars;
623: int f, field, node, nclass, comp, nodeVars, var, count;
624: PetscTruth opt;
625: int ierr;
628: /* Check constrained fields */
629: for(field = 0, numConstrainedFields = 0; field < numTotalFields; field++)
630: if (fields[field].isConstrained == PETSC_TRUE) {
631: constField = field;
632: numConstrainedFields++;
633: }
634: if (numConstrainedFields == 0) return(0);
635: if (numConstrainedFields > 1) SETERRQ(PETSC_ERR_ARG_OUTOFRANGE, "Only one field may be constrained");
637: /* Create constrained class map */
638: if (grid->constraintCM != PETSC_NULL) {
639: FieldClassMapDestroy(grid->constraintCM);
640: }
641: FieldClassMapConstrain(grid->cm, grid, PETSC_FALSE, PETSC_TRUE, &grid->constraintCM);
643: /* Create variable ordering for constrained and new fields */
644: if (grid->constraintOrder != PETSC_NULL) {
645: VarOrderingDestroy(grid->constraintOrder);
646: }
647: VarOrderingConstrain(grid, grid->order, &grid->constraintOrder);
649: /* Calculate mapping between variable numberings */
650: if (grid->constraintOrdering != PETSC_NULL) {
651: ISDestroy(grid->constraintOrdering);
652: }
653: PetscMalloc(numLocVars * sizeof(int), &ordering);
654: numLocConstraintVars = grid->constraintOrder->numLocVars - grid->constraintOrder->numLocNewVars;
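  /* ordering[] maps each local variable in the default numbering to its place in the constrained
     numbering: unconstrained variables keep their relative position, while variables of the
     constrained field on constrained nodes (negative markers) are renumbered to the end of the
     local range, starting at numLocConstraintVars. */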
655: for(node = 0, count = 0; node < numNodes; node++) {
656: nclass = classes[node];
657: rowStartVar = offsets[node];
658: nodeVars = classSizes[nclass];
659: colStartVar = grid->constraintOrder->offsets[node];
661: if ((markers[node] < 0) && (localStart[constField][nclass] >= 0)) {
662: /* The preceding fields on the node */
663: for(var = 0; var < localStart[constField][nclass]; var++, count++)
664: ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var;
665: /* Nonzeroes in C */
666: rowStartVar += localStart[constField][nclass];
667: colStartVar += localStart[constField][nclass];
668: for(var = 0; var < fields[constField].numComp; var++, count++)
669: ordering[rowStartVar-firstVar[rank]+var] = numLocConstraintVars++;
670: /* The remaining fields on the node */
671: for(var = fields[constField].numComp; var < nodeVars - localStart[constField][nclass]; var++, count++)
672: ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var-fields[constField].numComp;
673: } else {
674: /* Nonzeroes in I */
675: for(var = 0; var < nodeVars; var++, count++)
676: ordering[rowStartVar-firstVar[rank]+var] = colStartVar-grid->constraintOrder->firstVar[rank]+var;
677: }
678: }
679: if (numLocConstraintVars != numLocVars) SETERRQ(PETSC_ERR_PLIB, "Invalid constraint variable offsets");
680: if (count != numLocVars) SETERRQ(PETSC_ERR_PLIB, "Invalid constraint variable offsets");
681: ISCreateGeneral(PETSC_COMM_SELF, numLocVars, ordering, &grid->constraintOrdering);
682: PetscFree(ordering);
685: /* Calculate allocation for constraint matrix which transforms unconstrained fields to constrained and new fields:
687:       / I 0 \ / v_Int \ = / v_Int \
688:       \ 0 C / \ v_Bd  /   \ v_New /
689: */
690: PetscMalloc(numLocVars * sizeof(int), &diagRows);
691: PetscMalloc(numLocVars * sizeof(int), &offdiagRows);
692: PetscMemzero(diagRows, numLocVars * sizeof(int));
693: PetscMemzero(offdiagRows, numLocVars * sizeof(int));
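  /* diagRows[i] and offdiagRows[i] hold the predicted number of nonzeros in the diagonal and
     off-diagonal blocks of local row i of the constraint matrix; they are passed to
     MatCreateMPIAIJ below for preallocation. */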
694: locColStart = grid->constraintOrder->firstVar[rank];
695: locColEnd = grid->constraintOrder->firstVar[rank+1];
696: for(node = 0; node < numNodes; node++) {
697: nclass = classes[node];
698: rowStartVar = offsets[node] - firstVar[rank];
699: nodeVars = classSizes[nclass];
701: /* All constrained nodes have negative markers */
702: if (markers[node] < 0) {
703: for(f = 0; f < numFields; f++) {
704: field = cm->fields[f];
705: if (fields[field].isConstrained == PETSC_TRUE) {
706: comp = fields[field].numComp + fields[field].constraintCompDiff;
707: (*ctx->ops->getindices)(ctx, grid->mesh, grid->constraintOrder, node, CONSTRAINT_COL_INDEX, &colStartVar);
708:
709: /* Check to see whether the variables fall within the diagonal block --
710: Notice we are overestimating as if every constrained variable
711: depends on all the new variables
712: */
713: if ((colStartVar + comp <= locColStart) || (colStartVar >= locColEnd)) {
714: for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
715: offdiagRows[rowStartVar] += comp;
716: } else if ((colStartVar >= locColStart) && (colStartVar + comp <= locColEnd)) {
717: for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
718: diagRows[rowStartVar] += comp;
719: #if 0
720: /* Allow cuts on a single node for rectangular matrices */
721: } else if (rectangular) {
722: if (colStartVar < locColStart) {
723: /* Cut is from below */
724: for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
725: {
726: diagRows[rowStartVar] += (colStartVar + comp) - locColStart;
727: offdiagRows[rowStartVar] += locColStart - colStartVar;
728: }
729: } else {
730: /* Cut is from above */
731: for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
732: {
733: diagRows[rowStartVar] += locColEnd - colStartVar;
734: offdiagRows[rowStartVar] += (colStartVar + comp) - locColEnd;
735: }
736: }
737: #endif
738: } else {
739: /* Row blocking cuts variables on a single node. This is bad partitioning. */
740: SETERRQ(PETSC_ERR_ARG_WRONG, "Row blocking cut variables on a single node");
741: }
742: } else if (fieldClasses[f][nclass]) {
743: /* Remember localStart[][] is -1 if the field is not on the node */
744: for(var = 0; var < fields[field].numComp; var++, rowStartVar++)
745: diagRows[rowStartVar] = 1;
746: }
747: }
748: } else {
749: /* Unconstrained nodes */
750: for(var = 0; var < nodeVars; var++)
751: diagRows[rowStartVar+var] = 1;
752: }
753: }
755: /* Create the constraint matrix */
756: if (grid->constraintMatrix != PETSC_NULL) {
757: MatDestroy(grid->constraintMatrix);
758: }
759: MatCreateMPIAIJ(grid->comm, numLocVars, grid->constraintOrder->numLocVars, numVars,
760: grid->constraintOrder->numVars, 0, diagRows, 0, offdiagRows, &grid->constraintMatrix);
761:
762: MatSetOption(grid->constraintMatrix, MAT_NEW_NONZERO_ALLOCATION_ERR);
764: /* Create the pseudo-inverse of the constraint matrix */
765: PetscOptionsHasName(PETSC_NULL, "-grid_const_inv", &opt);
766: if (opt == PETSC_TRUE) {
767: if (grid->constraintInverse != PETSC_NULL) {
768: MatDestroy(grid->constraintInverse);
769: }
770: MatCreateMPIAIJ(grid->comm, grid->constraintOrder->numLocVars, grid->constraintOrder->numLocVars,
771: grid->constraintOrder->numVars, grid->constraintOrder->numVars, 3, PETSC_NULL, 0, PETSC_NULL,
772: &grid->constraintInverse);
773:
774: MatSetOption(grid->constraintInverse, MAT_NEW_NONZERO_ALLOCATION_ERR);
775: }
777: /* Cleanup */
778: PetscFree(diagRows);
779: PetscFree(offdiagRows);
781: return(0);
782: }
784: int GridSetupBoundary_Triangular_2D(Grid grid)
785: {
786: Mesh_Triangular *tri = (Mesh_Triangular *) grid->mesh->data;
787: Partition p = grid->mesh->part;
788: Partition_Triangular_2D *q = (Partition_Triangular_2D *) p->data;
789: FieldClassMap map = grid->cm;
790: PetscConstraintObject constCtx = grid->constraintCtx;
791: int *markers = tri->markers;
792: int numBC = grid->numBC;
793: GridBC *gridBC = grid->bc;
794: int numFields = map->numFields;
795: int *fields = map->fields;
796: int numNodes = map->numNodes;
797: int numOverlapNodes = map->numOverlapNodes;
798: int numGhostNodes = map->numGhostNodes;
799: int numClasses = map->numClasses;
800: int **fieldClasses = map->fieldClasses;
801: int *classes = map->classes;
802: int *classSizes = map->classSizes;
803: int *localOffsets;
804: int numNewVars;
805: VarOrdering o;
806: LocalVarOrdering l;
807: /* Ghost variable communication */
808: int *ghostSendVars; /* Number of ghost variables on a given processor interior to this domain */
809: int *sumSendVars; /* Prefix sums of ghostSendVars */
810: int *ghostRecvVars; /* Number of ghost variables on a given processor */
811: int *sumRecvVars; /* Prefix sums of ghostRecvVars */
812: int *displs; /* Offsets into ghostRecvVars */
813: int numSendGhostVars; /* The number of ghost variable offsets to send to other processors */
814: int *sendGhostBuffer; /* Recv: Global node numbers Send: Offsets of these nodes */
815: int numProcs, rank;
816: int elemOffset;
817: int proc, f, field, bc, node, locNode, gNode, nclass, var;
818: int ierr;
821: grid->bdSetupCalled = PETSC_TRUE;
823: /* Destroy old orderings */
824: if (grid->bdOrder) {
825: VarOrderingDestroy(grid->bdOrder);
826: }
827: if (grid->bdLocOrder) {
828: LocalVarOrderingDestroy(grid->bdLocOrder);
829: }
831: /* Setup the boundary ordering */
832: PetscHeaderCreate(o, _VarOrdering, int, IS_COOKIE, 0, "VarOrdering", grid->comm, VarOrderingDestroy, 0);
833: PetscLogObjectCreate(o);
834: PetscObjectCompose((PetscObject) o, "ClassMap", (PetscObject) map);
836: /* Allocate memory */
837: MPI_Comm_size(grid->comm, &numProcs);
838: MPI_Comm_rank(grid->comm, &rank);
839: GridGetNumFields(grid, &o->numTotalFields);
840: PetscMalloc((numProcs+1) * sizeof(int), &o->firstVar);
841: PetscMalloc(numOverlapNodes * sizeof(int), &o->offsets);
842: PetscMalloc(o->numTotalFields * sizeof(int *), &o->localStart);
843: PetscLogObjectMemory(o, (numProcs+1 + numOverlapNodes + o->numTotalFields*numClasses) * sizeof(int) + o->numTotalFields*sizeof(int *));
844: PetscMemzero(o->localStart, o->numTotalFields * sizeof(int *));
845: o->numLocNewVars = 0;
846: o->numNewVars = 0;
848: /* Setup domain variable numbering */
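  /* Number the boundary variables node by node: interior nodes (marker 0) and nodes on boundaries
     handled by a reduction BC contribute no variables; every other boundary node contributes
     classSizes[classes[node]] variables. */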
849: o->offsets[0] = 0;
850: for(node = 0; node < numNodes-1; node++) {
851: if (markers[node] == 0) {
852: o->offsets[node+1] = o->offsets[node];
853: } else {
854: for(bc = 0; bc < numBC; bc++) {
855: if ((gridBC[bc].reduce == PETSC_TRUE) && (gridBC[bc].boundary == markers[node])) break;
856: }
857: if (bc == numBC) {
858: o->offsets[node+1] = o->offsets[node] + classSizes[classes[node]];
859: } else {
860: o->offsets[node+1] = o->offsets[node];
861: }
862: }
863: }
864: for(bc = 0; bc < numBC; bc++) {
865: if ((gridBC[bc].reduce == PETSC_TRUE) && (gridBC[bc].boundary == markers[numNodes-1])) break;
866: }
867: if (bc == numBC) {
868: o->numLocVars = o->offsets[numNodes-1] + classSizes[classes[numNodes-1]];
869: } else {
870: o->numLocVars = o->offsets[numNodes-1];
871: }
872: if (map->isConstrained == PETSC_TRUE) {
873: (*constCtx->ops->getsize)(constCtx, &o->numLocNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
874:
875: o->numLocVars += o->numLocNewVars;
876: }
877: MPI_Allgather(&o->numLocVars, 1, MPI_INT, &o->firstVar[1], 1, MPI_INT, o->comm);
878: o->firstVar[0] = 0;
879: for(proc = 1; proc <= numProcs; proc++)
880: o->firstVar[proc] += o->firstVar[proc-1];
881: o->numVars = o->firstVar[numProcs];
882: if (map->isConstrained == PETSC_TRUE) {
883: (*constCtx->ops->getsize)(constCtx, PETSC_NULL, &o->numNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
884:
885: MPI_Allreduce(&o->numLocNewVars, &numNewVars, 1, MPI_INT, MPI_SUM, o->comm);
886: if (o->numNewVars != numNewVars) SETERRQ(PETSC_ERR_PLIB, "Invalid partition of new variables");
887: }
889: /* Initialize the overlap */
890: o->numOverlapVars = o->numLocVars;
891: o->numOverlapNewVars = o->numLocNewVars;
893: if (numProcs > 1) {
894: /* Map local to global variable numbers */
895: for(node = 0; node < numNodes; node++)
896: o->offsets[node] += o->firstVar[rank];
898: /* Initialize communication */
899: PetscMalloc(numProcs * sizeof(int), &ghostSendVars);
900: PetscMalloc(numProcs * sizeof(int), &sumSendVars);
901: PetscMalloc(numProcs * sizeof(int), &ghostRecvVars);
902: PetscMalloc(numProcs * sizeof(int), &sumRecvVars);
903: PetscMalloc(numProcs * sizeof(int), &displs);
904: PetscMemzero(ghostSendVars, numProcs * sizeof(int));
905: PetscMemzero(sumSendVars, numProcs * sizeof(int));
906: PetscMemzero(ghostRecvVars, numProcs * sizeof(int));
907: PetscMemzero(sumRecvVars, numProcs * sizeof(int));
908: PetscMemzero(displs, numProcs * sizeof(int));
910: /* Get number of ghost variables to receive from each processor and size of blocks --
911:    here we assume that classes[] already contains the classes of the ghost nodes */
912: for(node = 0; node < numGhostNodes; node++) {
913: gNode = q->ghostNodes[node];
914: proc = q->ghostNodeProcs[node];
915: nclass = classes[numNodes+node];
916: ghostRecvVars[proc]++;
917: o->numOverlapVars += classSizes[nclass];
918: }
920: /* Get number of constrained ghost variables to receive from each processor and size of blocks */
921: if (map->isConstrained == PETSC_TRUE) {
922: (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, &o->numOverlapNewVars, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL);
923:
924: }
925: o->numOverlapVars += o->numOverlapNewVars - o->numLocNewVars;
927: /* Get sizes of ghost variable blocks to send to each processor */
928: MPI_Alltoall(ghostRecvVars, 1, MPI_INT, ghostSendVars, 1, MPI_INT, o->comm);
930: /* Calculate offsets into the ghost variable receive array */
931: for(proc = 1; proc < numProcs; proc++) {
932: sumRecvVars[proc] = sumRecvVars[proc-1] + ghostRecvVars[proc-1];
933: displs[proc] = sumRecvVars[proc];
934: }
936: /* Calculate offsets into the ghost variable send array */
937: for(proc = 1; proc < numProcs; proc++)
938: sumSendVars[proc] = sumSendVars[proc-1] + ghostSendVars[proc-1];
940: /* Send requests for ghost variable offsets to each processor */
941: numSendGhostVars = sumSendVars[numProcs-1] + ghostSendVars[numProcs-1];
942: PetscMalloc(numSendGhostVars * sizeof(int), &sendGhostBuffer);
943: for(node = 0; node < numGhostNodes; node++) {
944: gNode = q->ghostNodes[node];
945: proc = q->ghostNodeProcs[node];
946: o->offsets[numNodes+(displs[proc]++)] = gNode;
947: }
948: MPI_Alltoallv(&o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT,
949: sendGhostBuffer, ghostSendVars, sumSendVars, MPI_INT, o->comm);
950:
952: /* Send ghost variable offsets to each processor */
953: for(node = 0; node < numSendGhostVars; node++) {
954: #ifdef PETSC_USE_BOPT_g
955: if ((sendGhostBuffer[node] < q->firstNode[rank]) || (sendGhostBuffer[node] >= q->firstNode[rank+1])) {
956: SETERRQ3(PETSC_ERR_PLIB, "Invalid request for variable offset of local node %d should be in [%d,%d)",
957: sendGhostBuffer[node], q->firstNode[rank], q->firstNode[rank+1]);
958: }
959: #endif
960: locNode = sendGhostBuffer[node] - q->firstNode[rank];
961: sendGhostBuffer[node] = o->offsets[locNode];
962: }
963: MPI_Alltoallv(sendGhostBuffer, ghostSendVars, sumSendVars, MPI_INT,
964: &o->offsets[numNodes], ghostRecvVars, sumRecvVars, MPI_INT, o->comm);
965:
967: /* Cleanup */
968: PetscFree(ghostSendVars);
969: PetscFree(sumSendVars);
970: PetscFree(ghostRecvVars);
971: PetscFree(sumRecvVars);
972: PetscFree(displs);
973: PetscFree(sendGhostBuffer);
975: /* We maintain local offsets for ghost variables, meaning the offsets after the last
976: interior variable, rather than the offset of the given ghost variable in the global
977: matrix. */
978: PetscMalloc(numGhostNodes * sizeof(int), &o->localOffsets);
979: for(node = 0, var = o->numLocVars; node < numGhostNodes; node++) {
980: o->localOffsets[node] = var;
981: nclass = classes[numNodes+node];
982: var += classSizes[nclass];
983: }
984: }
986: /* Allocate memory */
987: PetscMalloc(numClasses * sizeof(int), &localOffsets);
988: PetscMemzero(localOffsets, numClasses * sizeof(int));
990: /* Setup local field offsets */
991: for(f = 0; f < numFields; f++) {
992: field = fields[f];
993: ierr = PetscMalloc(numClasses * sizeof(int), &o->localStart[field]);
994: for(nclass = 0; nclass < numClasses; nclass++) {
995: if (fieldClasses[f][nclass]) {
996: o->localStart[field][nclass] = localOffsets[nclass];
997: localOffsets[nclass] += grid->fields[field].disc->bdDisc->comp;
998: } else {
999: o->localStart[field][nclass] = -1;
1000: }
1001: }
1002: }
1003: grid->bdOrder = o;
1005: /* Cleanup */
1006: PetscFree(localOffsets);
1008: /* Setup the local boundary ordering */
1009: PetscHeaderCreate(l, _LocalVarOrdering, int, IS_COOKIE, 0, "LocalVarOrdering", grid->comm, LocalVarOrderingDestroy, 0);
1010: PetscLogObjectCreate(l);
1012: /* Allocate memory */
1013: l->numFields = numFields;
1014: PetscMalloc(numFields * sizeof(int), &l->fields);
1015: PetscMalloc(grid->numFields * sizeof(int), &l->elemStart);
1016: PetscLogObjectMemory(l, (numFields + grid->numFields) * sizeof(int));
1017: PetscMemcpy(l->fields, fields, numFields * sizeof(int));
1019: /* Put in sentinel values */
1020: for(f = 0; f < grid->numFields; f++) {
1021: l->elemStart[f] = -1;
1022: }
1024: /* Setup local and global offsets with lower-dimensional discretizations */
1025: for(f = 0, elemOffset = 0; f < numFields; f++) {
1026: field = fields[f];
1027: l->elemStart[field] = elemOffset;
1028: elemOffset += grid->fields[field].disc->bdDisc->size;
1029: }
1030: l->elemSize = elemOffset;
1031: grid->bdLocOrder = l;
1033: return(0);
1034: }
1036: static int GridReformMesh_Triangular_2D(Grid grid)
1037: {
1041: GridSetupBoundarySizes_Triangular_2D(grid);
1042: return(0);
1043: }
1045: static int GridGetBoundaryNext_Triangular_2D(Grid grid, int boundary, int fieldIdx, PetscTruth ghost, FieldClassMap map,
1046: int *node, int *nclass)
1047: {
1051: do {
1052: MeshGetBoundaryNext(grid->mesh, boundary, ghost, node);
1053: }
1054: /* Note: the boolean short circuit avoids indexing classes[] when node == -1 */
1055: while((*node != -1) && (map->fieldClasses[fieldIdx][map->classes[*node]] == 0));
1056: if (*node != -1)
1057: *nclass = map->classes[*node];
1058: else
1059: *nclass = -1;
1060: return(0);
1061: }
1063: static int GridGetBoundaryStart_Triangular_2D(Grid grid, int boundary, int fieldIdx, PetscTruth ghost, FieldClassMap map,
1064: int *node, int *nclass)
1065: {
1066: Mesh mesh = grid->mesh;
1067: Mesh_Triangular *tri = (Mesh_Triangular *) mesh->data;
1068: int b; /* Canonical boundary number */
1069: int ierr;
1072: /* Find canonical boundary number */
1073: for(b = 0; b < mesh->numBd; b++)
1074: if (tri->bdMarkers[b] == boundary) break;
1075: if (b == mesh->numBd) SETERRQ1(PETSC_ERR_ARG_WRONG, "Invalid boundary %d specified", boundary);
1076: if (mesh->activeBd != -1) SETERRQ(PETSC_ERR_ARG_WRONGSTATE, "Already iterating over a boundary");
1077: /* Find first boundary node of a class in the active field */
1078: mesh->activeBd = b;
1079: mesh->activeBdOld = b;
1080: mesh->activeBdNode = tri->bdBegin[b] - 1;
1081: GridGetBoundaryNext_Triangular_2D(grid, boundary, fieldIdx, ghost, map, node, nclass);
1082: return(0);
1083: }
1085: static int GridCreateRestriction_Triangular_2D(Grid dcf, Grid dcc, GMat *gmat) {
1086: SETERRQ(PETSC_ERR_SUP, " ");
1087: }
1089: static int GridEvaluateRhs_Triangular_2D(Grid grid, GVec x, GVec f, PetscObject ctx)
1090: {
1091: Mesh mesh = grid->mesh;
1092: Field *fields = grid->fields;
1093: int numNewFields = grid->numNewFields; /* The number of new fields added by constraints */
1094: GridFunc *rhsFuncs = grid->rhsFuncs; /* The Rhs PointFunctions */
1095: int numRhsOps = grid->numRhsOps; /* The number of Rhs operators */
1096: GridOp *rhsOps = grid->rhsOps; /* The operators on the Rhs */
1097: PetscTruth reduceSystem = grid->reduceSystem;
1098: PetscTruth reduceElement = grid->reduceElement;
1099: PetscTruth explicitConstraints = grid->explicitConstraints;
1100: PetscConstraintObject constCtx = grid->constraintCtx; /* The constraint object */
1101: int numFields = grid->cm->numFields; /* The number of fields in the calculation */
1102: LocalVarOrdering locOrder = grid->locOrder; /* The default local variable ordering */
1103: int elemSize = locOrder->elemSize; /* The number of shape funcs in the elem mat */
1104: int *elemStart = locOrder->elemStart; /* The offset of each field in the elem mat */
1105: ElementVec vec = grid->vec; /* The element vector */
1106: PetscScalar *array = vec->array; /* The values in the element vector */
1107: Vec ghostVec = grid->ghostVec; /* The local solution vector */
1108: ElementVec elemGhostVec = grid->ghostElementVec; /* The element vector from ghostVec */
1109: PetscScalar *ghostArray = elemGhostVec->array; /* The values in elemGhostVec */
1110: ElementMat mat = grid->mat; /* A temporary element matrix */
1111: PetscScalar *matArray = mat->array; /* The values in the element matrix */
1112: MeshMover mover;
1113: Grid ALEGrid; /* The grid describing the mesh velocity */
1114: VarOrdering order; /* The default variable ordering */
1115: ElementVec MeshALEVec; /* ALE velocity vector from mesh */
1116: ElementVec ALEVec; /* ALE velocity vector */
1117: PetscScalar *ALEArray; /* The values in the ALE element vector */
1118: int computeFunc, computeLinear, computeNonlinear; /* Flags for selective computation */
1119: PetscScalar *nonlinearArgs[2];
1120: int newComp = 0;
1121: int numElements;
1122: int sField, tField, op, newField, elem, func, fieldIndex;
1123: #ifdef PETSC_USE_BOPT_g
1124: int var;
1125: PetscTruth opt;
1126: #endif
1127: int ierr;
1130: if (explicitConstraints == PETSC_TRUE) {
1131: order = grid->constraintOrder;
1132: } else {
1133: order = grid->order;
1134: }
1135: /* Handle selective computation */
1136: computeFunc = 1;
1137: if (grid->activeOpTypes[0] == PETSC_FALSE) computeFunc = 0;
1138: computeLinear = 1;
1139: if (grid->activeOpTypes[1] == PETSC_FALSE) computeLinear = 0;
1140: computeNonlinear = 1;
1141: if (grid->activeOpTypes[2] == PETSC_FALSE) computeNonlinear = 0;
1143: /* Fill the local solution vectors */
1144: if (x != PETSC_NULL) {
1145: GridGlobalToLocal(grid, INSERT_VALUES, x);
1146: }
1148: /* Setup ALE variables */
1149: if (grid->ALEActive == PETSC_TRUE) {
1150: MeshGetMover(mesh, &mover);
1151: MeshMoverGetVelocityGrid(mover, &ALEGrid);
1152: /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
1153: ElementVecDuplicate(grid->vec, &ALEVec);
1154: ALEArray = ALEVec->array;
1155: MeshALEVec = ALEGrid->vec;
1156: } else {
1157: ALEArray = PETSC_NULL;
1158: MeshALEVec = PETSC_NULL;
1159: }
1161: /* Loop over elements */
1162: MeshGetInfo(mesh, PETSC_NULL, PETSC_NULL, PETSC_NULL, &numElements);
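  /* For each element: zero the element vector, gather the local solution and transform it to
     unconstrained variables, evaluate the point functions and the linear/nonlinear operators into
     the element vector, compute the global (possibly reduced) indices, and add the result into f. */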
1163: for(elem = 0; elem < numElements; elem++) {
1164: /* Initialize element vector */
1165: ElementVecZero(vec);
1166: vec->reduceSize = locOrder->elemSize;
1167: elemGhostVec->reduceSize = locOrder->elemSize;
1169: /* Setup local row indices for the ghost vector */
1170: GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
1171: /* Setup local solution vector */
1172: GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
1173: /* Must transform to unconstrained variables for element integrals */
1174: GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);
1176: /* Setup ALE variables */
1177: if (grid->ALEActive == PETSC_TRUE) {
1178: GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
1179: GridLocalToElement(ALEGrid, MeshALEVec);
1180: }
1182: if (computeFunc) {
1183: for(func = 0; func < grid->numRhsFuncs; func++) {
1184: if (fields[rhsFuncs[func].field].isActive == PETSC_FALSE) continue;
1185: tField = rhsFuncs[func].field;
1186: DiscretizationEvaluateFunctionGalerkin(fields[tField].disc, mesh, *rhsFuncs[func].func, rhsFuncs[func].alpha, elem,
1187: &array[elemStart[tField]], ctx);
1188:
1189: }
1190: #ifdef PETSC_USE_BOPT_g
1191: #endif
1192: }
1194: for(op = 0; op < numRhsOps; op++) {
1195: if (fields[rhsOps[op].field].isActive == PETSC_FALSE) continue;
1196: if ((rhsOps[op].nonlinearOp != PETSC_NULL) && (computeNonlinear)) {
1197: tField = rhsOps[op].field;
1198: nonlinearArgs[0] = &ghostArray[elemStart[tField]];
1199: nonlinearArgs[1] = &ghostArray[elemStart[tField]];
1200: if (rhsOps[op].isALE) {
1201: GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, tField, ALEVec);
1202: DiscretizationEvaluateNonlinearALEOperatorGalerkin(fields[tField].disc, mesh, rhsOps[op].nonlinearOp,
1203: rhsOps[op].alpha, elem, 2, nonlinearArgs,
1204: ALEArray, &array[elemStart[tField]], ctx);
1205:
1206: } else {
1207: DiscretizationEvaluateNonlinearOperatorGalerkin(fields[tField].disc, mesh, rhsOps[op].nonlinearOp,
1208: rhsOps[op].alpha, elem, 2, nonlinearArgs,
1209: &array[elemStart[tField]], ctx);
1210:
1211: }
1212: } else if (computeLinear) {
1213: sField = rhsOps[op].field;
1214: tField = fields[sField].disc->operators[rhsOps[op].op]->test->field;
1215: ElementMatZero(mat);
1216: if (rhsOps[op].isALE) {
1217: GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
1218: DiscretizationEvaluateALEOperatorGalerkinMF(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1219: rhsOps[op].op, rhsOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1220: &ghostArray[elemStart[sField]], ALEArray, array, matArray, ctx);
1221:
1222: } else {
1223: DiscretizationEvaluateOperatorGalerkinMF(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1224: rhsOps[op].op, rhsOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1225: &ghostArray[elemStart[sField]], array, matArray, ctx);
1226:
1227: }
1228: }
1229: #ifdef PETSC_USE_BOPT_g
1230: #endif
1231: }
1233: /* Setup global row indices, with reduction if necessary */
1234: GridCalcGeneralElementVecIndices(grid, elem, order, PETSC_NULL, PETSC_FALSE, vec);
1235: #ifdef PETSC_USE_BOPT_g
1236: PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
1237: if (opt == PETSC_TRUE) {
1238: for(var = 0; var < vec->reduceSize; var++)
1239: PetscPrintf(grid->comm, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
1240: }
1241: #endif
1242: /* Put values in the global vector */
1243: ElementVecSetValues(vec, f, ADD_VALUES);
1244: }
1246: /* Cleanup ALE variables */
1247: if (grid->ALEActive == PETSC_TRUE) {
1248: ElementVecDestroy(ALEVec);
1249: }
1251: /* Evaluate self-interaction of new fields created by constraints */
1252: if (explicitConstraints == PETSC_TRUE) {
1253: /* WARNING: This only accommodates 1 constrained field */
1254: /* Get constraint information */
1255: for(fieldIndex = 0; fieldIndex < numFields; fieldIndex++) {
1256: sField = grid->cm->fields[fieldIndex];
1257: if (fields[sField].isConstrained == PETSC_TRUE) {
1258: newComp = fields[sField].numComp + fields[sField].constraintCompDiff;
1259: break;
1260: }
1261: }
1262: /* Calculate self-interaction */
1263: for(newField = 0; newField < numNewFields; newField++) {
1264: /* Initialize element vector */
1265: ElementVecZero(vec);
1266: vec->reduceSize = newComp;
1268: /* Calculate the indices and contribution to the element matrix from the new field */
1269: (*constCtx->ops->newelemvec)(constCtx, order, newField, vec);
1270: #ifdef PETSC_USE_BOPT_g
1271: PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly_constrained", &opt);
1272: if (opt == PETSC_TRUE) {
1273: for(var = 0; var < vec->reduceSize; var++)
1274: PetscPrintf(grid->comm, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
1275: }
1276: #endif
1277: /* Put values in global matrix */
1278: ElementVecSetValues(vec, f, ADD_VALUES);
1279: #ifdef PETSC_USE_BOPT_g
1280: #endif
1281: }
1282: }
1284: /* Reset element vectors */
1285: vec->reduceSize = locOrder->elemSize;
1286: elemGhostVec->reduceSize = locOrder->elemSize;
1288: VecAssemblyBegin(f);
1289: VecAssemblyEnd(f);
1290: return(0);
1291: }
1293: static int GridEvaluateSystemMatrix_Triangular_2D(Grid grid, GVec x, GMat *J, GMat *M, MatStructure *flag, PetscObject ctx)
1294: {
1295: GMat A = *J; /* The working system matrix */
1296: Mesh mesh = grid->mesh;
1297: Field *fields = grid->fields;
1298: int numNewFields = grid->numNewFields; /* The number of new fields added by constraints */
1299: int numMatOps = grid->numMatOps; /* The number of operators in the matrix */
1300: GridOp *matOps = grid->matOps; /* The operators in the system matrix */
1301: VarOrdering constOrder = grid->constraintOrder; /* The constrained variable ordering */
1302: PetscTruth reduceSystem = grid->reduceSystem;
1303: PetscTruth reduceElement = grid->reduceElement;
1304: PetscTruth expConst = grid->explicitConstraints;
1305: PetscConstraintObject constCtx = grid->constraintCtx; /* The constraint object */
1306: int numFields = grid->cm->numFields; /* The number of fields in the calculation */
1307: LocalVarOrdering locOrder = grid->locOrder; /* The default local variable ordering */
1308: int elemSize = locOrder->elemSize; /* The number of shape functions in the element matrix */
1309: int *elemStart = locOrder->elemStart; /* The offset of each field in the element matrix */
1310: ElementMat mat = grid->mat; /* The element matrix */
1311: PetscScalar *array = mat->array; /* The values in the element matrix */
1312: Vec ghostVec = grid->ghostVec; /* The local solution vector */
1313: ElementVec elemGhostVec = grid->ghostElementVec; /* The element vector from ghostVec */
1314: PetscScalar *ghostArray = elemGhostVec->array; /* The values in elemGhostVec */
1315: MeshMover mover;
1316: Grid ALEGrid; /* The grid describing the mesh velocity */
1317: VarOrdering order; /* The default variable ordering */
1318: ElementVec MeshALEVec; /* ALE velocity vector with mesh discretization */
1319: ElementVec ALEVec; /* ALE velocity vector */
1320: PetscScalar *ALEArray; /* The values in the ALE element vector */
1321: int newComp = 0;
1322: int numElements;
1323: int elem, f, sField, tField, op, newField;
1324: #ifdef PETSC_USE_BOPT_g
1325: PetscTruth opt;
1326: #endif
1327: int ierr;
1330: if (expConst == PETSC_TRUE) {
1331: order = grid->constraintOrder;
1332: } else {
1333: order = grid->order;
1334: }
1335: /* Fill the local solution vectors */
1336: if (x != PETSC_NULL) {
1337: GridGlobalToLocal(grid, INSERT_VALUES, x);
1338: }
1340: /* Setup ALE variables -- No new variables should be ALE so ALEVec is not recalculated */
1341: if (grid->ALEActive == PETSC_TRUE) {
1342: MeshGetMover(mesh, &mover);
1343: MeshMoverGetVelocityGrid(mover, &ALEGrid);
1344: /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
1345: ElementVecDuplicate(grid->vec, &ALEVec);
1346: ALEArray = ALEVec->array;
1347: MeshALEVec = ALEGrid->vec;
1348: } else {
1349: ALEArray = PETSC_NULL;
1350: MeshALEVec = PETSC_NULL;
1351: }
1353: /* Loop over elements */
1354: MeshGetInfo(mesh, PETSC_NULL, PETSC_NULL, PETSC_NULL, &numElements);
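  /* For each element: zero the element matrix, gather the local solution and transform it to
     unconstrained variables, evaluate each registered operator into the element matrix, compute
     the global indices, and add the result into the system matrix A. */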
1355: for(elem = 0; elem < numElements; elem++) {
1356: /* Initialize element matrix */
1357: ElementMatZero(mat);
1358: mat->reduceRowSize = locOrder->elemSize;
1359: mat->reduceColSize = locOrder->elemSize;
1360: elemGhostVec->reduceSize = locOrder->elemSize;
1362: /* Setup local row indices for the ghost vector */
1363: GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
1364: /* Setup local solution vector */
1365: GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
1366: /* Must transform to unconstrained variables for element integrals */
1367: GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);
1369: /* Setup ALE variables */
1370: if (grid->ALEActive == PETSC_TRUE) {
1371: GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
1372: GridLocalToElement(ALEGrid, MeshALEVec);
1373: }
1375: /* Calculate the contribution to the element matrix from each field */
1376: for(op = 0; op < numMatOps; op++) {
1377: sField = matOps[op].field;
1378: tField = fields[sField].disc->operators[matOps[op].op]->test->field;
1379: if (fields[sField].isActive) {
1380: if (matOps[op].isALE) {
1381: GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
1382: DiscretizationEvaluateALEOperatorGalerkin(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1383: matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1384: ALEArray, array, ctx);
1385:
1386: } else {
1387: DiscretizationEvaluateOperatorGalerkin(fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1388: matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1389: array, ctx);
1390:
1391: }
1392: #ifdef PETSC_USE_BOPT_g
1393: #endif
1394: }
1395: }
1397: /* Setup global numbering, with reduction if necessary */
1398: GridCalcGeneralElementMatIndices(grid, elem, order, order, PETSC_FALSE, mat);
1399: #ifdef PETSC_USE_BOPT_g
1400: PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
1401: if (opt == PETSC_TRUE) {
1402: ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1403: }
1404: #endif
1405: /* Put values in the global matrix */
1406: ElementMatSetValues(mat, A, ADD_VALUES);
1407: }
1409: /* Evaluate self-interaction of new fields created by constraints */
1410: if (expConst == PETSC_TRUE) {
1411: /* WARNING: This only accommodates 1 constrained field */
1412: /* Get constraint information */
1413: for(f = 0; f < numFields; f++) {
1414: sField = grid->cm->fields[f];
1415: if (fields[sField].isConstrained == PETSC_TRUE) {
1416: newComp = fields[sField].numComp + fields[sField].constraintCompDiff;
1417: break;
1418: }
1419: }
1420: /* Calculate self-interaction */
1421: for(newField = 0; newField < numNewFields; newField++) {
1422: /* Initialize element matrix */
1423: ElementMatZero(mat);
1424: mat->reduceRowSize = newComp;
1425: mat->reduceColSize = newComp;
1427: /* Calculate the indices and contribution to the element matrix from the new field */
1428: (*constCtx->ops->newelemmat)(constCtx, constOrder, newField, mat);
1429: #ifdef PETSC_USE_BOPT_g
1430: PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly_constrained", &opt);
1431: if (opt == PETSC_TRUE) {
1432: ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1433: }
1434: #endif
1435: /* Put values in global matrix */
1436: ElementMatSetValues(mat, A, ADD_VALUES);
1437: #ifdef PETSC_USE_BOPT_g
1438: #endif
1439: }
1440: }
1442: /* Assemble matrix */
1443: MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
1444: MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
1446: /* Reset element matrix and vector */
1447: mat->reduceRowSize = locOrder->elemSize;
1448: mat->reduceColSize = locOrder->elemSize;
1449: elemGhostVec->reduceSize = locOrder->elemSize;
1451: /* Cleanup */
1452: if (grid->ALEActive == PETSC_TRUE) {
1453: ElementVecDestroy(ALEVec);
1454: }
1456: GridResetConstrainedMultiply_Private(grid, A);
1457: return(0);
1458: }
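/* The function table for 2D triangular grids; entries set to PETSC_NULL or 0 are operations that
   this grid type does not implement. */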
1460: static struct _GridOps GOps = {GridSetUp_Triangular_2D,
1461: GridSetupBoundary_Triangular_2D,
1462: GridSetupConstraints_Triangular_2D,
1463: GridSetupGhostScatter_Triangular_2D,
1464: PETSC_NULL/* GridSetFromOptions */,
1465: PETSC_NULL/* GridDuplicate */,
1466: PETSC_NULL/* GridReform */,
1467: PETSC_NULL/* GridCopy */,
1468: GridDestroy_Triangular_2D,
1469: GridView_Triangular_2D,
1470: GridGetBoundaryStart_Triangular_2D,
1471: GridGetBoundaryNext_Triangular_2D,
1472: GridReformMesh_Triangular_2D,
1473: GridCreateGMat_Triangular_2D,
1474: GridCreateVarOrdering_Triangular_2D,
1475: GridCreateLocalVarOrdering_Triangular_2D,
1476: GridCreateVarScatter_Triangular_2D,
1477: GridVarOrderingConstrain_Triangular_2D,
1478: GridCalcElementVecIndices_Triangular_2D,
1479: GridCalcElementMatIndices_Triangular_2D,
1480: GridCalcBoundaryElementVecIndices_Triangular_2D,
1481: GridCalcBoundaryElementMatIndices_Triangular_2D,
1482: GridProjectElementVec_Triangular_2D,
1483: GVecGetLocalGVec_Triangular_2D,
1484: GVecRestoreLocalGVec_Triangular_2D,
1485: 0,/* GVecGetWorkGVec */
1486: 0,/* GVecRestoreWorkGVec */
1487: GVecGlobalToLocal_Triangular_2D,
1488: GVecLocalToGlobal_Triangular_2D,
1489: GVecView_Triangular_2D,
1490: GridCreateRestriction_Triangular_2D,
1491: GVecEvaluateFunction_Triangular_2D,
1492: GVecEvaluateFunctionBoundary_Triangular_2D,
1493: GVecEvaluateFunctionCollective_Triangular_2D,
1494: GVecEvaluateFunctionGalerkin_Triangular_2D,
1495: GVecEvaluateFunctionGalerkinCollective_Triangular_2D,
1496: GVecEvaluateBoundaryFunctionGalerkin_Triangular_2D,
1497: GVecEvaluateBoundaryFunctionGalerkinCollective_Triangular_2D,
1498: GVecEvaluateOperatorGalerkin_Triangular_2D,
1499: GVecEvaluateNonlinearOperatorGalerkin_Triangular_2D,
1500: GVecEvaluateSystemMatrix_Triangular_2D,
1501: GVecEvaluateSystemMatrixDiagonal_Triangular_2D,
1502: GMatView_Triangular_2D,
1503: GMatEvaluateOperatorGalerkin_Triangular_2D,
1504: GMatEvaluateALEOperatorGalerkin_Triangular_2D,
1505: GMatEvaluateALEConstrainedOperatorGalerkin_Triangular_2D,
1506: GMatEvaluateBoundaryOperatorGalerkin_Triangular_2D,
1507: GridEvaluateRhs_Triangular_2D,
1508: GridEvaluateSystemMatrix_Triangular_2D};
1510: EXTERN_C_BEGIN
1511: int GridCreate_Triangular_2D(Grid grid) {
1515: PetscMemcpy(grid->ops, &GOps, sizeof(struct _GridOps));
1516: /* General grid description */
1517: grid->dim = 2;
1518: grid->data = PETSC_NULL;
1519: return(0);
1520: }
1521: EXTERN_C_END