Actual source code: gmat2d.c
#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: gmat2d.c,v 1.24 2000/01/31 17:34:32 knepley Exp $";
#endif

/* Implements FE matrices derived from 2d triangular grids */
#include "petscsles.h" /* For ALE Operators */
#include "src/gvec/gvecimpl.h" /*I "gvec.h" I*/
#include "src/mesh/impls/triangular/2d/2dimpl.h"
#include "src/grid/impls/triangular/2d/elemvec2d.h"
#include "gmat2d.h"

extern int GridResetConstrainedMultiply_Private(Grid, GMat);

static int PlaceVariables_Private(int startVar, int nodeVars, int locColStart, int locColEnd, int sStartVar, int sNodeVars,
                                  PetscTruth rectangular, int *diagRows, int *offdiagRows)
{
  int var;

  if ((nodeVars == 0) || (sNodeVars == 0)) return(0);
  /* Check to see whether the variables fall within the diagonal block */
  if ((sStartVar + sNodeVars <= locColStart) || (sStartVar >= locColEnd)) {
    for(var = 0; var < nodeVars; var++) {
      offdiagRows[startVar+var] += sNodeVars;
    }
  } else if ((sStartVar >= locColStart) && (sStartVar + sNodeVars <= locColEnd)) {
    for(var = 0; var < nodeVars; var++) {
      diagRows[startVar+var] += sNodeVars;
    }
  } else if (rectangular) {
    /* Allow cuts on a single node for rectangular matrices */
    if (sStartVar < locColStart) {
      /* Cut is from below */
      for(var = 0; var < nodeVars; var++) {
        diagRows[startVar+var]    += (sStartVar + sNodeVars) - locColStart;
        offdiagRows[startVar+var] += locColStart - sStartVar;
      }
    } else {
      /* Cut is from above */
      for(var = 0; var < nodeVars; var++) {
        diagRows[startVar+var]    += locColEnd - sStartVar;
        offdiagRows[startVar+var] += (sStartVar + sNodeVars) - locColEnd;
      }
    }
  } else {
    /* Row blocking cuts variables on a single node. This is bad partitioning. */
    SETERRQ(PETSC_ERR_ARG_WRONG, "Row blocking cut variables on a single node");
  }
  return(0);
}
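
/*
   A minimal worked example of the counting above (illustrative only, not part
   of the original source). Suppose this process owns columns [10, 20) and a
   node owns rows 0..1 (startVar = 0, nodeVars = 2):

     int diag[2] = {0, 0}, offdiag[2] = {0, 0};
     // Support node entirely inside the diagonal block: each row gains 3
     // diagonal nonzeros
     PlaceVariables_Private(0, 2, 10, 20, 12, 3, PETSC_FALSE, diag, offdiag);
     // Support node straddling the lower block boundary, legal only when
     // rectangular: (8+3)-10 = 1 diagonal and 10-8 = 2 off-diagonal nonzeros
     // per row
     PlaceVariables_Private(0, 2, 10, 20, 8, 3, PETSC_TRUE, diag, offdiag);
*/
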
int GridCreateGMat_Triangular_2D(Grid grid, VarOrdering sOrder, VarOrdering tOrder, PetscTruth bdCols, GMat *gmat)
{
  MPI_Comm               comm = grid->comm;
  Mesh                   mesh = grid->mesh;
  Mesh_Triangular       *tri  = (Mesh_Triangular *) mesh->data;
  Partition              p    = mesh->part;
  int                    locRowStart;    /* The row that this partition starts on */
  int                    locRowEnd;      /* The row that the next partition starts on */
  int                    locColStart;    /* The column that this partition starts on */
  int                    locColEnd;      /* The column that the next partition starts on */
  int                    newLocColStart; /* The column that the new variable domain starts on */
  int                    newLocColEnd;   /* The column after the new variable domain ends */
  int                   *diagRows;       /* Number of nonzeros in each diagonal portion */
  int                   *offdiagRows;    /* Number of nonzeros in each off-diagonal portion */
  int                    nodeVars;       /* Number of variables on a node */
  int                    newNodeVars;    /* Number of new variables on a node */
  int                    sNodeVars;      /* Number of variables on a node in the support of a given node */
  int                    sNewNodeVars;   /* Number of new variables on a node in the support of a given node */
  int                    startVar;       /* First variable on a node */
  int                    newStartVar;    /* First new variable on a node */
  int                    sStartVar;      /* First variable on a support node (global numbering) */
  int                    sNewStartVar;   /* First new variable on a support node (global numbering) */
  int                   *nodeDone;       /* A 1 indicates that the node has already been processed */
  int                   *nodeNeighbors;  /* A list of the nodes in the support of a given node */
  int                    degree;         /* The degree of a vertex */
  int                   *support;        /* A list of elements in the support of a basis function */
  PetscTruth             rectangular;    /* Flag for a rectangular matrix */
  int                    numGhostNodes;  /* The number of nodes constrained by variables in another domain */
  int                    numGhostVars;   /* The number of new variables which lie in another domain */
  int                   *ghostProcs;     /* The processor for each ghost node */
  int                   *ghostNodes;     /* The global index for each ghost node */
  int                   *ghostVarProcs;  /* The processor for each ghost variable */
  int                   *ghostVars;      /* The global index for each ghost variable */
  int                    newComp;        /* The number of components in the new field */
  int                    numOverlapElements = p->numOverlapElements;
  int                    numCorners         = mesh->numCorners;
  int                   *elements           = tri->faces;
  int                   *markers            = tri->markers;
  PetscConstraintObject  constCtx           = grid->constraintCtx;
  FieldClassMap          rowMap, colMap;
  int                    numNodes;
  int                   *rowClasses, *colClasses;
  int                   *rowClassSizes, *colClassSizes;
  int                   *rowIsConst, *colIsConst;
  int                    rowLocVars  = tOrder->numLocVars;
  int                    rowVars     = tOrder->numVars;
  int                   *rowFirstVar = tOrder->firstVar;
  int                   *rowOffsets  = tOrder->offsets;
  int                    colLocVars  = sOrder->numLocVars;
  int                    colVars     = sOrder->numVars;
  int                   *colFirstVar = sOrder->firstVar;
  int                   *colOffsets  = sOrder->offsets;
  int                    proc, elem, sElem, corner, sCorner, neighbor, node, sNode, nclass, sNclass, var, count;
  PetscTruth             opt;
  int                    ierr;
  VarOrderingGetClassMap(tOrder, &rowMap);
  VarOrderingGetClassMap(sOrder, &colMap);
  numNodes      = rowMap->numNodes;
  rowClasses    = rowMap->classes;
  rowClassSizes = rowMap->classSizes;
  rowIsConst    = rowMap->isClassConstrained;
  colClasses    = colMap->classes;
  colClassSizes = colMap->classSizes;
  colIsConst    = colMap->isClassConstrained;
  newLocColStart = -1;
  newLocColEnd   = -1;
  /* Get partition information */
  locRowStart = rowFirstVar[p->rank];
  locRowEnd   = rowFirstVar[p->rank+1];
  locColStart = colFirstVar[p->rank];
  locColEnd   = colFirstVar[p->rank+1];
  rectangular = (sOrder->numVars != tOrder->numVars) ? PETSC_TRUE : PETSC_FALSE;
  /* Get new field information */
  if (constCtx != PETSC_NULL) {
    (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, &newComp);
  }
  /* Preallocate possible nonzeros - Note that we are being pessimistic since we set
     the whole dense element matrix, which we know contains some zeros for certain
     operators */
  PetscMalloc(numNodes * sizeof(int), &nodeDone);
  PetscMalloc(mesh->maxDegree*numCorners * sizeof(int), &nodeNeighbors);

  /* Get the number of ghost variables due to constraints */
  numGhostNodes = 0;
  numGhostVars  = 0;
  if ((grid->isConstrained == PETSC_TRUE) && (p->numProcs > 1)) {
    PetscMemzero(nodeDone, numNodes * sizeof(int));
    for(elem = 0; elem < numOverlapElements; elem++) {
      for(corner = 0; corner < numCorners; corner++) {
        node = elements[elem*numCorners+corner];
        if (node >= numNodes) continue;
        if (nodeDone[node])  continue;
        nodeDone[node] = 1;

        nclass = rowClasses[node];
        if (rowIsConst[nclass]) {
          (*constCtx->ops->getindices)(constCtx, mesh, tOrder, node, CONSTRAINT_ROW_INDEX, &startVar);
          /* Include only new variables since only they can be ghosts */
          nodeVars = newComp;
          if ((startVar < locRowStart) || (startVar >= locRowEnd)) {
            /* This is a constraint which generates an off-processor variable */
            numGhostNodes++;
            numGhostVars += nodeVars;
          }
        }
      }
    }
  }

  /* Calculate matrix allocation */
  if (numGhostNodes > 0) {
    PetscMalloc(numGhostNodes * sizeof(int), &ghostNodes);
    PetscMalloc(numGhostNodes * sizeof(int), &ghostProcs);
    PetscMalloc(numGhostVars  * sizeof(int), &ghostVars);
    PetscMalloc(numGhostVars  * sizeof(int), &ghostVarProcs);
  }
  PetscMalloc((rowLocVars+numGhostVars) * sizeof(int), &diagRows);
  PetscMalloc((rowLocVars+numGhostVars) * sizeof(int), &offdiagRows);
  PetscMemzero(diagRows,    (rowLocVars+numGhostVars) * sizeof(int));
  PetscMemzero(offdiagRows, (rowLocVars+numGhostVars) * sizeof(int));
  PetscMemzero(nodeDone, numNodes * sizeof(int));
  for(elem = 0, numGhostNodes = 0, numGhostVars = 0; elem < numOverlapElements; elem++) {
    for(corner = 0; corner < numCorners; corner++) {
      node = elements[elem*numCorners+corner];
      if (node >= numNodes) continue;
      if (nodeDone[node])  continue;
      nodeDone[node] = 1;

      nclass      = rowClasses[node];
      startVar    = rowOffsets[node] - locRowStart;
      nodeVars    = rowClassSizes[nclass];
      newNodeVars = 0;
      if (rowIsConst[nclass]) {
        (*constCtx->ops->getindices)(constCtx, mesh, tOrder, node, CONSTRAINT_ROW_INDEX, &newStartVar);
        /* Include only new variables */
        newNodeVars = newComp;
        if ((newStartVar < locRowStart) || (newStartVar >= locRowEnd)) {
          /* This is a constraint which generates an off-processor variable */
          ghostNodes[numGhostNodes] = newStartVar;
          for(proc = 0; newStartVar >= rowFirstVar[proc+1]; proc++) ;
          ghostProcs[numGhostNodes] = proc;
          for(var = 0; var < newComp; var++, numGhostVars++) {
            ghostVars[numGhostVars]     = newStartVar + var;
            ghostVarProcs[numGhostVars] = proc;
          }
          numGhostNodes++;
          /* Set partition for the appropriate processor */
          newLocColStart = colFirstVar[proc];
          newLocColEnd   = colFirstVar[proc+1];
          /* Reset newStartVar to the correct position in diagRows */
          newStartVar = rowLocVars + (numGhostVars - newComp);
        } else {
          newLocColStart = locColStart;
          newLocColEnd   = locColEnd;
          /* Reset newStartVar to the correct position in diagRows */
          newStartVar -= locRowStart;
        }
      }
      if (nodeVars+newNodeVars == 0) continue;
      /* Loop over nodes on each element in the support of the node */
      MeshGetNodeSupport(mesh, node, elem, &degree, &support);
      for(sElem = 0, count = 0; sElem < degree; sElem++) {
        for(sCorner = 0; sCorner < numCorners; sCorner++) {
          /* Disregard normal columns if we are forming a boundary matrix */
          sNode = elements[support[sElem]*numCorners+sCorner];
          if ((bdCols == PETSC_TRUE) && (markers[sNode] == 0)) continue;
          sNclass      = colClasses[sNode];
          sStartVar    = colOffsets[sNode];
          sNodeVars    = colClassSizes[sNclass];
          sNewNodeVars = 0;

          if (colIsConst[sNclass]) {
            (*constCtx->ops->getindices)(constCtx, mesh, sOrder, sNode, CONSTRAINT_COL_INDEX, &sNewStartVar);
            sNewNodeVars = newComp;
          }

          /* Check for duplicate node */
          for(neighbor = 0; neighbor < count; neighbor++) {
            if (nodeNeighbors[neighbor] == sNode) break;
          }
          if (neighbor < count) {
            continue;
          } else {
#ifdef PETSC_USE_BOPT_g
            if (count >= mesh->maxDegree*numCorners) {
              SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Too many neighboring nodes: %d", count);
            }
#endif
            nodeNeighbors[count++] = sNode;
          }

          PlaceVariables_Private(startVar, nodeVars, locColStart, locColEnd, sStartVar, sNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(newStartVar, newNodeVars, newLocColStart, newLocColEnd, sStartVar, sNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(startVar, nodeVars, locColStart, locColEnd, sNewStartVar, sNewNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(newStartVar, newNodeVars, newLocColStart, newLocColEnd, sNewStartVar, sNewNodeVars,
                                 rectangular, diagRows, offdiagRows);
#ifdef PETSC_USE_BOPT_g
          if ((p->numProcs == 1) && (offdiagRows[startVar] > 0)) {
            for(proc = 0; proc <= p->numProcs; proc++)
              PetscPrintf(PETSC_COMM_SELF, "colFirstVar[%d]: %d\n", proc, colFirstVar[proc]);
            for(node = 0; node < colMap->numNodes; node++)
              PetscPrintf(PETSC_COMM_SELF, "colOffsets[%d]: %d\n", node, colOffsets[node]);
            PetscPrintf(PETSC_COMM_SELF, "sNode %d sStartVar %d in [%d,%d)\n", sNode, sStartVar, locColStart, locColEnd);
            SETERRQ2(PETSC_ERR_PLIB, "Invalid var alloc in elem %d var %d", elem, startVar);
          }
          if ((p->numProcs == 1) && (rowIsConst[nclass]) && (offdiagRows[newStartVar] > 0)) {
            SETERRQ2(PETSC_ERR_PLIB, "Invalid var alloc in elem %d var %d", elem, newStartVar);
          }
#endif
        }
      }
      MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
    }
  }
#ifdef PETSC_USE_BOPT_g
  /* Check that we looked at every node */
  for(node = 0; node < numNodes; node++) {
    if (!nodeDone[node]) SETERRQ1(PETSC_ERR_PLIB, "Node %d was not encountered", node);
  }
#endif
  PetscOptionsHasName(PETSC_NULL, "-trace_alloc", &opt);
  if (opt == PETSC_TRUE) {
    for(var = 0; var < rowLocVars; var++) {
      PetscSynchronizedPrintf(comm, "diagRows[%d]: %d offdiagRows[%d]: %d\n",
                              var + rowFirstVar[p->rank], diagRows[var], var + rowFirstVar[p->rank], offdiagRows[var]);
    }
    PetscSynchronizedFlush(comm);
  }
  /* Communicate */
  if ((grid->isConstrained == PETSC_TRUE) && (p->numProcs > 1)) {
    PetscGhostExchange(comm, numGhostVars, ghostVarProcs, ghostVars, PETSC_INT, rowFirstVar,
                       ADD_VALUES, SCATTER_REVERSE, diagRows, &diagRows[rowLocVars]);
    PetscGhostExchange(comm, numGhostVars, ghostVarProcs, ghostVars, PETSC_INT, rowFirstVar,
                       ADD_VALUES, SCATTER_REVERSE, offdiagRows, &offdiagRows[rowLocVars]);
  }

  /* Stopgap solution for constrained variables */
  if (grid->isConstrained == PETSC_TRUE) {
    for(var = 0; var < rowLocVars; var++) {
      if (diagRows[var]    > colLocVars) diagRows[var]    = colLocVars;
      if (offdiagRows[var] > colLocVars) offdiagRows[var] = colLocVars;
    }
  }
  /* Create the matrix */
  MatCreateMPIAIJ(comm, rowLocVars, colLocVars, rowVars, colVars, 0, diagRows, 0, offdiagRows, gmat);
  PetscObjectCompose((PetscObject) *gmat, "Grid", (PetscObject) grid);
  MatSetOption(*gmat, MAT_NEW_NONZERO_ALLOCATION_ERR);

  /* Cleanup */
  ierr = PetscFree(diagRows);
  ierr = PetscFree(offdiagRows);
  ierr = PetscFree(nodeDone);
  ierr = PetscFree(nodeNeighbors);
  if (numGhostNodes > 0) {
    PetscFree(ghostNodes);
    PetscFree(ghostProcs);
    PetscFree(ghostVars);
    PetscFree(ghostVarProcs);
  }

  return(0);
}
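
/*
   Usage sketch (a hypothetical caller, not part of the original source): the
   routine above computes per-row nonzero counts and passes them to
   MatCreateMPIAIJ() as the d_nnz/o_nnz preallocation arrays, so a caller needs
   only the two variable orderings:

     GMat A;
     GridCreateGMat_Triangular_2D(grid, sOrder, tOrder, PETSC_FALSE, &A);
     ...
     MatDestroy(A);

   Passing bdCols = PETSC_TRUE restricts columns to boundary nodes
   (markers[sNode] != 0), which is how boundary matrices are allocated.
*/
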
int GMatView_Draw_Triangular_2D(GMat gmat, PetscViewer v)
{
  int ierr;

  ierr = MatView(gmat, v);
  PetscFunctionReturn(ierr);
}

int GMatView_Triangular_2D(GMat gmat, PetscViewer viewer)
{
  Grid       grid;
  PetscTruth isascii, isdraw;
  int        ierr;

  PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
  PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_DRAW,  &isdraw);
  if (isascii == PETSC_TRUE) {
    GMatGetGrid(gmat, &grid);
    GridView(grid, viewer);
    PetscViewerFlush(viewer);
    MatView(gmat, viewer);
  } else if (isdraw == PETSC_TRUE) {
    GMatView_Draw_Triangular_2D(gmat, viewer);
  }

  return(0);
}
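
/*
   Usage sketch (illustrative): the dispatch above picks the output format from
   the viewer type, e.g. an ASCII viewer prints the grid and then the matrix:

     GMatView_Triangular_2D(gmat, PETSC_VIEWER_STDOUT_(grid->comm));
*/
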
int GMatEvaluateALEOperatorGalerkin_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                                  LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                                  LocalVarOrdering tLocOrder, int op, PetscScalar alpha, MatAssemblyType type,
                                                  void *ctx)
{
  Mesh         mesh        = grid->mesh;
  int          numElements = mesh->numFaces;
  int          sElemSize   = sLocOrder->elemSize;
  int          tElemSize   = tLocOrder->elemSize;
  int         *sElemStart  = sLocOrder->elemStart;
  int         *tElemStart  = tLocOrder->elemStart;
  ElementVec   ghostVec    = grid->ghostElementVec; /* Local solution vector */
  PetscScalar *ghostArray  = ghostVec->array;       /* The values in the ghost element vector */
  MeshMover    mover;
  Grid         ALEGrid;                             /* The grid describing the mesh velocity */
  ElementMat   mat;                                 /* The element matrix */
  PetscScalar *array;                               /* The values in the element matrix */
  ElementVec   MeshALEVec;                          /* The ALE velocity vector with mesh discretization */
  ElementVec   ALEVec;                              /* The ALE velocity vector */
  PetscScalar *ALEArray;                            /* The values in the ALE element vector */
  int          sField, tField;
  int          f, elem;
#ifdef PETSC_USE_BOPT_g
  int          i, j;
  PetscTruth   opt;
#endif
  int          ierr;
  MeshGetMover(mesh, &mover);
  MeshMoverGetVelocityGrid(mover, &ALEGrid);
  /* Setup element matrix */
  ierr  = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array = mat->array;

  /* Setup ALE variables */
  if (grid->ALEActive == PETSC_TRUE) {
    /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
    MeshALEVec = ALEGrid->vec;
    ALEVec     = grid->vec;
    ALEArray   = ALEVec->array;
  } else {
    MeshALEVec = PETSC_NULL;
    ALEVec     = PETSC_NULL;
    ALEArray   = PETSC_NULL;
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }
  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);

    /* Setup global row and column indices */
    GridCalcLocalElementVecIndices(grid, elem, ghostVec);

    /* Setup local solution vector */
    GridLocalToElement(grid, ghostVec);

    /* Setup ALE variables */
    if (grid->ALEActive == PETSC_TRUE) {
      GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
      GridLocalToElement(ALEGrid, MeshALEVec);
    }

    for(f = 0; f < numFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
      DiscretizationEvaluateALEOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                                op, alpha, elem, &ghostArray[sElemStart[sField]], ALEArray, array, ctx);
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, " %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, " %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }
  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  /* Reset size functions */
  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}
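
/*
   Note: the Galerkin evaluators below all share the assembly skeleton used
   above: zero the element matrix, gather the local (ghost) solution, let each
   field's Discretization accumulate into the dense element matrix, compute
   global indices, and ElementMatSetValues() into M with ADD_VALUES, followed
   by a single MatAssemblyBegin/MatAssemblyEnd pair.
*/
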
int GMatEvaluateOperatorGalerkin_Triangular_2D(Grid grid, GMat M, GVec x, VarOrdering sOrder, LocalVarOrdering sLocOrder,
                                               VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
                                               MatAssemblyType type, void *ctx)
{
  Mesh          mesh          = grid->mesh;
  PetscTruth    reduceSystem  = grid->reduceSystem;
  PetscTruth    reduceElement = grid->reduceElement;
  int           sElemSize     = sLocOrder->elemSize;
  int           tElemSize     = tLocOrder->elemSize;
  int          *sElemStart    = sLocOrder->elemStart;
  int          *tElemStart    = tLocOrder->elemStart;
  FieldClassMap sMap, tMap;
  int           numSFields, numTFields;
  int          *sFields, *tFields;
  PetscTruth    sConstrained, tConstrained;
  Vec           ghostVec;     /* The local ghost vector for x (usually the solution) */
  VecScatter    ghostScatter; /* The scatter from x to ghostVec */
  ElementMat    mat;
  ElementVec    elemGhostVec;
  PetscScalar  *ghostArray, *array;
  int           numElements;
  int           sField, tField;
  int           f, elem;
#ifdef PETSC_USE_BOPT_g
  PetscTruth    opt;
#endif
  int           ierr;
  MeshGetInfo(mesh, PETSC_NULL, PETSC_NULL, PETSC_NULL, &numElements);
  VarOrderingGetClassMap(sOrder, &sMap);
  VarOrderingGetClassMap(tOrder, &tMap);
  numSFields   = sMap->numFields;
  sFields      = sMap->fields;
  sConstrained = sMap->isConstrained;
  numTFields   = tMap->numFields;
  tFields      = tMap->fields;
  tConstrained = tMap->isConstrained;
  /* Setup reduction */
  (*grid->ops->gridsetupghostscatter)(grid, tOrder, &ghostVec, &ghostScatter);
  /* Setup element vector and matrix */
  if (tConstrained == PETSC_TRUE) {
    for(f = 0; f < numTFields; f++) {
      if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
        tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
    }
  }
  if (sConstrained == PETSC_TRUE) {
    for(f = 0; f < numSFields; f++) {
      if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
        sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    }
  }
  ierr       = ElementVecCreate(grid->comm, tElemSize, &elemGhostVec);
  ghostArray = elemGhostVec->array;
  ierr       = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array      = mat->array;
  ierr       = ElementVecZero(elemGhostVec);

  /* Fill the local solution vectors */
  if (x != PETSC_NULL) {
    GridGlobalToLocalGeneral(grid, x, ghostVec, INSERT_VALUES, ghostScatter);
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numSFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }
  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);
    mat->reduceRowSize       = tLocOrder->elemSize;
    mat->reduceColSize       = sLocOrder->elemSize;
    elemGhostVec->reduceSize = tLocOrder->elemSize;

    if (x != PETSC_NULL) {
      /* Setup local row indices for the ghost vector */
      GridCalcGeneralElementVecIndices(grid, elem, tOrder, PETSC_NULL, PETSC_TRUE, elemGhostVec);
      /* Setup local solution vector */
      GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
      /* Must transform to unconstrained variables for element integrals */
      GridProjectElementVec(grid, mesh, elem, tOrder, PETSC_FALSE, elemGhostVec);
    }
    for(f = 0; f < numSFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      DiscretizationEvaluateOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                             op, alpha, elem, &ghostArray[sElemStart[sField]], array, ctx);
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }
  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  VecDestroy(ghostVec);
  VecScatterDestroy(ghostScatter);
  ElementVecDestroy(elemGhostVec);
  ElementMatDestroy(mat);

  return(0);
}

int GMatEvaluateALEConstrainedOperatorGalerkin_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                                             LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                                             LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
                                                             MatAssemblyType type, void *ctx)
{
  Mesh         mesh        = grid->mesh;
  int          numElements = mesh->numFaces;
  int          sElemSize   = sLocOrder->elemSize;
  int          tElemSize   = tLocOrder->elemSize;
  int         *sElemStart  = sLocOrder->elemStart;
  int         *tElemStart  = tLocOrder->elemStart;
  ElementVec   ghostVec    = grid->ghostElementVec; /* Local solution vector */
  PetscScalar *ghostArray  = ghostVec->array;       /* The values in the ghost element vector */
  MeshMover    mover;
  Grid         ALEGrid;                             /* The grid describing the mesh velocity */
  ElementMat   mat;                                 /* The element matrix */
  PetscScalar *array;                               /* The values in the element matrix */
  ElementVec   MeshALEVec;                          /* The ALE velocity vector with mesh discretization */
  ElementVec   ALEVec;                              /* The ALE velocity vector */
  PetscScalar *ALEArray;                            /* The values in the ALE element vector */
  int          sField, tField;
  int          f, elem;
#ifdef PETSC_USE_BOPT_g
  PetscTruth   opt;
  int          i, j;
#endif
  int          ierr;
  MeshGetMover(mesh, &mover);
  MeshMoverGetVelocityGrid(mover, &ALEGrid);
  /* Setup element matrix */
  for(f = 0; f < numFields; f++) {
    if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
      sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
      tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
  }
  ierr  = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array = mat->array;

  /* Setup ALE variables -- No new variables should be ALE so ALEVec is not recalculated */
  if (grid->ALEActive == PETSC_TRUE) {
    /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
    MeshALEVec = ALEGrid->vec;
    ALEVec     = grid->vec;
    ALEArray   = ALEVec->array;
  } else {
    MeshALEVec = PETSC_NULL;
    ALEVec     = PETSC_NULL;
    ALEArray   = PETSC_NULL;
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }
  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);
    mat->reduceRowSize = tLocOrder->elemSize;
    mat->reduceColSize = sLocOrder->elemSize;

    /* Setup global row and column indices */
    GridCalcLocalElementVecIndices(grid, elem, ghostVec);

    /* Setup local solution vector */
    GridLocalToElement(grid, ghostVec);

    /* Setup ALE variables */
    if (grid->ALEActive == PETSC_TRUE) {
      GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
      GridLocalToElement(ALEGrid, MeshALEVec);
    }

    for(f = 0; f < numFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
      DiscretizationEvaluateALEOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                                op, alpha, elem, &ghostArray[sElemStart[sField]], ALEArray, array, ctx);
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, " %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, " %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }
  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  /* Reset size functions */
  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}
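
/*
   Note: the constrained variant above differs from
   GMatEvaluateALEOperatorGalerkin_Triangular_2D() only in that the element
   sizes are enlarged by funcs*constraintCompDiff for constrained fields and
   that reduceRowSize/reduceColSize are pinned to the unconstrained element
   sizes before index calculation.
*/
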
int GMatEvaluateNewFields_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                        LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                        LocalVarOrdering tLocOrder, PetscScalar alpha, MatAssemblyType type, void *ctx)
{
  VarOrdering           constOrder = grid->constraintOrder; /* The constrained variable ordering */
  PetscConstraintObject constCtx   = grid->constraintCtx;   /* The constraint object */
  int                   sElemSize  = 0;
  int                   tElemSize  = 0;
  ElementMat            mat;                                /* The element matrix */
  int                   f, newField;
#ifdef PETSC_USE_BOPT_g
  int                   i, j;
  PetscTruth            opt;
#endif
  int                   ierr;
  /* Setup element matrix */
  for(f = 0; f < numFields; f++) {
    if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
      sElemSize += grid->fields[sFields[f]].disc->comp + grid->fields[sFields[f]].constraintCompDiff;
    if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
      tElemSize += grid->fields[tFields[f]].disc->comp + grid->fields[tFields[f]].constraintCompDiff;
  }
  ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  for(newField = 0; newField < grid->numNewFields; newField++) {
    /* Initialize element matrix */
    ElementMatZero(mat);

    /* Calculate the indices and contribution to the element matrix from the new field */
    (*constCtx->ops->newelemmat)(constCtx, constOrder, newField, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, " %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, " %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}

int GMatEvaluateBoundaryOperatorGalerkin_Triangular_2D(Grid grid, GMat M, GVec x, VarOrdering sOrder, LocalVarOrdering sLocOrder,
                                                       VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
                                                       MatAssemblyType type, void *ctx)
{
  Mesh                     mesh          = grid->mesh;
  Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
  Partition                p             = mesh->part;
  Partition_Triangular_2D *q             = (Partition_Triangular_2D *) p->data;
  PetscTruth               reduceSystem  = grid->reduceSystem;
  PetscTruth               reduceElement = grid->reduceElement;
  int                      sElemSize     = sLocOrder->elemSize;
  int                      tElemSize     = tLocOrder->elemSize;
  int                     *sElemStart    = sLocOrder->elemStart;
  int                     *tElemStart    = tLocOrder->elemStart;
  int                      numEdges      = mesh->numEdges;
  int                     *bdEdges       = tri->bdEdges;
  int                     *firstEdge     = q->firstEdge;
  int                      rank          = p->rank;
  FieldClassMap            sMap, tMap;
  int                      numSFields, numTFields;
  int                     *sFields, *tFields;
  PetscTruth               sConstrained, tConstrained;
  Vec                      ghostVec;     /* The local ghost vector for x (usually the solution) */
  VecScatter               ghostScatter; /* The scatter from x to ghostVec */
  ElementMat               mat;
  ElementVec               elemGhostVec;
  PetscScalar             *array;
  EdgeContext              bdCtx;        /* A context wrapper to communicate the midnode of an edge */
  int                      sField, tField;
  int                      f, bd, edge, bdEdge, midNode;
#ifdef PETSC_USE_BOPT_g
  PetscTruth               opt;
#endif
  int                      ierr;
  VarOrderingGetClassMap(sOrder, &sMap);
  VarOrderingGetClassMap(tOrder, &tMap);
  numSFields   = sMap->numFields;
  sFields      = sMap->fields;
  sConstrained = sMap->isConstrained;
  numTFields   = tMap->numFields;
  tFields      = tMap->fields;
  tConstrained = tMap->isConstrained;
  /* Setup reduction */
  (*grid->ops->gridsetupghostscatter)(grid, tOrder, &ghostVec, &ghostScatter);
  /* Setup element vector and matrix */
  if (tConstrained == PETSC_TRUE) {
    for(f = 0; f < numTFields; f++) {
      if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
        tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
    }
  }
  if (sConstrained == PETSC_TRUE) {
    for(f = 0; f < numSFields; f++) {
      if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
        sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    }
  }
  ElementVecCreate(grid->comm, tElemSize, &elemGhostVec);
  ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  ElementVecZero(elemGhostVec);
  array = mat->array;

  /* Setup user context */
  bdCtx.ctx = ctx;

  /* Fill the local solution vectors */
  if (x != PETSC_NULL) {
    GridGlobalToLocalGeneral(grid, x, ghostVec, INSERT_VALUES, ghostScatter);
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numSFields; f++) {
    grid->fields[sFields[f]].disc->bdDisc->operators[op]->test = grid->fields[tFields[f]].disc;
  }
  /* Our problem here is that "edges" are not data structures like "elements". The element
     holds the midnodes which appear on it, but edges do not. Thus we must pass the midnode
     number to the discretization, which we do using a context wrapper. Unfortunately, the
     row indices were derived from elements, so we must introduce another numbering function
     which operates on nodes alone. The midnode number is found by a search of the elements,
     which could certainly be improved with geometric hints. We might also assume that it
     is the node lying between the two endpoints in the bdNodes[] array. In addition, the
     boundary variable ordering is relative to boundary node numbers, so the node
     number must be converted before calling the numbering function. This could be sped up
     by placing boundary node numbers in the bdEdges[] array instead. */
  /* Loop over boundary edges */
  for(bd = 0, bdEdge = 0; bd < grid->numBd; bd++) {
    for(bdEdge = tri->bdEdgeBegin[bd]; bdEdge < tri->bdEdgeBegin[bd+1]; bdEdge++) {
      /* Check that the edge is on this processor */
      edge = bdEdges[bdEdge] - firstEdge[rank];
      if ((edge < 0) || (edge >= numEdges)) continue;

      MeshGetMidnodeFromEdge(mesh, edge, &midNode);
      bdCtx.midnode = midNode;

      /* Initialize element matrix */
      ElementMatZero(mat);
      mat->reduceRowSize       = tLocOrder->elemSize;
      mat->reduceColSize       = sLocOrder->elemSize;
      elemGhostVec->reduceSize = tLocOrder->elemSize;

      if (x != PETSC_NULL) {
        /* Setup local row indices for the ghost vector */
        GridCalcBoundaryElementVecIndices(grid, bd, edge, midNode, tOrder, PETSC_TRUE, elemGhostVec);
        /* Setup local solution vector */
        GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
        /* Must transform to unconstrained variables for element integrals */
        GridProjectElementVec(grid, mesh, edge, tOrder, PETSC_FALSE, elemGhostVec);
        SETERRQ(PETSC_ERR_SUP, "Being reworked");
      }
      for(f = 0; f < numSFields; f++) {
        sField = sFields[f];
        tField = tFields[f];
        /* Calculate the contribution to the element matrix from the field */
        DiscretizationEvaluateOperatorGalerkin(grid->fields[sField].disc->bdDisc, mesh, sElemSize, tElemStart[tField],
                                               sElemStart[sField], op, alpha, edge, PETSC_NULL, array, &bdCtx);
      }

      /* Setup global row and column indices */
      GridCalcBoundaryElementMatIndices(grid, bd, edge, midNode, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
      PetscOptionsHasName(PETSC_NULL, "-trace_mat_bd_assembly", &opt);
      if (opt == PETSC_TRUE) {
        ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
      }
#endif
      /* Put values in the global matrix */
      ElementMatSetValues(mat, M, ADD_VALUES);
    }
  }
#ifdef PETSC_USE_BOPT_g
  if (bdEdge != mesh->numBdEdges) SETERRQ(PETSC_ERR_PLIB, "Invalid boundary edge numbering");
#endif

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  VecDestroy(ghostVec);
  VecScatterDestroy(ghostScatter);
  ElementVecDestroy(elemGhostVec);
  ElementMatDestroy(mat);

  return(0);
}
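
/*
   Illustrative flow of the boundary assembly above (not part of the original
   source): for each boundary edge owned by this process, the midnode is found
   and passed to the discretization through the EdgeContext wrapper:

     EdgeContext bdCtx;
     bdCtx.ctx = userCtx;                          // the user's operator context
     MeshGetMidnodeFromEdge(mesh, edge, &midNode); // search described above
     bdCtx.midnode = midNode;
*/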