Actual source code: gvec2d.c
1: #ifdef PETSC_RCS_HEADER
2: static char vcid[] = "$Id: gvec2d.c,v 1.22 2000/10/08 00:27:05 knepley Exp $";
3: #endif
5: /* Implements FE vectors derived from 2d triangular grids */
6: #include "petscsles.h" /* For ALE Operators */
7: #include "src/gvec/gvecimpl.h" /*I "gvec.h" I*/
8: #include "src/mesh/impls/triangular/triimpl.h"
9: #include "gvec2d.h"
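/* Usage sketch (editorial addition, not part of the original file): the evaluation
   routines below apply a user-supplied PointFunction whose signature is inferred
   from the calls made in this file, roughly

       int myPointFunction(int numPoints, int numComp, double *x, double *y, double *z,
                           PetscScalar *values, void *ctx);

   Each routine calls it with one point per node and scales the result by alpha; the
   collective variants also call it with zero points on processes that have exhausted
   their local nodes or elements, so collective work inside the function stays
   synchronized. */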
11: #undef __FUNCT__
12: #define __FUNCT__ "GVecGetLocalGVec_Triangular_2D"
13: int GVecGetLocalGVec_Triangular_2D(GVec g, GVec *gvec) {
14: SETERRQ(PETSC_ERR_SUP, " ");
15: }
17: #undef __FUNCT__
18: #define __FUNCT__ "GVecRestoreLocalGVec_Triangular_2D"
19: int GVecRestoreLocalGVec_Triangular_2D(GVec g, GVec *gvec) {
20: SETERRQ(PETSC_ERR_SUP, " ");
21: }
23: #undef __FUNCT__
24: #define __FUNCT__ "GVecGlobalToLocal_Triangular_2D"
25: int GVecGlobalToLocal_Triangular_2D(GVec g, InsertMode mode, GVec l) {
26: SETERRQ(PETSC_ERR_SUP, " ");
27: }
29: #undef __FUNCT__
30: #define __FUNCT__ "GVecLocalToGlobal_Triangular_2D"
31: int GVecLocalToGlobal_Triangular_2D(GVec l, InsertMode mode, GVec g) {
32: SETERRQ(PETSC_ERR_SUP, " ");
33: }
35: #undef __FUNCT__
36: #define __FUNCT__ "GVecEvaluateFunction_Triangular_2D"
37: int GVecEvaluateFunction_Triangular_2D(Grid grid, GVec v, VarOrdering order, PointFunction f, PetscScalar alpha, void *ctx) {
38: Mesh mesh;
39: Partition part;
40: int **localStart = order->localStart;
41: FieldClassMap map;
42: int numNodes, numFields;
43: int *fields, **fieldClasses, *classes, *classSizes;
44: int nodeVars;
45: PetscScalar *array;
46: double x, y, z;
47: int size, locSize, overlapSize;
48: int fieldIdx, field, node, nclass, count;
49: int ierr;
52: GridGetMesh(grid, &mesh);
53: MeshGetPartition(mesh, &part);
54: VarOrderingGetClassMap(order, &map);
55: numNodes = map->numNodes;
56: numFields = map->numFields;
57: fields = map->fields;
58: fieldClasses = map->fieldClasses;
59: classes = map->classes;
60: classSizes = map->classSizes;
61: /* Check for a locally ghosted vector */
62: VecGetArray(v, &array);
63: VecGetLocalSize(v, &locSize);
64: /* VecGetGhostSize(v, &overlapSize); */
65: overlapSize = locSize + ((Vec_MPI *) v->data)->nghost;
66: size = order->numLocVars;
67: #if 0
68: /* This doesn't work for constrained vectors, since it conflicts with the check on count:
69:    variables generated by constraints are not handled here
70: */
71: if (locSize != order->numLocVars)
72: SETERRQ2(PETSC_ERR_ARG_WRONG, "Wrong vector size %d should be %d", locSize, order->numLocVars);
73: #endif
74: if (overlapSize > locSize) {
75: PartitionGetNumOverlapNodes(part, &numNodes);
76: size = order->numOverlapVars;
77: if (overlapSize != order->numOverlapVars) SETERRQ(PETSC_ERR_ARG_WRONG, "Wrong size for vector");
78: }
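/* Walk the locally owned (and, for ghosted vectors, overlapped) nodes: for each node,
   evaluate f once per field that is active for the node's class, writing into the
   variable slot given by localStart[field][nclass]; count must end equal to size. */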
79: for(node = 0, count = 0; node < numNodes; node++, count += nodeVars) {
80: nclass = classes[node];
81: nodeVars = classSizes[nclass];
82: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
83: field = fields[fieldIdx];
84: if (fieldClasses[fieldIdx][nclass] == 0) continue;
85: MeshGetNodeCoords(mesh, node, &x, &y, &z);
86: (*f)(1, grid->fields[field].numComp, &x, &y, &z, &array[count+localStart[field][nclass]], ctx);
87: array[count+localStart[field][nclass]] *= alpha;
88: }
89: }
90: if (count != size) SETERRQ(PETSC_ERR_PLIB, "Invalid variable offset records");
91: VecRestoreArray(v, &array);
92: return(0);
93: }
95: #undef __FUNCT__
96: #define __FUNCT__ "GVecEvaluateFunctionBoundary_Triangular_2D"
97: int GVecEvaluateFunctionBoundary_Triangular_2D(Grid grid, GVec v, int bd, VarOrdering order, PointFunction func,
98: PetscScalar alpha, void *ctx)
99: {
100: Mesh mesh = grid->mesh;
101: int **localStart = order->localStart;
102: int *offsets = order->offsets;
103: int *localOffsets = order->localOffsets;
104: int firstVar = order->firstVar[mesh->part->rank];
105: FieldClassMap map;
106: int numNodes, numFields;
107: int *fields, **fieldClasses;
108: PetscScalar *array; /* The local vector values */
109: double x, y, z;
110: int f, field, node, nclass, row;
111: int ierr;
114: VarOrderingGetClassMap(order, &map);
115: numNodes = map->numNodes;
116: numFields = map->numFields;
117: fields = map->fields;
118: fieldClasses = map->fieldClasses;
119: /* Loop over boundary nodes */
120: VecGetArray(v, &array);
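/* Rows for owned boundary nodes are global offsets shifted to local numbering by
   firstVar; ghost nodes (node >= numNodes) are instead addressed through localOffsets. */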
121: for(f = 0; f < numFields; f++) {
122: field = fields[f];
123: (*grid->ops->getboundarystart)(grid, bd, f, PETSC_FALSE, map, &node, &nclass);
124: while(node >= 0) {
125: if (node >= numNodes) {
126: row = localOffsets[node-numNodes];
127: } else {
128: row = offsets[node] - firstVar + localStart[field][nclass];
129: }
130: MeshGetNodeCoords(mesh, node, &x, &y, &z);
131: if (fieldClasses[f][nclass] != 0) {
132: (*func)(1, grid->fields[field].numComp, &x, &y, &z, &array[row], ctx);
133: array[row] *= alpha;
134: }
135: (*grid->ops->getboundarynext)(grid, bd, f, PETSC_FALSE, map, &node, &nclass);
136: #ifdef PETSC_USE_BOPT_g
137: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
138: #endif
139: }
140: }
141: VecRestoreArray(v, &array);
142: return(0);
143: }
145: #undef __FUNCT__
146: #define __FUNCT__ "GVecEvaluateFunctionCollective_Triangular_2D"
147: int GVecEvaluateFunctionCollective_Triangular_2D(Grid grid, GVec v, VarOrdering order, PointFunction f, PetscScalar alpha,
148: void *ctx)
149: {
150: Mesh mesh;
151: FieldClassMap map;
152: int **localStart = order->localStart;
153: int numNodes, numFields;
154: int *fields, **fieldClasses, *classes, *classSizes;
155: int nodeVars, comp;
156: PetscScalar *array;
157: double x, y, z;
158: int maxNodes; /* The most nodes in any domain */
159: int fieldIdx, field, node, nclass, count;
160: int ierr;
163: GridGetMesh(grid, &mesh);
164: VarOrderingGetClassMap(order, &map);
165: numNodes = map->numNodes;
166: numFields = map->numFields;
167: fields = map->fields;
168: fieldClasses = map->fieldClasses;
169: classes = map->classes;
170: classSizes = map->classSizes;
171: MPI_Allreduce(&numNodes, &maxNodes, 1, MPI_INT, MPI_MAX, grid->comm);
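/* Every process loops maxNodes times so that a collective PointFunction is invoked
   the same number of times on all ranks; ranks past their last local node, or fields
   inactive for a node's class, still call f, but with zero points. */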
172: VecGetArray(v, &array);
173: for(node = 0, count = 0; node < maxNodes; node++) {
174: if (node < numNodes) {
175: nclass = classes[node];
176: nodeVars = classSizes[nclass];
177: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
178: field = fields[fieldIdx];
179: comp = grid->fields[field].numComp;
180: if (fieldClasses[fieldIdx][nclass] == 0) {
181: /* We have to make sure that every processor is available at each iteration */
182: (*f)(0, 0, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, ctx);
183: continue;
184: }
185: MeshGetNodeCoords(mesh, node, &x, &y, &z);
186: (*f)(1, comp, &x, &y, &z, &array[count+localStart[field][nclass]], ctx);
187: array[count+localStart[field][nclass]] *= alpha;
188: }
189: count += nodeVars;
190: } else {
191: /* We have to make sure that every processor is available at each iteration */
192: (*f)(0, 0, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, ctx);
193: }
194: }
195: if (count != order->numLocVars) {
196: SETERRQ2(PETSC_ERR_PLIB, "Invalid number of variables modified %d should be %d", count, order->numLocVars);
197: }
198: VecRestoreArray(v, &array);
199: return(0);
200: }
202: #undef __FUNCT__
203: #define __FUNCT__ "GVecEvaluateFunctionGalerkin_Triangular_2D"
204: int GVecEvaluateFunctionGalerkin_Triangular_2D(Grid grid, GVec v, int numFields, int *fields, LocalVarOrdering locOrder,
205: PointFunction f, PetscScalar alpha, void *ctx)
206: {
207: Mesh mesh = grid->mesh;
208: int numElements = mesh->numFaces;
209: ElementVec vec = grid->vec;
210: int *elemStart = locOrder->elemStart;
211: PetscScalar *array = vec->array;
212: int field, fieldIdx, elem;
213: #ifdef PETSC_USE_BOPT_g
214: PetscTruth opt;
215: #endif
216: int ierr;
219: /* Loop over elements */
220: for(elem = 0; elem < numElements; elem++) {
221: /* Initialize element vector */
222: ElementVecZero(vec);
224: /* Get contribution to the element vector from each discretization */
225: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
226: field = fields[fieldIdx];
227: ierr = DiscretizationEvaluateFunctionGalerkin(grid->fields[field].disc, mesh, f, alpha, elem, &array[elemStart[field]], ctx);
228: CHKERRQ(ierr);
229: #ifdef PETSC_USE_BOPT_g
230: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
231: #endif
232: }
234: /* Setup global row and column indices */
235: GridCalcElementVecIndices(grid, elem, vec);
236: #ifdef PETSC_USE_BOPT_g
237: PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
238: if (opt == PETSC_TRUE) {
239: int var;
241: for(var = 0; var < vec->reduceSize; var++)
242: PetscPrintf(PETSC_COMM_SELF, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
243: }
244: #endif
245: /* Put values in global vector */
246: ElementVecSetValues(vec, v, ADD_VALUES);
247: }
249: VecAssemblyBegin(v);
250: VecAssemblyEnd(v);
251: return(0);
252: }
254: #undef __FUNCT__
255: #define __FUNCT__ "GVecEvaluateFunctionGalerkinCollective_Triangular_2D"
256: int GVecEvaluateFunctionGalerkinCollective_Triangular_2D(Grid grid, GVec v, int numFields, int *fields,
257: LocalVarOrdering locOrder, PointFunction f,
258: PetscScalar alpha, void *ctx)
259: {
260: Mesh mesh = grid->mesh;
261: int numElements = mesh->numFaces;
262: ElementVec vec = grid->vec;
263: int *elemStart = locOrder->elemStart;
264: PetscScalar *array = vec->array;
265: int maxElements;
266: int field, fieldIdx, elem;
267: #ifdef PETSC_USE_BOPT_g
268: PetscTruth opt;
269: #endif
270: int ierr;
273: MPI_Allreduce(&numElements, &maxElements, 1, MPI_INT, MPI_MAX, grid->comm);
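/* As in the collective node evaluation above, all ranks loop to maxElements; ranks
   with no element left still call the discretization, with element -1 and a zero
   multiplier, so that collective functions inside f remain synchronized. */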
274: /* Loop over elements */
275: for(elem = 0; elem < maxElements; elem++) {
276: if (elem < numElements) {
277: /* Initialize element vector */
278: ElementVecZero(vec);
280: /* Get contribution to the element vector from each discretization */
281: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
282: field = fields[fieldIdx];
283: ierr = DiscretizationEvaluateFunctionGalerkin(grid->fields[field].disc, mesh, f, alpha, elem, &array[elemStart[field]], ctx);
284: CHKERRQ(ierr);
285: #ifdef PETSC_USE_BOPT_g
286: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
287: #endif
288: }
290: /* Setup global row and column indices */
291: GridCalcElementVecIndices(grid, elem, vec);
292: #ifdef PETSC_USE_BOPT_g
293: PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
294: if (opt == PETSC_TRUE) {
295: int var;
297: for(var = 0; var < vec->reduceSize; var++)
298: PetscPrintf(PETSC_COMM_SELF, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
299: }
300: #endif
301: /* Put values in global vector */
302: ElementVecSetValues(vec, v, ADD_VALUES);
303: } else {
304: /* We have to make sure that every processor is available at each call to f */
305: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
306: field = fields[fieldIdx];
307: ierr = DiscretizationEvaluateFunctionGalerkin(grid->fields[field].disc, mesh, f, 0.0, -1, PETSC_NULL, ctx);
308: }
309: }
310: }
312: VecAssemblyBegin(v);
313: VecAssemblyEnd(v);
314: return(0);
315: }
317: #undef __FUNCT__
318: #define __FUNCT__ "GVecEvaluateBoundaryFunctionGalerkin_Triangular_2D"
319: int GVecEvaluateBoundaryFunctionGalerkin_Triangular_2D(Grid grid, GVec v, int numFields, int *fields,
320: LocalVarOrdering locOrder, PointFunction f, PetscScalar alpha, void *ctx)
321: {
322: Mesh mesh = grid->mesh;
323: Partition part;
324: Mesh_Triangular *tri = (Mesh_Triangular *) grid->mesh->data;
325: int elemSize = locOrder->elemSize;
326: int *elemStart = locOrder->elemStart;
327: int numEdges = mesh->numEdges;
328: int *bdEdges = tri->bdEdges;
329: int firstEdge;
330: ElementVec vec; /* The element vector */
331: PetscScalar *array; /* The values in the element vector */
332: EdgeContext bdCtx; /* A context wrapper to communicate the midnode of an edge */
333: int field, edge, midNode;
334: int fieldIdx, bd, bdEdge;
335: #ifdef PETSC_USE_BOPT_g
336: PetscTruth opt;
337: #endif
338: int ierr;
341: /* Setup element vector for the lower dimensional system */
342: ierr = ElementVecCreate(grid->comm, elemSize, &vec);
343: array = vec->array;
345: /* Setup user context */
346: bdCtx.ctx = ctx;
348: /* Our problem here is that "edges" are not data structures like "elements". The element
349: holds the midnodes which appear on it, but edges do not. Thus we must pass the midnode
350: number to the discretization, which we do using a context wrapper. Unfortunately, the
351: row indices were derived from elements, so we must introduce another numbering function
352: which operates on nodes alone. The midnode number is found by a search of the elements
353: which could certainly be improved with geometric hints. We might also assume that it
354: is the node lying between the two endpoints in the bdNodes[] array. In addition, the
355: boundary variable ordering is in relation to boundary node numbers, so that the node
356: number must be converted before calling the numbering function. This could be speeded up
357: by placing boundary node numbers in the bdEdges[] array instead. */
359: /* Loop over boundary edges */
360: MeshGetPartition(mesh, &part);
361: PartitionGetStartEdge(part, &firstEdge);
362: for(bd = 0, bdEdge = 0; bd < grid->numBd; bd++) {
363: for(bdEdge = tri->bdEdgeBegin[bd]; bdEdge < tri->bdEdgeBegin[bd+1]; bdEdge++) {
364: /* Check that edge is on this processor */
365: edge = bdEdges[bdEdge] - firstEdge;
366: if ((edge < 0) || (edge >= numEdges)) continue;
368: /* Search for midnode on edge */
369: midNode = -1;
370: MeshGetMidnodeFromEdge(mesh, edge, &midNode);
371: bdCtx.midnode = midNode;
373: /* Initialize element vector */
374: ElementVecZero(vec);
376: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
377: field = fields[fieldIdx];
378: DiscretizationEvaluateFunctionGalerkin(grid->fields[field].disc->bdDisc, mesh, f, alpha, edge, &array[elemStart[field]], &bdCtx);
380: #ifdef PETSC_USE_BOPT_g
381: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
382: #endif
383: }
385: /* Setup global row and column indices */
386: GridCalcBoundaryElementVecIndices(grid, bd, edge, midNode, grid->bdOrder, PETSC_FALSE, vec);
387: #ifdef PETSC_USE_BOPT_g
388: PetscOptionsHasName(PETSC_NULL, "-trace_vec_bd_assembly", &opt);
389: if (opt == PETSC_TRUE) {
390: int var;
392: for(var = 0; var < vec->reduceSize; var++)
393: PetscPrintf(PETSC_COMM_SELF, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
394: }
395: #endif
396: /* Put values in global vector */
397: ElementVecSetValues(vec, v, ADD_VALUES);
398: }
399: }
400: #ifdef PETSC_USE_BOPT_g
401: if (bdEdge != mesh->numBdEdges) SETERRQ(PETSC_ERR_PLIB, "Invalid boundary edge numbering");
402: #endif
404: VecAssemblyBegin(v);
405: VecAssemblyEnd(v);
407: /* Cleanup */
408: ElementVecDestroy(vec);
410: return(0);
411: }
413: #undef __FUNCT__
414: #define __FUNCT__ "GVecEvaluateBoundaryFunctionGalerkinCollective_Triangular_2D"
415: int GVecEvaluateBoundaryFunctionGalerkinCollective_Triangular_2D(Grid grid, GVec v, int numFields, int *fields,
416: LocalVarOrdering locOrder, PointFunction f,
417: PetscScalar alpha, void *ctx)
418: {
419: Mesh mesh = grid->mesh;
420: Partition part;
421: Mesh_Triangular *tri = (Mesh_Triangular *) mesh->data;
422: int elemSize = locOrder->elemSize;
423: int *elemStart = locOrder->elemStart;
424: int numEdges = mesh->numEdges;
425: int *bdEdges = tri->bdEdges;
426: int firstEdge;
427: ElementVec vec; /* The element vector */
428: PetscScalar *array; /* The values in the element vector */
429: EdgeContext bdCtx; /* A context wrapper to communicate the midnode of an edge */
430: int field, edge, midnode;
431: int fieldIdx, bd, bdEdge;
432: #ifdef PETSC_USE_BOPT_g
433: PetscTruth opt;
434: #endif
435: int ierr;
438: /* Setup element vector for the lower dimensional system */
439: ierr = ElementVecCreate(grid->comm, elemSize, &vec);
440: array = vec->array;
442: /* Setup user context */
443: bdCtx.ctx = ctx;
445: /* Our problem here is that "edges" are not data structures like "elements". The element
446: holds the midnodes which appear on it, but edges do not. Thus we must pass the midnode
447: number to the discretization, which we do using a context wrapper. Unfortunately, the
448: row indices were derived from elements, so we must introduce another numbering function
449: which operates on nodes alone. The midnode number is found by a search of the elements
450: which could certainly be improved with geometric hints. We might also assume that it
451: is the node lying between the two endpoints in the bdNodes[] array. In addition, the
452: boundary variable ordering is in relation to boundary node numbers, so that the node
453: number must be converted before calling the numbering function. This could be speeded up
454: by placing boundary node numbers in the bdEdges[] array instead. */
456: /* Loop over boundary edges */
457: MeshGetPartition(mesh, &part);
458: PartitionGetStartEdge(part, &firstEdge);
459: for(bd = 0, bdEdge = 0; bd < grid->numBd; bd++) {
460: for(bdEdge = tri->bdEdgeBegin[bd]; bdEdge < tri->bdEdgeBegin[bd+1]; bdEdge++) {
461: /* Check that edge is on this processor */
462: edge = bdEdges[bdEdge] - firstEdge;
463: if ((edge < 0) || (edge >= numEdges)) {
464: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
465: field = fields[fieldIdx];
466: ierr = DiscretizationEvaluateFunctionGalerkin(grid->fields[field].disc->bdDisc, mesh, f, 0.0, -1, PETSC_NULL, &bdCtx);
467: CHKERRQ(ierr);
468: }
469: continue;
470: }
472: /* Locate midnode on edge */
473: midnode = -1;
474: MeshGetMidnodeFromEdge(mesh, edge, &midnode);
475: bdCtx.midnode = midnode;
476: #ifdef PETSC_USE_BOPT_g
477: if (tri->markers[midnode] != tri->bdMarkers[bd])
478: SETERRQ4(PETSC_ERR_ARG_WRONG, "Invalid midnode %d has marker %d on boundary %d (%d)",
479: midnode, tri->markers[midnode], bd, tri->bdMarkers[bd]);
480: #endif
482: /* Initialize element vector */
483: ElementVecZero(vec);
485: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
486: field = fields[fieldIdx];
487: DiscretizationEvaluateFunctionGalerkin(grid->fields[field].disc->bdDisc, mesh, f, alpha, edge, &array[elemStart[field]], &bdCtx);
489: #ifdef PETSC_USE_BOPT_g
490: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
491: #endif
492: }
494: /* Setup global row and column indices */
495: GridCalcBoundaryElementVecIndices(grid, bd, edge, midnode, grid->bdOrder, PETSC_FALSE, vec);
496: #ifdef PETSC_USE_BOPT_g
497: PetscOptionsHasName(PETSC_NULL, "-trace_vec_bd_assembly", &opt);
498: if (opt == PETSC_TRUE) {
499: int var;
501: for(var = 0; var < vec->reduceSize; var++)
502: PetscPrintf(PETSC_COMM_SELF, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
503: }
504: #endif
505: /* Put values in global vector */
506: ElementVecSetValues(vec, v, ADD_VALUES);
507: }
508: }
509: #ifdef PETSC_USE_BOPT_g
510: if (bdEdge != mesh->numBdEdges) SETERRQ(PETSC_ERR_PLIB, "Invalid boundary edge numbering");
511: #endif
513: VecAssemblyBegin(v);
514: VecAssemblyEnd(v);
516: /* Cleanup */
517: ElementVecDestroy(vec);
519: return(0);
520: }
522: #undef __FUNCT__
523: #define __FUNCT__ "GVecEvaluateNonlinearOperatorGalerkin_Triangular_2D"
524: int GVecEvaluateNonlinearOperatorGalerkin_Triangular_2D(Grid grid, GVec v, GVec x, GVec y, int numFields, int *fields,
525: LocalVarOrdering locOrder, NonlinearOperator op, PetscScalar alpha,
526: PetscTruth isALE, void *ctx)
527: {
528: Mesh mesh;
529: Partition part;
530: MeshMover mover;
531: int *elemStart = locOrder->elemStart;
532: ElementVec vec = grid->vec;
533: PetscScalar *array = vec->array;
534: ElementVec ghostVec = grid->ghostElementVec; /* The local solution vector */
535: PetscScalar *ghostArray = ghostVec->array; /* The values in the ghost element vector */
536: PetscTruth reduceElement = grid->reduceElementArgs;
537: Grid ALEGrid; /* The grid describing the mesh velocity */
538: Vec appVec; /* The local vec for y */
539: ElementVec elemAppVec; /* The element vec for y */
540: PetscScalar *appArray; /* The values in elemAppVec */
541: ElementVec MeshALEVec; /* ALE velocity vector with mesh discretization */
542: ElementVec ALEVec; /* ALE velocity vector */
543: PetscScalar *ALEArray; /* The values in the ALE element vector */
544: PetscScalar *nonlinearArgs[2];
545: PetscTruth ALEActive;
546: int numElements;
547: int field, fieldIdx, elem;
548: #ifdef PETSC_USE_BOPT_g
549: PetscTruth opt;
550: #endif
551: int ierr;
554: GridGetMesh(grid, &mesh);
555: MeshGetPartition(mesh, &part);
556: if (grid->ALEActive && (isALE == PETSC_TRUE)) {
557: ALEActive = PETSC_TRUE;
558: MeshGetMover(mesh, &mover);
559: MeshMoverGetVelocityGrid(mover, &ALEGrid);
560: } else {
561: ALEActive = PETSC_FALSE;
562: }
563: /* Fill the local solution vectors */
564: if (x != PETSC_NULL) {
565: GridGlobalToLocal(grid, INSERT_VALUES, x);
566: }
567: VecDuplicate(grid->ghostVec, &appVec);
568: ElementVecDuplicate(ghostVec, &elemAppVec);
569: if (y != PETSC_NULL) {
570: GridGlobalToLocalGeneral(grid, y, appVec, INSERT_VALUES, grid->ghostScatter);
571: }
572: appArray = elemAppVec->array;
574: /* Setup ALE variables */
575: if (ALEActive == PETSC_TRUE) {
576: /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
577: MeshALEVec = ALEGrid->vec;
578: ierr = ElementVecDuplicate(grid->vec, &ALEVec);
579: ALEArray = ALEVec->array;
580: } else {
581: MeshALEVec = PETSC_NULL;
582: ALEArray = PETSC_NULL;
583: }
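/* Per element: compute local indices, gather the ghosted solution and the application
   vector y into element vectors, evaluate the nonlinear operator for each field
   (interpolating the mesh velocity first when ALE is active), and add the resulting
   element vector into the global vector v. */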
585: /* Loop over elements */
586: PartitionGetNumElements(part, &numElements);
587: for(elem = 0; elem < numElements; elem++) {
588: /* Initialize element vector */
589: ElementVecZero(vec);
591: /* Setup local row and column indices */
592: GridCalcLocalElementVecIndices(grid, elem, ghostVec);
593: ElementVecDuplicateIndices(ghostVec, elemAppVec);
595: /* Setup local solution vector */
596: GridLocalToElement(grid, ghostVec);
597: GridLocalToElementGeneral(grid, appVec, grid->bdReduceVecCur, grid->reduceSystem, reduceElement, elemAppVec);
599: /* Setup ALE variables */
600: if (ALEActive == PETSC_TRUE) {
601: GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
602: GridLocalToElement(ALEGrid, MeshALEVec);
603: }
605: /* Get contribution to the element vector from each discretization */
606: for(fieldIdx = 0; fieldIdx < numFields; fieldIdx++) {
607: field = fields[fieldIdx];
608: nonlinearArgs[0] = &ghostArray[elemStart[field]];
609: nonlinearArgs[1] = &appArray[elemStart[field]];
610: if (ALEActive == PETSC_TRUE)
611: {
612: GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, field, ALEVec);
613: DiscretizationEvaluateNonlinearALEOperatorGalerkin(grid->fields[field].disc, mesh, op, alpha, elem, 2, nonlinearArgs,
614: ALEArray, &array[elemStart[field]], ctx);
616: } else {
617: DiscretizationEvaluateNonlinearOperatorGalerkin(grid->fields[field].disc, mesh, op, alpha, elem, 2, nonlinearArgs,
618: &array[elemStart[field]], ctx);
620: }
621: #ifdef PETSC_USE_BOPT_g
622: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
623: #endif
624: }
626: /* Setup global row and column indices */
627: GridCalcElementVecIndices(grid, elem, vec);
628: #ifdef PETSC_USE_BOPT_g
629: PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
630: if (opt == PETSC_TRUE) {
631: int var;
633: for(var = 0; var < vec->reduceSize; var++)
634: PetscPrintf(PETSC_COMM_SELF, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
635: }
636: #endif
637: /* Put values in global vector */
638: ElementVecSetValues(vec, v, ADD_VALUES);
639: }
641: /* Cleanup ALE variables */
642: if (ALEActive == PETSC_TRUE) {
643: ElementVecDestroy(ALEVec);
644: }
645: /* Cleanup additional input vectors */
646: VecDestroy(appVec);
647: ElementVecDestroy(elemAppVec);
648: VecAssemblyBegin(v);
649: VecAssemblyEnd(v);
650: return(0);
651: }
653: #undef __FUNCT__
654: #define __FUNCT__ "GVecEvaluateOperatorGalerkin_Triangular_2D"
655: int GVecEvaluateOperatorGalerkin_Triangular_2D(Grid grid, GVec v, GVec x, GVec y, VarOrdering sOrder, LocalVarOrdering sLocOrder,
656: VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha, void *ctx)
657: {
658: Mesh mesh = grid->mesh;
659: PetscTruth reduceSystem = grid->reduceSystem;
660: PetscTruth reduceElement = grid->reduceElement;
661: int sElemSize = sLocOrder->elemSize;
662: int *sElemStart = sLocOrder->elemStart;
663: int tElemSize = tLocOrder->elemSize;
664: int *tElemStart = tLocOrder->elemStart;
665: FieldClassMap sMap, tMap;
666: int numSFields, numTFields;
667: int *sFields, *tFields;
668: PetscTruth sConstrained, tConstrained;
669: Vec ghostVec; /* The local ghost vector for x (usually the solution) */
670: VecScatter ghostScatter; /* The scatter from x to ghostVec */
671: Vec appVec; /* The local ghost vector for y (usually the application vector) */
672: VecScatter appScatter; /* The scatter from y to appVec */
673: ElementMat mat;
674: ElementVec elemGhostVec, elemAppVec, vec;
675: PetscScalar *ghostArray, *appArray, *matArray, *array;
676: int numElements;
677: int f, sField, tField, elem;
678: #ifdef PETSC_USE_BOPT_g
679: PetscTruth opt;
680: #endif
681: int ierr;
684: MeshGetInfo(mesh, PETSC_NULL, PETSC_NULL, PETSC_NULL, &numElements);
685: VarOrderingGetClassMap(sOrder, &sMap);
686: VarOrderingGetClassMap(tOrder, &tMap);
687: numSFields = sMap->numFields;
688: sFields = sMap->fields;
689: sConstrained = sMap->isConstrained;
690: numTFields = tMap->numFields;
691: tFields = tMap->fields;
692: tConstrained = tMap->isConstrained;
693: /* Setup reduction */
694: (*grid->ops->gridsetupghostscatter)(grid, tOrder, &ghostVec, &ghostScatter);
695: (*grid->ops->gridsetupghostscatter)(grid, sOrder, &appVec, &appScatter);
696: /* Setup element vector and matrix */
697: if (tConstrained == PETSC_TRUE) {
698: for(f = 0; f < numTFields; f++) {
699: if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
700: tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
701: }
702: }
703: if (sConstrained == PETSC_TRUE) {
704: for(f = 0; f < numSFields; f++) {
705: if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
706: sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
707: }
708: }
709: ierr = ElementVecCreate(grid->comm, tElemSize, &vec);
710: array = vec->array;
711: ierr = ElementVecDuplicate(vec, &elemGhostVec);
712: ghostArray = elemGhostVec->array;
713: ierr = ElementVecCreate(grid->comm, sElemSize, &elemAppVec);
714: appArray = elemAppVec->array;
715: ierr = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
716: matArray = mat->array;
718: /* Fill the local solution vectors */
719: GridGlobalToLocalGeneral(grid, x, ghostVec, INSERT_VALUES, ghostScatter);
720: GridGlobalToLocalGeneral(grid, y, appVec, INSERT_VALUES, appScatter);
722: /* Setup the operator with information about the test function space */
723: for(f = 0; f < numSFields; f++) {
724: grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
725: }
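/* Per element: gather x and y into element vectors, project constrained variables to
   their unconstrained form, and apply the operator matrix-free for each field pair;
   matArray appears to serve only as workspace for DiscretizationEvaluateOperatorGalerkinMF
   (an inference; the element matrix itself is never assembled into a global matrix). */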
727: /* Loop over elements */
728: for(elem = 0; elem < numElements; elem++) {
729: /* Initialize element vector */
730: ElementVecZero(vec);
731: vec->reduceSize = tLocOrder->elemSize;
732: elemGhostVec->reduceSize = tLocOrder->elemSize;
733: elemAppVec->reduceSize = sLocOrder->elemSize;
735: /* Setup local row indices */
736: GridCalcGeneralElementVecIndices(grid, elem, tOrder, PETSC_NULL, PETSC_TRUE, elemGhostVec);
737: GridCalcGeneralElementVecIndices(grid, elem, sOrder, PETSC_NULL, PETSC_TRUE, elemAppVec);
738: /* Setup local vectors */
739: GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
740: GridLocalToElementGeneral(grid, appVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemAppVec);
741: /* Must transform to unconstrained variables for element integrals */
742: GridProjectElementVec(grid, mesh, elem, tOrder, PETSC_FALSE, elemGhostVec);
743: GridProjectElementVec(grid, mesh, elem, sOrder, PETSC_FALSE, elemAppVec);
745: for(f = 0; f < numSFields; f++) {
746: sField = sFields[f];
747: tField = tFields[f];
748: /* Get contribution to the element vector from the linear operator */
749: ElementMatZero(mat);
750: DiscretizationEvaluateOperatorGalerkinMF(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
751: op, alpha, elem, &ghostArray[sElemStart[sField]],
752: &appArray[sElemStart[sField]], array, matArray, ctx);
754: #ifdef PETSC_USE_BOPT_g
755: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
756: #endif
757: }
759: /* Setup global row indices, with reduction if necessary */
760: GridCalcGeneralElementVecIndices(grid, elem, tOrder, PETSC_NULL, PETSC_FALSE, vec);
761: #ifdef PETSC_USE_BOPT_g
762: PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
763: if (opt == PETSC_TRUE) {
764: int var;
766: for(var = 0; var < vec->reduceSize; var++)
767: PetscPrintf(PETSC_COMM_SELF, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
768: }
769: #endif
770: /* Put values in global vector */
771: ElementVecSetValues(vec, v, ADD_VALUES);
772: }
774: VecDestroy(ghostVec);
775: VecScatterDestroy(ghostScatter);
776: VecDestroy(appVec);
777: VecScatterDestroy(appScatter);
778: ElementVecDestroy(elemGhostVec);
779: ElementVecDestroy(elemAppVec);
780: ElementVecDestroy(vec);
781: ElementMatDestroy(mat);
782: VecAssemblyBegin(v);
783: VecAssemblyEnd(v);
784: return(0);
785: }
787: #undef __FUNCT__
788: #define __FUNCT__ "GVecEvaluateSystemMatrix_Triangular_2D"
789: int GVecEvaluateSystemMatrix_Triangular_2D(Grid grid, GVec x, GVec y, GVec f, void *ctx)
790: {
791: Mesh mesh = grid->mesh;
792: int numElements = mesh->numFaces;
793: int numMatOps = grid->numMatOps; /* The number of operators in the matrix */
794: GridOp *matOps = grid->matOps; /* The operators in the system matrix */
795: PetscTruth reduceSystem = grid->reduceSystem;
796: PetscTruth reduceElement = grid->reduceElement;
797: PetscTruth explicitConstraints = grid->explicitConstraints;
798: PetscConstraintObject constCtx = grid->constraintCtx; /* The constraint object */
799: int numNewFields = grid->numNewFields; /* The number of new fields added by constraints */
800: ElementVec vec = grid->vec; /* The element vector */
801: PetscScalar *array = vec->array; /* The values in the element vector */
802: ElementMat mat = grid->mat; /* The element matrix */
803: PetscScalar *matArray = mat->array; /* The values in the element matrix */
804: Vec ghostVec = grid->ghostVec; /* The local solution vector */
805: ElementVec elemGhostVec = grid->ghostElementVec; /* Local solution vector */
806: PetscScalar *ghostArray = elemGhostVec->array; /* The values in the ghost element vector */
807: int numFields = grid->cm->numFields; /* The number of fields in the calculation */
808: int *fields = grid->cm->fields; /* The fields participating in the calculation */
809: LocalVarOrdering locOrder = grid->locOrder; /* The default local variable ordering */
810: int elemSize = locOrder->elemSize; /* The number of shape functions in the element matrix */
811: int *elemStart = locOrder->elemStart; /* The offset of each field in the element matrix */
812: int rank = mesh->part->rank; /* The processor rank */
813: MeshMover mover;
814: Grid ALEGrid; /* The grid describing the mesh velocity */
815: VarOrdering order; /* The default variable ordering */
816: ElementVec MeshALEVec; /* ALE velocity vector with mesh discretization */
817: ElementVec ALEVec; /* ALE velocity vector */
818: PetscScalar *ALEArray; /* The values in the ALE element vector */
819: Vec appVec; /* The local vec for y */
820: ElementVec elemAppVec; /* The element vec for y */
821: PetscScalar *appArray; /* The values in elemAppVec */
822: int elem, fieldIndex;
823: int newComp = 0;
824: int sField, tField, op, newField, row, col;
825: #ifdef PETSC_USE_BOPT_g
826: PetscTruth opt;
827: #endif
828: int ierr;
831: MeshGetMover(mesh, &mover);
832: MeshMoverGetVelocityGrid(mover, &ALEGrid);
833: PetscObjectQuery((PetscObject) x, "Order", (PetscObject *) &order);
834: /* Right now, we ignore the preconditioner */
835: /* Fill the local solution vectors */
836: if (x != PETSC_NULL) {
837: GridGlobalToLocal(grid, INSERT_VALUES, x);
838: }
839: VecDuplicate(ghostVec, &appVec);
840: GridGlobalToLocalGeneral(grid, y, appVec, INSERT_VALUES, grid->ghostScatter);
841: ElementVecDuplicate(elemGhostVec, &elemAppVec);
842: appArray = elemAppVec->array;
844: /* Setup ALE variables */
845: if (grid->ALEActive == PETSC_TRUE) {
846: /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
847: MeshALEVec = ALEGrid->vec;
848: ierr = ElementVecDuplicate(grid->vec, &ALEVec);
849: ALEArray = ALEVec->array;
850: } else {
851: MeshALEVec = PETSC_NULL;
852: ALEArray = PETSC_NULL;
853: }
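/* Per element: gather the ghosted solution and the application vector y, project to
   unconstrained variables, accumulate the matrix-free action of every registered
   operator into the element vector, and add it into the global vector f. */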
855: /* Loop over elements */
856: for(elem = 0; elem < numElements; elem++)
857: {
858: /* Initialize element vector */
859: ElementVecZero(vec);
860: vec->reduceSize = locOrder->elemSize;
861: elemGhostVec->reduceSize = locOrder->elemSize;
862: elemAppVec->reduceSize = locOrder->elemSize;
864: /* Setup global row and column indices */
865: GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
866: ElementVecDuplicateIndices(elemGhostVec, elemAppVec);
867: elemAppVec->reduceSize = elemGhostVec->reduceSize;
869: /* Setup local solution vector */
870: GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
871: GridLocalToElementGeneral(grid, appVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemAppVec);
873: /* Must transform to unconstrained variables for element integrals */
874: GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);
875: GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemAppVec);
877: /* Setup ALE variables */
878: if (grid->ALEActive == PETSC_TRUE) {
879: GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
880: GridLocalToElement(ALEGrid, MeshALEVec);
881: }
883: /* Calculate the contribution to the element matrix from each field */
884: for(op = 0; op < numMatOps; op++) {
885: sField = matOps[op].field;
886: tField = grid->fields[sField].disc->operators[matOps[op].op]->test->field;
887: if (grid->fields[sField].isActive) {
888: ElementMatZero(mat);
889: if (matOps[op].isALE) {
890: GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
891: DiscretizationEvaluateALEOperatorGalerkinMF(grid->fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
892: matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
893: &appArray[elemStart[sField]], ALEArray, array, matArray, ctx);
895: } else {
896: DiscretizationEvaluateOperatorGalerkinMF(grid->fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
897: matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
898: &appArray[elemStart[sField]], array, matArray, ctx);
900: }
901: #ifdef PETSC_USE_BOPT_g
902: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
903: #endif
904: }
905: }
907: /* Setup global numbering, with reduction if necessary */
908: GridCalcGeneralElementVecIndices(grid, elem, order, PETSC_NULL, PETSC_FALSE, vec);
909: #ifdef PETSC_USE_BOPT_g
910: PetscOptionsHasName(PETSC_NULL, "-trace_vec_assembly", &opt);
911: if (opt == PETSC_TRUE) {
912: int var;
913: for(var = 0; var < vec->reduceSize; var++)
914: PetscPrintf(PETSC_COMM_SELF, "%2d %4.2g\n", vec->indices[var], PetscRealPart(array[var]));
915: }
916: #endif
917: /* Put values in global vector */
918: ElementVecSetValues(vec, f, ADD_VALUES);
919: }
921: /* Evaluate self-interaction of new fields created by constraints */
922: if (explicitConstraints == PETSC_TRUE) {
923: /* WARNING: This only accommodates 1 constrained field */
924: /* Get constraint information */
925: for(fieldIndex = 0; fieldIndex < numFields; fieldIndex++) {
926: sField = fields[fieldIndex];
927: if (grid->fields[sField].isConstrained == PETSC_TRUE) {
928: newComp = grid->fields[sField].numComp + grid->fields[sField].constraintCompDiff;
929: break;
930: }
931: }
932: /* Calculate self-interaction */
933: for(newField = 0; newField < numNewFields; newField++) {
934: /* Initialize element matrix and vector */
935: ElementMatZero(mat);
936: ElementVecZero(vec);
937: mat->reduceRowSize = newComp;
938: mat->reduceColSize = newComp;
939: elemAppVec->reduceSize = newComp;
940: vec->reduceSize = newComp;
942: /* Calculate the indices and contribution to the element matrix from the new field */
943: (*constCtx->ops->newelemmat)(constCtx, order, newField, mat);
944: #ifdef PETSC_USE_BOPT_g
945: PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly_constrained", &opt);
946: if (opt == PETSC_TRUE) {
947: ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
948: }
949: #endif
950: /* Global vector indices are the same as the matrix indices */
951: for(row = 0; row < mat->reduceRowSize; row++) {
952: vec->indices[row] = mat->rowIndices[row];
953: }
954: /* Local vector indices can be calculated directly from the field number */
955: elemAppVec->indices[0] = grid->constraintOrder->firstVar[rank+1] - (numNewFields - newField)*newComp;
956: for(row = 1; row < elemAppVec->reduceSize; row++) {
957: elemAppVec->indices[row] = elemAppVec->indices[row-1]+1;
958: }
959: /* Retrieve element vector values from x */
960: GridLocalToElementGeneral(grid, appVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemAppVec);
961: /* Multiply element matrix and element vector */
962: for(row = 0; row < mat->reduceRowSize; row++) {
963: for(col = 0; col < mat->reduceColSize; col++) {
964: vec->array[row] += mat->array[row*mat->reduceColSize+col]*elemAppVec->array[col];
965: }
966: }
967: PetscLogFlops(2*mat->reduceRowSize*mat->reduceColSize);
968: /* Put values in global vector */
969: ElementVecSetValues(vec, f, ADD_VALUES);
970: #ifdef PETSC_USE_BOPT_g
971: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
972: #endif
973: }
974: }
976: /* Reset element vector */
977: elemGhostVec->reduceSize = locOrder->elemSize;
979: VecDestroy(appVec);
980: ElementVecDestroy(elemAppVec);
981: if (grid->ALEActive == PETSC_TRUE) {
982: ElementVecDestroy(ALEVec);
983: }
984: VecAssemblyBegin(f);
985: VecAssemblyEnd(f);
986: return(0);
987: }
989: #undef __FUNCT__
990: #define __FUNCT__ "GVecEvaluateSystemMatrixDiagonal_Triangular_2D"
991: int GVecEvaluateSystemMatrixDiagonal_Triangular_2D(Grid grid, GVec x, GVec d, void *ctx)
992: {
993: Mesh mesh = grid->mesh;
994: int numNewFields = grid->numNewFields; /* The number of new fields added by constraints */
995: int numMatOps = grid->numMatOps; /* The number of operators in the matrix */
996: GridOp *matOps = grid->matOps; /* The operators in the system matrix */
997: VarOrdering constOrder = grid->constraintOrder; /* The constrained variable ordering */
998: PetscTruth reduceSystem = grid->reduceSystem;
999: PetscTruth reduceElement = grid->reduceElement;
1000: PetscTruth expConst = grid->explicitConstraints;
1001: PetscConstraintObject constCtx = grid->constraintCtx; /* The constraint object */
1002: int numFields = grid->cm->numFields; /* The number of fields in the calculation */
1003: int *fields = grid->cm->fields; /* The fields participating in the calculation */
1004: LocalVarOrdering locOrder = grid->locOrder; /* The default local variable ordering */
1005: int elemSize = locOrder->elemSize; /* The number of shape functions in the element matrix */
1006: int *elemStart = locOrder->elemStart; /* The offset of each field in the element matrix */
1007: ElementMat mat = grid->mat; /* The element matrix */
1008: PetscScalar *array = mat->array; /* The values in the element matrix */
1009: Vec ghostVec = grid->ghostVec; /* The local solution vector */
1010: ElementVec elemGhostVec = grid->ghostElementVec; /* The element vector from ghostVec */
1011: PetscScalar *ghostArray = elemGhostVec->array; /* The values in elemGhostVec */
1012: MeshMover mover;
1013: Grid ALEGrid; /* The grid describing the mesh velocity */
1014: VarOrdering order; /* The default variable ordering */
1015: ElementVec MeshALEVec; /* ALE velocity vector with mesh discretization */
1016: ElementVec ALEVec; /* ALE velocity vector */
1017: PetscScalar *ALEArray; /* The values in the ALE element vector */
1018: int newComp = 0;
1019: int numElements;
1020: int elem, f, sField, tField, op, newField;
1021: #ifdef PETSC_USE_BOPT_g
1022: PetscTruth opt;
1023: #endif
1024: int ierr;
1027: MeshGetMover(mesh, &mover);
1028: MeshMoverGetVelocityGrid(mover, &ALEGrid);
1029: MeshGetInfo(mesh, PETSC_NULL, PETSC_NULL, PETSC_NULL, &numElements);
1030: if (expConst == PETSC_TRUE) {
1031: order = grid->constraintOrder;
1032: } else {
1033: order = grid->order;
1034: }
1035: /* Fill the local solution vectors */
1036: if (x != PETSC_NULL) {
1037: GridGlobalToLocal(grid, INSERT_VALUES, x);
1038: }
1040: /* Setup ALE variables -- No new variables should be ALE so ALEVec is not recalculated */
1041: if (grid->ALEActive == PETSC_TRUE) {
1042: /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
1043: MeshALEVec = ALEGrid->vec;
1044: ierr = ElementVecDuplicate(grid->vec, &ALEVec);
1045: ALEArray = ALEVec->array;
1046: } else {
1047: MeshALEVec = PETSC_NULL;
1048: ALEArray = PETSC_NULL;
1049: }
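/* Per element: assemble the full element matrix from all registered operators, then
   add only its diagonal entries into the global vector d. */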
1051: /* Loop over elements */
1052: for(elem = 0; elem < numElements; elem++) {
1053: /* Initialize element matrix */
1054: ElementMatZero(mat);
1055: mat->reduceRowSize = locOrder->elemSize;
1056: mat->reduceColSize = locOrder->elemSize;
1057: elemGhostVec->reduceSize = locOrder->elemSize;
1059: /* Setup local row indices for the ghost vector */
1060: GridCalcLocalElementVecIndices(grid, elem, elemGhostVec);
1061: /* Setup local solution vector */
1062: GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
1063: /* Must transform to unconstrained variables for element integrals */
1064: GridProjectElementVec(grid, mesh, elem, order, PETSC_FALSE, elemGhostVec);
1066: /* Setup ALE variables */
1067: if (grid->ALEActive == PETSC_TRUE) {
1068: GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
1069: GridLocalToElement(ALEGrid, MeshALEVec);
1070: }
1072: /* Calculate the contribution to the element matrix from each field */
1073: for(op = 0; op < numMatOps; op++) {
1074: sField = matOps[op].field;
1075: tField = grid->fields[sField].disc->operators[matOps[op].op]->test->field;
1076: if (grid->fields[sField].isActive) {
1077: if (matOps[op].isALE) {
1078: GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
1079: DiscretizationEvaluateALEOperatorGalerkin(grid->fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1080: matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1081: ALEArray, array, ctx);
1083: } else {
1084: DiscretizationEvaluateOperatorGalerkin(grid->fields[sField].disc, mesh, elemSize, elemStart[tField], elemStart[sField],
1085: matOps[op].op, matOps[op].alpha, elem, &ghostArray[elemStart[sField]],
1086: array, ctx);
1088: }
1089: #ifdef PETSC_USE_BOPT_g
1090: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
1091: #endif
1092: }
1093: }
1095: /* Setup global numbering, with reduction if necessary */
1096: GridCalcGeneralElementMatIndices(grid, elem, order, order, PETSC_FALSE, mat);
1097: #ifdef PETSC_USE_BOPT_g
1098: PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
1099: if (opt == PETSC_TRUE) {
1100: ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1101: }
1102: #endif
1103: /* Put diagonal values in the global vector */
1104: ElementMatSetDiagonalValues(mat, d, ADD_VALUES);
1105: }
1107: /* Evaluate self-interaction of new fields created by constraints */
1108: if (expConst == PETSC_TRUE) {
1109: /* WARNING: This only accommodates 1 constrained field */
1110: /* Get constraint information */
1111: for(f = 0; f < numFields; f++) {
1112: sField = fields[f];
1113: if (grid->fields[sField].isConstrained == PETSC_TRUE) {
1114: newComp = grid->fields[sField].numComp + grid->fields[sField].constraintCompDiff;
1115: break;
1116: }
1117: }
1118: /* Calculate self-interaction */
1119: for(newField = 0; newField < numNewFields; newField++) {
1120: /* Initialize element matrix */
1121: ElementMatZero(mat);
1122: mat->reduceRowSize = newComp;
1123: mat->reduceColSize = newComp;
1125: /* Calculate the indices and contribution to the element matrix from the new field */
1126: (*constCtx->ops->newelemmat)(constCtx, constOrder, newField, mat);
1127: #ifdef PETSC_USE_BOPT_g
1128: PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly_constrained", &opt);
1129: if (opt == PETSC_TRUE) {
1130: ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
1131: }
1132: #endif
1133: /* Put diagonal values in the global vector */
1134: ElementMatSetDiagonalValues(mat, d, ADD_VALUES);
1135: #ifdef PETSC_USE_BOPT_g
1136: PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
1137: #endif
1138: }
1139: }
1141: /* Assemble the diagonal vector */
1142: VecAssemblyBegin(d);
1143: VecAssemblyEnd(d);
1145: /* Reset element matrix and vector */
1146: mat->reduceRowSize = locOrder->elemSize;
1147: mat->reduceColSize = locOrder->elemSize;
1148: elemGhostVec->reduceSize = locOrder->elemSize;
1150: /* Cleanup */
1151: if (grid->ALEActive == PETSC_TRUE) {
1152: ElementVecDestroy(ALEVec);
1153: }
1155: return(0);
1156: }