Actual source code: gmat2d.c

#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: gmat2d.c,v 1.24 2000/01/31 17:34:32 knepley Exp $";
#endif

/* Implements FE matrices derived from 2d triangular grids */
#include "petscsles.h"                 /* For ALE Operators */
#include "src/gvec/gvecimpl.h"         /*I "gvec.h" I*/
#include "src/mesh/impls/triangular/triimpl.h"
#include "src/grid/impls/triangular/2d/elemvec2d.h"
#include "gmat2d.h"

extern int GridResetConstrainedMultiply_Private(Grid, GMat);

#undef  __FUNCT__
#define __FUNCT__ "PlaceVariables_Private"
static int PlaceVariables_Private(int startVar, int nodeVars, int locColStart, int locColEnd, int sStartVar, int sNodeVars,
                                  PetscTruth rectangular, int *diagRows, int *offdiagRows)
{
  int var;

  if ((nodeVars == 0) || (sNodeVars == 0)) return(0);
  /* Check to see whether the variables fall within the diagonal block */
  if ((sStartVar + sNodeVars <= locColStart) || (sStartVar >= locColEnd)) {
    for(var = 0; var < nodeVars; var++) {
      offdiagRows[startVar+var] += sNodeVars;
    }
  } else if ((sStartVar >= locColStart) && (sStartVar + sNodeVars <= locColEnd)) {
    for(var = 0; var < nodeVars; var++) {
      diagRows[startVar+var]    += sNodeVars;
    }
  } else if (rectangular) {
    /* Allow cuts on a single node for rectangular matrices */
    if (sStartVar < locColStart) {
      /* Cut is from below */
      for(var = 0; var < nodeVars; var++) {
        diagRows[startVar+var]    += (sStartVar + sNodeVars) - locColStart;
        offdiagRows[startVar+var] += locColStart - sStartVar;
      }
    } else {
      /* Cut is from above */
      for(var = 0; var < nodeVars; var++) {
        diagRows[startVar+var]    += locColEnd - sStartVar;
        offdiagRows[startVar+var] += (sStartVar + sNodeVars) - locColEnd;
      }
    }
  } else {
    /* Row blocking cuts variables on a single node. This is bad partitioning. */
    SETERRQ(PETSC_ERR_ARG_WRONG, "Row blocking cut variables on a single node");
  }
  return(0);
}
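/*
  Note on how these counts are used: the diagRows[]/offdiagRows[] arrays filled here
  become the d_nnz/o_nnz preallocation arguments of MatCreateMPIAIJ() below. For each
  local row, diagRows counts couplings to columns inside this processor's own column
  block [locColStart, locColEnd), and offdiagRows counts couplings to all other columns.

  A small worked example (illustrative only): with two processors owning columns [0,4)
  and [4,8), a rank-0 row coupled to columns {1, 2, 5} gets diagRows[row] += 2 for
  columns 1 and 2, and offdiagRows[row] += 1 for column 5.
*/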

#undef  __FUNCT__
#define __FUNCT__ "GridCreateGMat_Triangular_2D"
int GridCreateGMat_Triangular_2D(Grid grid, VarOrdering sOrder, VarOrdering tOrder, PetscTruth bdCols, GMat *gmat)
{
  MPI_Comm              comm;
  Mesh                  mesh;
  Partition             part;
  int                   locRowStart;   /* The row that this partition starts on */
  int                   locRowEnd;     /* The row that the next partition starts on */
  int                   locColStart;   /* The column that this partition starts on */
  int                   locColEnd;     /* The column that the next partition starts on */
  int                   newLocColStart;/* The column that the new variable domain starts on */
  int                   newLocColEnd;  /* The column after the new variable domain ends */
  int                  *diagRows;      /* Number of nonzeros in each diagonal portion */
  int                  *offdiagRows;   /* Number of nonzeros in each off-diagonal portion */
  int                   nodeVars;      /* Number of variables on a node */
  int                   newNodeVars;   /* Number of new variables on a node */
  int                   sNodeVars;     /* Number of variables on a node in the support of a given node */
  int                   sNewNodeVars;  /* Number of new variables on a node in the support of a given node */
  int                   startVar;      /* First variable on a node */
  int                   newStartVar;   /* First new variable on a node */
  int                   sStartVar;     /* First variable on a support node (global numbering) */
  int                   sNewStartVar;  /* First new variable on a support node (global numbering) */
  int                  *nodeDone;      /* A 1 indicates that the node has already been processed */
  int                  *nodeNeighbors; /* A list of the nodes in the support of a given node */
  int                   degree;        /* The degree of a vertex */
  int                  *support;       /* A list of elements in the support of a basis function */
  PetscTruth            rectangular;   /* Flag for a rectangular matrix */
  int                   numGhostNodes; /* The number of nodes constrained by variables in another domain */
  int                   numGhostVars;  /* The number of new variables which lie in another domain */
  int                  *ghostProcs;    /* The processor for each ghost node */
  int                  *ghostNodes;    /* The global index for each ghost node */
  int                  *ghostVarProcs; /* The processor for each ghost variable */
  int                  *ghostVars;     /* The global index for each ghost variable */
  int                   newComp;       /* The number of components in the new field */
  int                   numOverlapElements;
  PetscConstraintObject constCtx      = grid->constraintCtx;
  FieldClassMap         rowMap, colMap;
  int                   numCorners;
  int                   numNodes;
  int                   marker;
  int                   maxDegree;
  int                  *rowClasses,    *colClasses;
  int                  *rowClassSizes, *colClassSizes;
  int                  *rowIsConst,    *colIsConst;
  int                   rowLocVars    = tOrder->numLocVars;
  int                   rowVars       = tOrder->numVars;
  int                  *rowFirstVar   = tOrder->firstVar;
  int                  *rowOffsets    = tOrder->offsets;
  int                   colLocVars    = sOrder->numLocVars;
  int                   colVars       = sOrder->numVars;
  int                  *colFirstVar   = sOrder->firstVar;
  int                  *colOffsets    = sOrder->offsets;
  int                   rank, numProcs;
  int                   proc, elem, sElem, corner, sCorner, neighbor, node, sNode, nclass, sNclass, var, count;
  PetscTruth            opt;
  int                   ierr;

  PetscObjectGetComm((PetscObject) grid, &comm);
  MPI_Comm_size(comm, &numProcs);
  MPI_Comm_rank(comm, &rank);
  GridGetMesh(grid, &mesh);
  MeshGetPartition(mesh, &part);
  VarOrderingGetClassMap(tOrder, &rowMap);
  VarOrderingGetClassMap(sOrder, &colMap);
  MeshGetNumCorners(mesh, &numCorners);
  MeshGetMaxDegree(mesh, &maxDegree);
  PartitionGetNumOverlapElements(part, &numOverlapElements);
  numNodes       = rowMap->numNodes;
  rowClasses     = rowMap->classes;
  rowClassSizes  = rowMap->classSizes;
  rowIsConst     = rowMap->isClassConstrained;
  colClasses     = colMap->classes;
  colClassSizes  = colMap->classSizes;
  colIsConst     = colMap->isClassConstrained;
  newLocColStart = -1;
  newLocColEnd   = -1;
  /* Get partition information */
  locRowStart   = rowFirstVar[rank];
  locRowEnd     = rowFirstVar[rank+1];
  locColStart   = colFirstVar[rank];
  locColEnd     = colFirstVar[rank+1];
  rectangular   = (sOrder->numVars != tOrder->numVars) ? PETSC_TRUE : PETSC_FALSE;
  /* Get new field information */
  if (constCtx != PETSC_NULL) {
    (*constCtx->ops->getsize)(constCtx, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, PETSC_NULL, &newComp);
  }

  /* Preallocate possible nonzeros - Note that we are being pessimistic since we set
     the whole dense element matrix, which we know contains some zeros for certain
     operators */
  PetscMalloc(numNodes             * sizeof(int), &nodeDone);
  PetscMalloc(maxDegree*numCorners * sizeof(int), &nodeNeighbors);
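  /* A node has at most maxDegree elements in its support and each element contributes at
     most numCorners nodes, so maxDegree*numCorners bounds the neighbor list; the debug
     build checks this bound below before appending to nodeNeighbors[] */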

  /* Get the number of ghost variables due to constraints */
  numGhostNodes = 0;
  numGhostVars  = 0;
  if ((grid->isConstrained == PETSC_TRUE) && (numProcs > 1)) {
    PetscMemzero(nodeDone, numNodes * sizeof(int));
    for(elem = 0; elem < numOverlapElements; elem++) {
      for(corner = 0; corner < numCorners; corner++) {
        MeshGetNodeFromElement(mesh, elem, corner, &node);
        if (node >= numNodes) continue;
        if (nodeDone[node])   continue;
        nodeDone[node] = 1;

        nclass = rowClasses[node];
        if (rowIsConst[nclass]) {
          (*constCtx->ops->getindices)(constCtx, mesh, tOrder, node, CONSTRAINT_ROW_INDEX, &startVar);
          /* Include only new variables since only they can be ghosts */
          nodeVars = newComp;
          if ((startVar < locRowStart) || (startVar >= locRowEnd)) {
            /* This is a constraint which generates an off-processor variable */
            numGhostNodes++;
            numGhostVars += nodeVars;
          }
        }
      }
    }
  }
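  /* The loop above is only a counting pass: it sizes the ghost arrays so that the
     allocation pass below can record each ghost node and ghost variable exactly once */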

  /* Calculate matrix allocation */
  if (numGhostNodes > 0) {
    PetscMalloc(numGhostNodes * sizeof(int), &ghostNodes);
    PetscMalloc(numGhostNodes * sizeof(int), &ghostProcs);
    PetscMalloc(numGhostVars  * sizeof(int), &ghostVars);
    PetscMalloc(numGhostVars  * sizeof(int), &ghostVarProcs);
  }
  PetscMalloc((rowLocVars+numGhostVars) * sizeof(int), &diagRows);
  PetscMalloc((rowLocVars+numGhostVars) * sizeof(int), &offdiagRows);
  PetscMemzero(diagRows,    (rowLocVars+numGhostVars) * sizeof(int));
  PetscMemzero(offdiagRows, (rowLocVars+numGhostVars) * sizeof(int));
  PetscMemzero(nodeDone,     numNodes                 * sizeof(int));
  for(elem = 0, numGhostNodes = 0, numGhostVars = 0; elem < numOverlapElements; elem++) {
    for(corner = 0; corner < numCorners; corner++) {
      MeshGetNodeFromElement(mesh, elem, corner, &node);
      if (node >= numNodes) continue;
      if (nodeDone[node])   continue;
      nodeDone[node] = 1;

      nclass      = rowClasses[node];
      startVar    = rowOffsets[node] - locRowStart;
      nodeVars    = rowClassSizes[nclass];
      newNodeVars = 0;
      if (rowIsConst[nclass]) {
        (*constCtx->ops->getindices)(constCtx, mesh, tOrder, node, CONSTRAINT_ROW_INDEX, &newStartVar);
        /* Include only new variables */
        newNodeVars = newComp;
        if ((newStartVar < locRowStart) || (newStartVar >= locRowEnd)) {
          /* This is a constraint which generates an off-processor variable */
          ghostNodes[numGhostNodes]     = newStartVar;
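          /* Locate the owning processor: rowFirstVar[] holds the first global variable of
             each processor, so the owner is the last proc with rowFirstVar[proc] <= newStartVar */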
          for(proc = 0; newStartVar >= rowFirstVar[proc+1]; proc++) ;
          ghostProcs[numGhostNodes]     = proc;
          for(var = 0; var < newComp; var++, numGhostVars++) {
            ghostVars[numGhostVars]     = newStartVar + var;
            ghostVarProcs[numGhostVars] = proc;
          }
          numGhostNodes++;
          /* Set partition for the appropriate processor */
          newLocColStart = colFirstVar[proc];
          newLocColEnd   = colFirstVar[proc+1];
          /* Reset newStartVar to the correct position in diagRows */
          newStartVar    = rowLocVars + (numGhostVars - newComp);
        } else {
          newLocColStart = locColStart;
          newLocColEnd   = locColEnd;
          /* Reset newStartVar to the correct position in diagRows */
          newStartVar   -= locRowStart;
        }
      }
      if (nodeVars+newNodeVars == 0) continue;

      /* Loop over nodes on each element in the support of the node */
      MeshGetNodeSupport(mesh, node, elem, &degree, &support);
      for(sElem = 0, count = 0; sElem < degree; sElem++) {
        for(sCorner = 0; sCorner < numCorners; sCorner++) {
          /* Disregard normal columns if we are forming a boundary matrix */
          MeshGetNodeFromElement(mesh, support[sElem], sCorner, &sNode);
          MeshGetNodeBoundary(mesh, sNode, &marker);
          if ((bdCols == PETSC_TRUE) && (marker == 0)) continue;
          sNclass      = colClasses[sNode];
          sStartVar    = colOffsets[sNode];
          sNodeVars    = colClassSizes[sNclass];
          sNewNodeVars = 0;

          if (colIsConst[sNclass]) {
            (*constCtx->ops->getindices)(constCtx, mesh, sOrder, sNode, CONSTRAINT_COL_INDEX, &sNewStartVar);
            sNewNodeVars = newComp;
          }

          /* Check for duplicate node */
          for(neighbor = 0; neighbor < count; neighbor++) {
            if (nodeNeighbors[neighbor] == sNode) break;
          }
          if (neighbor < count) {
            continue;
          } else {
#ifdef PETSC_USE_BOPT_g
            if (count >= maxDegree*numCorners) {
              SETERRQ1(PETSC_ERR_ARG_OUTOFRANGE, "Too many neighboring nodes: %d", count);
            }
#endif
            nodeNeighbors[count++] = sNode;
          }

          PlaceVariables_Private(startVar,    nodeVars,    locColStart,    locColEnd,    sStartVar,    sNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(newStartVar, newNodeVars, newLocColStart, newLocColEnd, sStartVar,    sNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(startVar,    nodeVars,    locColStart,    locColEnd,    sNewStartVar, sNewNodeVars,
                                 rectangular, diagRows, offdiagRows);
          PlaceVariables_Private(newStartVar, newNodeVars, newLocColStart, newLocColEnd, sNewStartVar, sNewNodeVars,
                                 rectangular, diagRows, offdiagRows);
#ifdef PETSC_USE_BOPT_g
          if ((numProcs == 1) && (offdiagRows[startVar] > 0)) {
            for(proc = 0; proc <= numProcs; proc++)
              PetscPrintf(PETSC_COMM_SELF, "colFirstVar[%d]: %d\n", proc, colFirstVar[proc]);
            for(node = 0; node < colMap->numNodes; node++)
              PetscPrintf(PETSC_COMM_SELF, "colOffsets[%d]: %d\n", node, colOffsets[node]);
            PetscPrintf(PETSC_COMM_SELF, "sNode %d sStartVar %d in [%d,%d)\n", sNode, sStartVar, locColStart, locColEnd);
            SETERRQ2(PETSC_ERR_PLIB, "Invalid var alloc in elem %d var %d", elem, startVar);
          }
          if ((numProcs == 1) && (rowIsConst[nclass]) && (offdiagRows[newStartVar] > 0)) {
            SETERRQ2(PETSC_ERR_PLIB, "Invalid var alloc in elem %d var %d", elem, newStartVar);
          }
#endif
        }
      }
      MeshRestoreNodeSupport(mesh, node, elem, &degree, &support);
    }
  }

#ifdef PETSC_USE_BOPT_g
  /* Check that we looked at every node */
  for(node = 0; node < numNodes; node++) {
    if (!nodeDone[node]) SETERRQ1(PETSC_ERR_PLIB, "Node %d was not encountered", node);
  }
#endif
  PetscOptionsHasName(PETSC_NULL, "-trace_alloc", &opt);
  if (opt == PETSC_TRUE) {
    for(var = 0; var < rowLocVars; var++) {
      PetscSynchronizedPrintf(comm, "diagRows[%d]: %d offdiagRows[%d]: %d\n",
                              var + rowFirstVar[rank], diagRows[var], var + rowFirstVar[rank], offdiagRows[var]);
    }
    PetscSynchronizedFlush(comm);
  }
#ifdef PETSC_USE_BOPT_g
  PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
#endif

  /* Communicate */
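  /* Judging from the arguments, this reverse scatter with ADD_VALUES folds the counts
     accumulated in the ghost slots diagRows[rowLocVars..] and offdiagRows[rowLocVars..]
     back onto the rows of their owning processors */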
  if ((grid->isConstrained == PETSC_TRUE) && (numProcs > 1)) {
    PetscGhostExchange(comm, numGhostVars, ghostVarProcs, ghostVars, PETSC_INT, rowFirstVar,
                       ADD_VALUES, SCATTER_REVERSE, diagRows,    &diagRows[rowLocVars]);
    PetscGhostExchange(comm, numGhostVars, ghostVarProcs, ghostVars, PETSC_INT, rowFirstVar,
                       ADD_VALUES, SCATTER_REVERSE, offdiagRows, &offdiagRows[rowLocVars]);
  }

  /* Stopgap solution for constrained variables */
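  /* The counts above can overshoot because each neighbor contributes its full variable
     block; a row has at most colLocVars entries in the diagonal block, and both counts
     are simply capped at colLocVars here as a stopgap */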
  if (grid->isConstrained == PETSC_TRUE) {
    for(var = 0; var < rowLocVars; var++) {
      if (diagRows[var]    > colLocVars) diagRows[var]    = colLocVars;
      if (offdiagRows[var] > colLocVars) offdiagRows[var] = colLocVars;
    }
  }

  /* Create the matrix */
  MatCreateMPIAIJ(comm, rowLocVars, colLocVars, rowVars, colVars, 0, diagRows, 0, offdiagRows, gmat);
  PetscObjectCompose((PetscObject) *gmat, "Grid", (PetscObject) grid);
  MatSetOption(*gmat, MAT_NEW_NONZERO_ALLOCATION_ERR);
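  /* MAT_NEW_NONZERO_ALLOCATION_ERR makes assembly fail if an entry falls outside the
     preallocated nonzero pattern, so any undercount in diagRows/offdiagRows is caught
     immediately rather than silently degrading performance */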

  /* Cleanup */
  ierr = PetscFree(diagRows);
  ierr = PetscFree(offdiagRows);
  ierr = PetscFree(nodeDone);
  ierr = PetscFree(nodeNeighbors);
  if (numGhostNodes > 0) {
    PetscFree(ghostNodes);
    PetscFree(ghostProcs);
    PetscFree(ghostVars);
    PetscFree(ghostVarProcs);
  }

  return(0);
}

#undef  __FUNCT__
#define __FUNCT__ "GMatView_Draw_Triangular_2D"
int GMatView_Draw_Triangular_2D(GMat gmat, PetscViewer v)
{
  int ierr;

  ierr = MatView(gmat, v);
  PetscFunctionReturn(ierr);
}

#undef  __FUNCT__
#define __FUNCT__ "GMatView_Triangular_2D"
int GMatView_Triangular_2D(GMat gmat, PetscViewer viewer)
{
  Grid       grid;
  PetscTruth isascii, isdraw;
  int        ierr;

  PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_ASCII, &isascii);
  PetscTypeCompare((PetscObject) viewer, PETSC_VIEWER_DRAW,  &isdraw);
  if (isascii == PETSC_TRUE) {
    GMatGetGrid(gmat, &grid);
    GridView(grid, viewer);
    PetscViewerFlush(viewer);
    MatView(gmat, viewer);
  } else if (isdraw == PETSC_TRUE) {
    GMatView_Draw_Triangular_2D(gmat, viewer);
  }

  return(0);
}

#undef  __FUNCT__
#define __FUNCT__ "GMatEvaluateALEOperatorGalerkin_Triangular_2D"
int GMatEvaluateALEOperatorGalerkin_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                                  LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                                  LocalVarOrdering tLocOrder, int op, PetscScalar alpha, MatAssemblyType type,
                                                  void *ctx)
{
  Mesh         mesh        = grid->mesh;
  Partition    part;
  int          numElements;
  int          sElemSize   = sLocOrder->elemSize;
  int          tElemSize   = tLocOrder->elemSize;
  int         *sElemStart  = sLocOrder->elemStart;
  int         *tElemStart  = tLocOrder->elemStart;
  ElementVec   ghostVec    = grid->ghostElementVec; /* Local solution vector */
  PetscScalar *ghostArray  = ghostVec->array;       /* The values in the ghost element vector */
  MeshMover    mover;
  Grid         ALEGrid;      /* The grid describing the mesh velocity */
  ElementMat   mat;          /* The element matrix */
  PetscScalar *array;        /* The values in the element matrix */
  ElementVec   MeshALEVec;   /* The ALE velocity vector with mesh discretization */
  ElementVec   ALEVec;       /* The ALE velocity vector */
  PetscScalar *ALEArray;     /* The values in the ALE element vector */
  int          sField, tField;
  int          f, elem;
#ifdef PETSC_USE_BOPT_g
  int          i, j;
  PetscTruth   opt;
#endif
  int          ierr;

  MeshGetPartition(mesh, &part);
  MeshGetMover(mesh, &mover);
  PartitionGetNumElements(part, &numElements);
  MeshMoverGetVelocityGrid(mover, &ALEGrid);
  /* Setup element matrix */
  ierr  = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array = mat->array;

  /* Setup ALE variables */
  if (grid->ALEActive == PETSC_TRUE) {
    /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
    MeshALEVec = ALEGrid->vec;
    ALEVec     = grid->vec;
    ALEArray   = ALEVec->array;
  } else {
    MeshALEVec = PETSC_NULL;
    ALEVec     = PETSC_NULL;
    ALEArray   = PETSC_NULL;
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

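  /* Element-by-element assembly: each iteration zeroes the element matrix, gathers the
     local solution and ALE velocity, lets each field's discretization accumulate its
     contribution, computes the global indices, and scatter-adds into M with ADD_VALUES */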
  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);

    /* Setup global row and column indices */
    GridCalcLocalElementVecIndices(grid, elem, ghostVec);

    /* Setup local solution vector */
    GridLocalToElement(grid, ghostVec);

    /* Setup ALE variables */
    if (grid->ALEActive == PETSC_TRUE) {
      GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
      GridLocalToElement(ALEGrid, MeshALEVec);
    }

    for(f = 0; f < numFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
      DiscretizationEvaluateALEOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                                op, alpha, elem, &ghostArray[sElemStart[sField]], ALEArray, array, ctx);
#ifdef PETSC_USE_BOPT_g
      PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
#endif
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, "      %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, "   %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }
  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  /* Reset size functions */
  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}

#undef  __FUNCT__
#define __FUNCT__ "GMatEvaluateOperatorGalerkin_Triangular_2D"
int GMatEvaluateOperatorGalerkin_Triangular_2D(Grid grid, GMat M, GVec x, VarOrdering sOrder, LocalVarOrdering sLocOrder,
                                               VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
                                               MatAssemblyType type, void *ctx)
{
  Mesh             mesh          = grid->mesh;
  PetscTruth       reduceSystem  = grid->reduceSystem;
  PetscTruth       reduceElement = grid->reduceElement;
  int              sElemSize     = sLocOrder->elemSize;
  int              tElemSize     = tLocOrder->elemSize;
  int             *sElemStart    = sLocOrder->elemStart;
  int             *tElemStart    = tLocOrder->elemStart;
  FieldClassMap    sMap,         tMap;
  int              numSFields,   numTFields;
  int             *sFields,     *tFields;
  PetscTruth       sConstrained, tConstrained;
  Vec              ghostVec;     /* The local ghost vector for x (usually the solution) */
  VecScatter       ghostScatter; /* The scatter from x to ghostVec */
  ElementMat       mat;
  ElementVec       elemGhostVec;
  PetscScalar     *ghostArray, *array;
  int              numElements;
  int              sField, tField;
  int              f, elem;
#ifdef PETSC_USE_BOPT_g
  PetscTruth       opt;
#endif
  int              ierr;

  MeshGetInfo(mesh, PETSC_NULL, PETSC_NULL, PETSC_NULL, &numElements);
  VarOrderingGetClassMap(sOrder, &sMap);
  VarOrderingGetClassMap(tOrder, &tMap);
  numSFields   = sMap->numFields;
  sFields      = sMap->fields;
  sConstrained = sMap->isConstrained;
  numTFields   = tMap->numFields;
  tFields      = tMap->fields;
  tConstrained = tMap->isConstrained;
  /* Setup reduction */
  (*grid->ops->gridsetupghostscatter)(grid, tOrder, &ghostVec, &ghostScatter);
  /* Setup element vector and matrix */
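  /* For constrained fields the element matrix must hold the unconstrained variables: each
     of the disc->funcs basis functions appears to gain constraintCompDiff extra components,
     hence the enlarged element sizes below (this reading is inferred from the field data) */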
  if (tConstrained == PETSC_TRUE) {
    for(f = 0; f < numTFields; f++) {
      if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
        tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
    }
  }
  if (sConstrained == PETSC_TRUE) {
    for(f = 0; f < numSFields; f++) {
      if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
        sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    }
  }
  ierr       = ElementVecCreate(grid->comm, tElemSize, &elemGhostVec);
  ghostArray = elemGhostVec->array;
  ierr       = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array      = mat->array;
  ierr       = ElementVecZero(elemGhostVec);

  /* Fill the local solution vectors */
  if (x != PETSC_NULL) {
    GridGlobalToLocalGeneral(grid, x, ghostVec, INSERT_VALUES, ghostScatter);
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numSFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);
    mat->reduceRowSize       = tLocOrder->elemSize;
    mat->reduceColSize       = sLocOrder->elemSize;
    elemGhostVec->reduceSize = tLocOrder->elemSize;

    if (x != PETSC_NULL) {
      /* Setup local row indices for the ghost vector */
      GridCalcGeneralElementVecIndices(grid, elem, tOrder, PETSC_NULL, PETSC_TRUE, elemGhostVec);
      /* Setup local solution vector */
      GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
      /* Must transform to unconstrained variables for element integrals */
      GridProjectElementVec(grid, mesh, elem, tOrder, PETSC_FALSE, elemGhostVec);
    }
    for(f = 0; f < numSFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      DiscretizationEvaluateOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                             op, alpha, elem, &ghostArray[sElemStart[sField]], array, ctx);
#ifdef PETSC_USE_BOPT_g
      PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
#endif
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  VecDestroy(ghostVec);
  VecScatterDestroy(ghostScatter);
  ElementVecDestroy(elemGhostVec);
  ElementMatDestroy(mat);

  return(0);
}

#undef  __FUNCT__
#define __FUNCT__ "GMatEvaluateALEConstrainedOperatorGalerkin_Triangular_2D"
int GMatEvaluateALEConstrainedOperatorGalerkin_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                                             LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                                             LocalVarOrdering tLocOrder, int op, PetscScalar alpha, MatAssemblyType type,
                                                             void *ctx)
{
  Mesh         mesh        = grid->mesh;
  Partition    part;
  int          numElements;
  int          sElemSize   = sLocOrder->elemSize;
  int          tElemSize   = tLocOrder->elemSize;
  int         *sElemStart  = sLocOrder->elemStart;
  int         *tElemStart  = tLocOrder->elemStart;
  ElementVec   ghostVec    = grid->ghostElementVec; /* Local solution vector */
  PetscScalar *ghostArray  = ghostVec->array;       /* The values in the ghost element vector */
  MeshMover    mover;
  Grid         ALEGrid;      /* The grid describing the mesh velocity */
  ElementMat   mat;          /* The element matrix */
  PetscScalar *array;        /* The values in the element matrix */
  ElementVec   MeshALEVec;   /* The ALE velocity vector with mesh discretization */
  ElementVec   ALEVec;       /* The ALE velocity vector */
  PetscScalar *ALEArray;     /* The values in the ALE element vector */
  int          sField, tField;
  int          f, elem;
#ifdef PETSC_USE_BOPT_g
  PetscTruth   opt;
  int          i, j;
#endif
  int          ierr;

  MeshGetPartition(mesh, &part);
  MeshGetMover(mesh, &mover);
  PartitionGetNumElements(part, &numElements);
  MeshMoverGetVelocityGrid(mover, &ALEGrid);
  /* Setup element matrix */
  for(f = 0; f < numFields; f++) {
    if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
      sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
      tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
  }
  ierr  = ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);
  array = mat->array;

  /* Setup ALE variables -- No new variables should be ALE so ALEVec is not recalculated */
  if (grid->ALEActive == PETSC_TRUE) {
    /* Notice that the ALEArray is from this grid, not the mesh velocity grid */
    MeshALEVec = ALEGrid->vec;
    ALEVec     = grid->vec;
    ALEArray   = ALEVec->array;
  } else {
    MeshALEVec = PETSC_NULL;
    ALEVec     = PETSC_NULL;
    ALEArray   = PETSC_NULL;
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numFields; f++) {
    grid->fields[sFields[f]].disc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

  for(elem = 0; elem < numElements; elem++) {
    /* Initialize element matrix */
    ElementMatZero(mat);
    mat->reduceRowSize = tLocOrder->elemSize;
    mat->reduceColSize = sLocOrder->elemSize;

    /* Setup global row and column indices */
    GridCalcLocalElementVecIndices(grid, elem, ghostVec);

    /* Setup local solution vector */
    GridLocalToElement(grid, ghostVec);

    /* Setup ALE variables */
    if (grid->ALEActive == PETSC_TRUE) {
      GridCalcLocalElementVecIndices(ALEGrid, elem, MeshALEVec);
      GridLocalToElement(ALEGrid, MeshALEVec);
    }

    for(f = 0; f < numFields; f++) {
      sField = sFields[f];
      tField = tFields[f];
      /* Calculate the contribution to the element matrix from the field */
      GridInterpolateElementVec(ALEGrid, 0, MeshALEVec, grid, sField, ALEVec);
      DiscretizationEvaluateALEOperatorGalerkin(grid->fields[sField].disc, mesh, sElemSize, tElemStart[tField], sElemStart[sField],
                                                op, alpha, elem, &ghostArray[sElemStart[sField]], ALEArray, array, ctx);
#ifdef PETSC_USE_BOPT_g
      PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
#endif
    }

    /* Setup global row and column indices */
    GridCalcGeneralElementMatIndices(grid, elem, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, "      %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, "   %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
  }

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  /* Reset size functions */
  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}

#undef  __FUNCT__
#define __FUNCT__ "GMatEvaluateNewFields_Triangular_2D"
int GMatEvaluateNewFields_Triangular_2D(Grid grid, GMat M, int numFields, int *sFields, VarOrdering sOrder,
                                        LocalVarOrdering sLocOrder, int *tFields, VarOrdering tOrder,
                                        LocalVarOrdering tLocOrder, PetscScalar alpha, MatAssemblyType type, void *ctx)
{
  VarOrdering           constOrder = grid->constraintOrder; /* The constrained variable ordering */
  PetscConstraintObject constCtx   = grid->constraintCtx;   /* The constraint object */
  int                   sElemSize  = 0;
  int                   tElemSize  = 0;
  ElementMat            mat;          /* The element matrix */
  int                   f, newField;
#ifdef PETSC_USE_BOPT_g
  int                   i, j;
  PetscTruth            opt;
#endif
  int                   ierr;

  /* Setup element matrix */
  for(f = 0; f < numFields; f++) {
    if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
      sElemSize += grid->fields[sFields[f]].disc->comp + grid->fields[sFields[f]].constraintCompDiff;
    if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
      tElemSize += grid->fields[tFields[f]].disc->comp + grid->fields[tFields[f]].constraintCompDiff;
  }
  ElementMatCreate(grid->comm, tElemSize, sElemSize, &mat);

  for(newField = 0; newField < grid->numNewFields; newField++) {
    /* Initialize element matrix */
    ElementMatZero(mat);

    /* Calculate the indices and contribution to the element matrix from the new field */
    (*constCtx->ops->newelemmat)(constCtx, constOrder, newField, mat);
#ifdef PETSC_USE_BOPT_g
    PetscOptionsHasName(PETSC_NULL, "-trace_mat_assembly", &opt);
    if (opt == PETSC_TRUE) {
      PetscPrintf(grid->comm, "      %3d", mat->colIndices[0]);
      for(i = 1; i < mat->reduceColSize; i++)
        PetscPrintf(grid->comm, "   %3d", mat->colIndices[i]);
      PetscPrintf(grid->comm, "\n");
      for(i = 0; i < mat->reduceRowSize; i++) {
        PetscPrintf(grid->comm, "%3d ", mat->rowIndices[i]);
        for(j = 0; j < mat->reduceColSize; j++)
          PetscPrintf(grid->comm, "%5.2g ", PetscRealPart(mat->array[i*mat->reduceColSize+j]));
        PetscPrintf(grid->comm, "\n");
      }
    }
#endif
    /* Put values in global matrix */
    ElementMatSetValues(mat, M, ADD_VALUES);
#ifdef PETSC_USE_BOPT_g
    PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
#endif
  }

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  ElementMatDestroy(mat);

  GridResetConstrainedMultiply_Private(grid, M);
  return(0);
}

#undef  __FUNCT__
#define __FUNCT__ "GMatEvaluateBoundaryOperatorGalerkin_Triangular_2D"
int GMatEvaluateBoundaryOperatorGalerkin_Triangular_2D(Grid grid, GMat M, GVec x, VarOrdering sOrder, LocalVarOrdering sLocOrder,
                                                       VarOrdering tOrder, LocalVarOrdering tLocOrder, int op, PetscScalar alpha,
                                                       MatAssemblyType type, void *ctx)
{
  MPI_Comm                 comm;
  Mesh                     mesh          = grid->mesh;
  Partition                part;
  Mesh_Triangular         *tri           = (Mesh_Triangular *) mesh->data;
  PetscTruth               reduceSystem  = grid->reduceSystem;
  PetscTruth               reduceElement = grid->reduceElement;
  int                      sElemSize     = sLocOrder->elemSize;
  int                      tElemSize     = tLocOrder->elemSize;
  int                     *sElemStart    = sLocOrder->elemStart;
  int                     *tElemStart    = tLocOrder->elemStart;
  int                      numEdges;
  int                     *bdEdges       = tri->bdEdges;
  int                      rank;
  FieldClassMap            sMap,         tMap;
  int                      firstEdge;
  int                      numSFields,   numTFields;
  int                     *sFields,     *tFields;
  PetscTruth               sConstrained, tConstrained;
  Vec                      ghostVec;     /* The local ghost vector for x (usually the solution) */
  VecScatter               ghostScatter; /* The scatter from x to ghostVec */
  ElementMat               mat;
  ElementVec               elemGhostVec;
  PetscScalar             *array;
  EdgeContext              bdCtx;        /* A context wrapper to communicate the midnode of an edge */
  int                      sField, tField;
  int                      f, bd, edge, bdEdge, midNode;
#ifdef PETSC_USE_BOPT_g
  PetscTruth               opt;
#endif
  int                      ierr;

  PetscObjectGetComm((PetscObject) grid, &comm);
  MPI_Comm_rank(comm, &rank);
  VarOrderingGetClassMap(sOrder, &sMap);
  VarOrderingGetClassMap(tOrder, &tMap);
  MeshGetPartition(mesh, &part);
  PartitionGetNumEdges(part, &numEdges);
  PartitionGetStartEdge(part, &firstEdge);
  numSFields   = sMap->numFields;
  sFields      = sMap->fields;
  sConstrained = sMap->isConstrained;
  numTFields   = tMap->numFields;
  tFields      = tMap->fields;
  tConstrained = tMap->isConstrained;
  /* Setup reduction */
  (*grid->ops->gridsetupghostscatter)(grid, tOrder, &ghostVec, &ghostScatter);
  /* Setup element vector and matrix */
  if (tConstrained == PETSC_TRUE) {
    for(f = 0; f < numTFields; f++) {
      if (grid->fields[tFields[f]].isConstrained == PETSC_TRUE)
        tElemSize += grid->fields[tFields[f]].disc->funcs*grid->fields[tFields[f]].constraintCompDiff;
    }
  }
  if (sConstrained == PETSC_TRUE) {
    for(f = 0; f < numSFields; f++) {
      if (grid->fields[sFields[f]].isConstrained == PETSC_TRUE)
        sElemSize += grid->fields[sFields[f]].disc->funcs*grid->fields[sFields[f]].constraintCompDiff;
    }
  }
  ElementVecCreate(comm, tElemSize, &elemGhostVec);
  ElementMatCreate(comm, tElemSize, sElemSize, &mat);
  ElementVecZero(elemGhostVec);
  array = mat->array;

  /* Setup user context */
  bdCtx.ctx = ctx;

  /* Fill the local solution vectors */
  if (x != PETSC_NULL) {
    GridGlobalToLocalGeneral(grid, x, ghostVec, INSERT_VALUES, ghostScatter);
  }

  /* Setup the operator with information about the test function space */
  for(f = 0; f < numSFields; f++) {
    grid->fields[sFields[f]].disc->bdDisc->operators[op]->test = grid->fields[tFields[f]].disc;
  }

  /* Our problem here is that "edges" are not data structures like "elements". The element
     holds the midnodes which appear on it, but edges do not. Thus we must pass the midnode
     number to the discretization, which we do using a context wrapper. Unfortunately, the
     row indices were derived from elements, so we must introduce another numbering function
     which operates on nodes alone. The midnode number is found by a search of the elements,
     which could certainly be improved with geometric hints. We might also assume that it
     is the node lying between the two endpoints in the bdNodes[] array. In addition, the
     boundary variable ordering is in relation to boundary node numbers, so the node
     number must be converted before calling the numbering function. This could be sped up
     by placing boundary node numbers in the bdEdges[] array instead. */
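  /* For reference, the wrapper is assumed to look roughly like
       typedef struct { void *ctx; int midnode; } EdgeContext;
     since only bdCtx.ctx and bdCtx.midnode are set in this function; the actual layout
     lives in a header not shown here */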

  /* Loop over boundary edges */
  for(bd = 0, bdEdge = 0; bd < grid->numBd; bd++) {
    for(bdEdge = tri->bdEdgeBegin[bd]; bdEdge < tri->bdEdgeBegin[bd+1]; bdEdge++) {
      /* Check that edge is on this processor */
      edge = bdEdges[bdEdge] - firstEdge;
      if ((edge < 0) || (edge >= numEdges)) continue;

      MeshGetMidnodeFromEdge(mesh, edge, &midNode);
      bdCtx.midnode = midNode;

      /* Initialize element matrix */
      ElementMatZero(mat);
      mat->reduceRowSize       = tLocOrder->elemSize;
      mat->reduceColSize       = sLocOrder->elemSize;
      elemGhostVec->reduceSize = tLocOrder->elemSize;

      if (x != PETSC_NULL) {
        /* Setup local row indices for the ghost vector */
        GridCalcBoundaryElementVecIndices(grid, bd, edge, midNode, tOrder, PETSC_TRUE, elemGhostVec);
        /* Setup local solution vector */
        GridLocalToElementGeneral(grid, ghostVec, grid->bdReduceVecCur, reduceSystem, reduceElement, elemGhostVec);
        /* Must transform to unconstrained variables for element integrals */
        GridProjectElementVec(grid, mesh, edge, tOrder, PETSC_FALSE, elemGhostVec);
        SETERRQ(PETSC_ERR_SUP, "Being reworked");
      }
      for(f = 0; f < numSFields; f++) {
        sField = sFields[f];
        tField = tFields[f];
        /* Calculate the contribution to the element matrix from the field */
        DiscretizationEvaluateOperatorGalerkin(grid->fields[sField].disc->bdDisc, mesh, sElemSize, tElemStart[tField],
                                               sElemStart[sField], op, alpha, edge, PETSC_NULL, array, &bdCtx);
#ifdef PETSC_USE_BOPT_g
        PetscTrValid(__LINE__, __FUNCT__, __FILE__, __SDIR__);
#endif
      }

      /* Setup global row and column indices */
      GridCalcBoundaryElementMatIndices(grid, bd, edge, midNode, sOrder, tOrder, PETSC_FALSE, mat);
#ifdef PETSC_USE_BOPT_g
      PetscOptionsHasName(PETSC_NULL, "-trace_mat_bd_assembly", &opt);
      if (opt == PETSC_TRUE) {
        ElementMatView(mat, PETSC_VIEWER_STDOUT_(mat->comm));
      }
#endif
      /* Put values in the global matrix */
      ElementMatSetValues(mat, M, ADD_VALUES);
    }
  }
#ifdef PETSC_USE_BOPT_g
  if (bdEdge != mesh->numBdEdges) SETERRQ(PETSC_ERR_PLIB, "Invalid boundary edge numbering");
#endif

  MatAssemblyBegin(M, type);
  MatAssemblyEnd(M, type);

  /* Cleanup */
  VecDestroy(ghostVec);
  VecScatterDestroy(ghostScatter);
  ElementVecDestroy(elemGhostVec);
  ElementMatDestroy(mat);

  return(0);
}