Actual source code: ex2.c

/*$Id: ex2.c,v 1.38 2001/03/23 23:24:45 balay Exp $*/
static char help[] ="Solves a time-dependent nonlinear PDE. Uses implicit\n\
timestepping.  Runtime options include:\n\
  -M <xg>, where <xg> = number of grid points\n\
  -debug : Activate debugging printouts\n\
  -nox   : Deactivate x-window graphics\n\n";
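
/*
   A typical invocation (the launcher name is an assumption; use whatever
   MPI launcher your installation provides):
      mpirun -np 2 ex2 -M 100 -debug
*/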

/*
   Concepts: TS^time-dependent nonlinear problems
   Processors: n
*/

/* ------------------------------------------------------------------------

   This program solves the PDE

               u * u_xx
         u_t = ---------
               2*(t+1)^2

    on the domain 0 <= x <= 1, with boundary conditions
         u(t,0) = t + 1,  u(t,1) = 2*t + 2,
    and initial condition
         u(0,x) = 1 + x*x.

    The exact solution is:
         u(t,x) = (1 + x*x) * (1 + t)

    Note that since the solution is linear in time and quadratic in x,
    the finite difference scheme actually computes the "exact" solution.

    We use by default the backward Euler method.

  ------------------------------------------------------------------------- */
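
/*
   Discretization sketch (this is what RHSFunction() and RHSJacobian()
   below implement): with mesh width h = 1/(m-1) and u_i(t) ~ u(t,i*h),
   replacing u_xx by the standard centered difference gives the ODE system

        du_i/dt = u_i * (u_{i+1} - 2*u_i + u_{i-1}) / (2*h^2*(t+1)^2)

   at the interior nodes; the boundary rows are handled separately.
*/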

/*
   Include "petscts.h" to use the PETSc timestepping routines. Note that
   this file automatically includes "petsc.h" and other lower-level
   PETSc include files.

   Include "petscda.h" so that we can use the distributed array data
   structures to manage the parallel grid.
*/
#include "petscts.h"
#include "petscda.h"

/*
   User-defined application context - contains data needed by the
   application-provided callback routines.
*/
typedef struct {
  MPI_Comm   comm;          /* communicator */
  DA         da;            /* distributed array data structure */
  Vec        localwork;     /* local ghosted work vector */
  Vec        u_local;       /* local ghosted approximate solution vector */
  Vec        solution;      /* global exact solution vector */
  int        m;             /* total number of grid points */
  double     h;             /* mesh width: h = 1/(m-1) */
  PetscTruth debug;         /* flag (1 indicates activation of debugging printouts) */
} AppCtx;

/*
   User-defined routines, provided below.
*/
extern int InitialConditions(Vec,AppCtx*);
extern int RHSFunction(TS,double,Vec,Vec,void*);
extern int RHSJacobian(TS,double,Vec,Mat*,Mat*,MatStructure*,void*);
extern int Monitor(TS,int,double,Vec,void*);
extern int ExactSolution(double,Vec,AppCtx*);

/*
   Utility routine for finite difference Jacobian approximation
*/
extern int RHSJacobianFD(TS,double,Vec,Mat*,Mat*,MatStructure*,void*);

int main(int argc,char **argv)
{
  AppCtx     appctx;                 /* user-defined application context */
  TS         ts;                     /* timestepping context */
  Mat        A;                      /* Jacobian matrix data structure */
  Vec        u;                      /* approximate solution vector */
  int        time_steps_max = 1000;  /* default max timesteps */
  int        ierr,steps;
  double     ftime;                  /* final time */
  double     dt;
  double     time_total_max = 100.0; /* default max total time */
  PetscTruth flg;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscInitialize(&argc,&argv,(char*)0,help);

  appctx.comm = PETSC_COMM_WORLD;
  appctx.m    = 60;
  PetscOptionsGetInt(PETSC_NULL,"-M",&appctx.m,PETSC_NULL);
  PetscOptionsHasName(PETSC_NULL,"-debug",&appctx.debug);
  appctx.h    = 1.0/(appctx.m-1.0);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create distributed array (DA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern.  There are M
     total grid values spread equally among all the processors.
  */
  DACreate1d(PETSC_COMM_WORLD,DA_NONPERIODIC,appctx.m,1,1,PETSC_NULL,
             &appctx.da);
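
  /*
     Here DA_NONPERIODIC selects a non-periodic grid, and the two "1"
     arguments request one degree of freedom per node and a stencil width
     of one, so each process is ghosted with a single point on each side.
  */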

  /*
     Extract global and local vectors from DA; we use these to store the
     approximate solution.  Then duplicate these for remaining vectors that
     have the same types.
  */
  DACreateGlobalVector(appctx.da,&u);
  DACreateLocalVector(appctx.da,&appctx.u_local);

  /*
     Create local work vector for use in evaluating right-hand-side function;
     create global work vector for storing exact solution.
  */
  VecDuplicate(appctx.u_local,&appctx.localwork);
  VecDuplicate(u,&appctx.solution);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context; set callback routine for
     right-hand-side function evaluation.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSCreate(PETSC_COMM_WORLD,TS_NONLINEAR,&ts);
  TSSetRHSFunction(ts,RHSFunction,&appctx);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set optional user-defined monitoring routine
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSSetMonitor(ts,Monitor,&appctx,PETSC_NULL);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     For nonlinear problems, the user can provide a Jacobian evaluation
     routine (or use a finite differencing approximation).

     Create matrix data structure; set Jacobian evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,appctx.m,appctx.m,&A);
  MatSetFromOptions(A);
  PetscOptionsHasName(PETSC_NULL,"-fdjac",&flg);
  if (flg) {
    TSSetRHSJacobian(ts,A,A,RHSJacobianFD,&appctx);
  } else {
    TSSetRHSJacobian(ts,A,A,RHSJacobian,&appctx);
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set solution vector and initial timestep
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  dt   = appctx.h/2.0; /* initial timestep proportional to the mesh width */
  TSSetInitialTimeStep(ts,0.0,dt);
  TSSetSolution(ts,u);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize timestepping solver:
       - Set the solution method to be the backward Euler method.
       - Set timestepping duration info.
     Then set runtime options, which can override these defaults.
     For example,
          -ts_max_steps <maxsteps> -ts_max_time <maxtime>
     overrides the defaults set by TSSetDuration().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSSetType(ts,TS_BEULER);
  TSSetDuration(ts,time_steps_max,time_total_max);
  TSSetFromOptions(ts);
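
  /*
     For instance, a run such as
        ./ex2 -ts_type euler -ts_max_steps 10
     (a hypothetical invocation) would switch to forward Euler and stop
     after 10 steps.
  */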

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve the problem
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Evaluate initial conditions
  */
  InitialConditions(u,&appctx);

  /*
     Run the timestepping solver
  */
  TSStep(ts,&steps,&ftime);
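
  /*
     In this version of PETSc, TSStep() runs the entire timestepping loop;
     on return, steps holds the number of timesteps actually taken and
     ftime the final time reached.
  */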

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSDestroy(ts);
  VecDestroy(u);
  MatDestroy(A);
  DADestroy(appctx.da);
  VecDestroy(appctx.localwork);
  VecDestroy(appctx.solution);
  VecDestroy(appctx.u_local);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
  PetscFinalize();
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the solution at the initial time.

   Input Parameters:
   u - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
int InitialConditions(Vec u,AppCtx *appctx)
{
  Scalar *u_localptr,h = appctx->h,x;
  int    i,mybase,myend,ierr;

  /*
     Determine starting point of each processor's range of
     grid values.
  */
  VecGetOwnershipRange(u,&mybase,&myend);

  /*
    Get a pointer to vector data.
    - For default PETSc vectors, VecGetArray() returns a pointer to
      the data array.  Otherwise, the routine is implementation dependent.
    - You MUST call VecRestoreArray() when you no longer need access to
      the array.
    - Note that the Fortran interface to VecGetArray() differs from the
      C version.  See the users manual for details.
  */
  VecGetArray(u,&u_localptr);

  /*
     We initialize the solution array by simply writing the solution
     directly into the array locations.  Alternatively, we could use
     VecSetValues() or VecSetValuesLocal().  Note that the array returned
     by VecGetArray() is indexed from 0 on each process, so global index
     i corresponds to local slot i-mybase.
  */
  for (i=mybase; i<myend; i++) {
    x = h*(double)i; /* current location in global grid */
    u_localptr[i-mybase] = 1.0 + x*x;
  }

  /*
     Restore vector
  */
  VecRestoreArray(u,&u_localptr);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
     PetscPrintf(appctx->comm,"initial guess vector\n");
     VecView(u,PETSC_VIEWER_STDOUT_WORLD);
  }

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   ExactSolution - Computes the exact solution at a given time.

   Input Parameters:
   t - current time
   solution - vector in which exact solution will be computed
   appctx - user-defined application context

   Output Parameter:
   solution - vector with the newly computed exact solution
*/
int ExactSolution(double t,Vec solution,AppCtx *appctx)
{
  Scalar *s_localptr,h = appctx->h,x;
  int    i,mybase,myend,ierr;

  /*
     Determine starting and ending points of each processor's
     range of grid values
  */
  VecGetOwnershipRange(solution,&mybase,&myend);

  /*
     Get a pointer to vector data.
  */
  VecGetArray(solution,&s_localptr);

  /*
     Simply write the solution directly into the array locations.
     Alternatively, we could use VecSetValues() or VecSetValuesLocal().
  */
  for (i=mybase; i<myend; i++) {
    x = h*(double)i;
    s_localptr[i-mybase] = (t + 1.0)*(1.0 + x*x);
  }

  /*
     Restore vector
  */
  VecRestoreArray(solution,&s_localptr);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   Monitor - User-provided routine to monitor the solution computed at
   each timestep.  This example plots the solution and computes the
   error in two different norms.

   Input Parameters:
   ts     - the timestep context
   step   - the count of the current step (with 0 meaning the
            initial condition)
   time   - the current time
   u      - the solution at this timestep
   ctx    - the user-provided context for this monitoring routine.
            In this case we use the application context which contains
            information about the problem size, workspace and the exact
            solution.
*/
int Monitor(TS ts,int step,double time,Vec u,void *ctx)
{
  AppCtx    *appctx = (AppCtx*) ctx;   /* user-defined application context */
  int       ierr;
  double    en2,en2s,enmax;
  Scalar    mone = -1.0;
  PetscDraw draw;

  /*
     We use the default X windows viewer
             PETSC_VIEWER_DRAW_(appctx->comm)
     that is associated with the current communicator.  This saves
     the effort of calling PetscViewerDrawOpen() to create the window.
     Note that if we wished to plot several items in separate windows we
     would create each viewer with PetscViewerDrawOpen() and store them in
     the application context, appctx.

     Double buffering makes graphics look better.
  */
  PetscViewerDrawGetDraw(PETSC_VIEWER_DRAW_(appctx->comm),0,&draw);
  PetscDrawSetDoubleBuffer(draw);
  VecView(u,PETSC_VIEWER_DRAW_(appctx->comm));
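
  /*
     A minimal sketch of the separate-window alternative mentioned above
     (the title and window sizes are made up; error checking omitted):

        PetscViewer viewer;
        PetscViewerDrawOpen(appctx->comm,0,"Solution",PETSC_DECIDE,
                            PETSC_DECIDE,300,300,&viewer);
        VecView(u,viewer);
  */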

  /*
     Compute the exact solution at this timestep
  */
  ExactSolution(time,appctx->solution,appctx);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
     PetscPrintf(appctx->comm,"Computed solution vector\n");
     VecView(u,PETSC_VIEWER_STDOUT_WORLD);
     PetscPrintf(appctx->comm,"Exact solution vector\n");
     VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
  }

  /*
     Compute the 2-norm and max-norm of the error.  Note that VecAXPY()
     overwrites appctx->solution with the error, solution - u; the exact
     solution is recomputed from scratch at the next timestep.
  */
  VecAXPY(&mone,u,appctx->solution);
  VecNorm(appctx->solution,NORM_2,&en2);
  en2s  = sqrt(appctx->h)*en2; /* scale the 2-norm by the grid spacing */
  VecNorm(appctx->solution,NORM_MAX,&enmax);

  /*
     PetscPrintf() causes only the first processor in this
     communicator to print the timestep information.
  */
  PetscPrintf(appctx->comm,"Timestep %d: time = %g, 2-norm error = %g, max norm error = %g\n",
              step,time,en2s,enmax);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
     PetscPrintf(appctx->comm,"Error vector\n");
     VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
  }
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSFunction - User-provided routine that evaluates the right-hand-side
   function of the ODE.  This routine is set in the main program by
   calling TSSetRHSFunction().  We compute:
          global_out = F(global_in)

   Input Parameters:
   ts         - timestepping context
   t          - current time
   global_in  - vector containing the current iterate
   ctx        - (optional) user-provided context for function evaluation.
                In this case we use the appctx defined above.

   Output Parameter:
   global_out - vector containing the newly evaluated function
*/
int RHSFunction(TS ts,double t,Vec global_in,Vec global_out,void *ctx)
{
  AppCtx *appctx = (AppCtx*) ctx;       /* user-defined application context */
  DA     da = appctx->da;               /* distributed array */
  Vec    local_in = appctx->u_local;    /* local ghosted input vector */
  Vec    localwork = appctx->localwork; /* local ghosted work vector */
  int    ierr,i,localsize,rank,size;
  Scalar *copyptr,*localptr,sc;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local function computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to local vector, using the 2-step process
        DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DAGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);
  DAGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);
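
  /*
     Sketch of that overlap (this small example has no independent work,
     so the calls above are back to back):

        DAGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);
        ... computation that does not touch local_in ...
        DAGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);
  */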

  /*
      Access directly the values in our local INPUT work array
  */
  VecGetArray(local_in,&localptr);

  /*
      Access directly the values in our local OUTPUT work array
  */
  VecGetArray(localwork,&copyptr);

  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));

  /*
      Evaluate our function on the nodes owned by this processor
  */
  VecGetLocalSize(local_in,&localsize);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Handle boundary conditions: This is done by using the boundary condition
        u(t,boundary) = g(t,boundary)
     for some function g. Now take the derivative with respect to t to obtain
        u_t(t,boundary) = g_t(t,boundary)

     In our case, u(t,0) = t + 1, so that u_t(t,0) = 1
             and  u(t,1) = 2*t + 2, so that u_t(t,1) = 2
  */
  MPI_Comm_rank(appctx->comm,&rank);
  MPI_Comm_size(appctx->comm,&size);
  if (!rank)          copyptr[0]           = 1.0;
  if (rank == size-1) copyptr[localsize-1] = 2.0;

  /*
     Handle the interior nodes where the PDE is replaced by finite
     difference operators.
  */
  for (i=1; i<localsize-1; i++) {
    copyptr[i] = localptr[i] * sc * (localptr[i+1] + localptr[i-1] - 2.0*localptr[i]);
  }

  /*
     Restore vectors
  */
  VecRestoreArray(local_in,&localptr);
  VecRestoreArray(localwork,&copyptr);

  /*
     Insert values from the local OUTPUT vector into the global
     output vector
  */
  DALocalToGlobal(da,localwork,INSERT_VALUES,global_out);

  /* Print debugging information if desired */
  if (appctx->debug) {
     PetscPrintf(appctx->comm,"RHS function vector\n");
     VecView(global_out,PETSC_VIEWER_STDOUT_WORLD);
  }

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSJacobian - User-provided routine to compute the Jacobian of
   the nonlinear right-hand-side function of the ODE.

   Input Parameters:
   ts - the TS context
   t - current time
   global_in - global input vector
   ctx - optional user-defined context, as set by TSSetRHSJacobian()

   Output Parameters:
   AA - Jacobian matrix
   BB - optionally different preconditioning matrix
   str - flag indicating matrix structure

  Notes:
  RHSJacobian computes entries for the locally owned part of the Jacobian.
   - Currently, all PETSc parallel matrix formats are partitioned by
     contiguous chunks of rows across the processors.
   - Each processor needs to insert only elements that it owns
     locally (but any non-local elements will be sent to the
     appropriate processor during matrix assembly).
   - Always specify global row and column numbers of matrix entries
     when using MatSetValues().
   - Here, we set all entries for a particular row at once.
   - Note that MatSetValues() uses 0-based row and column numbers
     in Fortran as well as in C.
*/
int RHSJacobian(TS ts,double t,Vec global_in,Mat *AA,Mat *BB,MatStructure *str,void *ctx)
{
  Mat    A = *AA;                      /* Jacobian matrix */
  AppCtx *appctx = (AppCtx*)ctx;       /* user-defined application context */
  Vec    local_in = appctx->u_local;   /* local ghosted input vector */
  DA     da = appctx->da;              /* distributed array */
  Scalar v[3],*localptr,sc;
  int    ierr,i,mstart,mend,mstarts,mends,idx[3],is;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Get ready for local Jacobian computations
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Scatter ghost points to local vector, using the 2-step process
        DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
     By placing code between these two statements, computations can be
     done while messages are in transition.
  */
  DAGlobalToLocalBegin(da,global_in,INSERT_VALUES,local_in);
  DAGlobalToLocalEnd(da,global_in,INSERT_VALUES,local_in);

  /*
     Get pointer to vector data
  */
  VecGetArray(local_in,&localptr);

  /*
     Get starting and ending locally owned rows of the matrix
  */
  MatGetOwnershipRange(A,&mstarts,&mends);
  mstart = mstarts; mend = mends;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part of the Jacobian.
      - Currently, all PETSc parallel matrix formats are partitioned by
        contiguous chunks of rows across the processors.
      - Each processor needs to insert only elements that it owns
        locally (but any non-local elements will be sent to the
        appropriate processor during matrix assembly).
      - Here, we set all entries for a particular row at once.
      - We can set matrix entries using either
        MatSetValuesLocal() or MatSetValues().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Set matrix rows corresponding to boundary data
  */
  if (mstart == 0) {
    v[0] = 0.0;
    MatSetValues(A,1,&mstart,1,&mstart,v,INSERT_VALUES);
    mstart++;
  }
  if (mend == appctx->m) {
    mend--;
    v[0] = 0.0;
    MatSetValues(A,1,&mend,1,&mend,v,INSERT_VALUES);
  }

  /*
     Set matrix rows corresponding to interior data.  We construct the
     matrix one row at a time.
  */
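  /*
     Sketch of the row entries, obtained by differentiating
     F_i = sc*u_i*(u_{i+1} - 2*u_i + u_{i-1}):
        dF_i/du_{i-1} = sc*u_i
        dF_i/du_i     = sc*(u_{i+1} + u_{i-1} - 4*u_i)
        dF_i/du_{i+1} = sc*u_i
     The index is converts global row i into the ghosted local array
     (the +1 accounts for the left ghost point, or for the skipped
     boundary row on the first process).
  */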
  sc = 1.0/(appctx->h*appctx->h*2.0*(1.0+t)*(1.0+t));
  for (i=mstart; i<mend; i++) {
    idx[0] = i-1; idx[1] = i; idx[2] = i+1;
    is     = i - mstart + 1;
    v[0]   = sc*localptr[is];
    v[1]   = sc*(localptr[is+1] + localptr[is-1] - 4.0*localptr[is]);
    v[2]   = sc*localptr[is];
    MatSetValues(A,1,&i,3,idx,v,INSERT_VALUES);
  }

  /*
     Restore vector
  */
  VecRestoreArray(local_in,&localptr);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Complete the matrix assembly process and set some options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  /*
     Set flag to indicate that the Jacobian matrix retains an identical
     nonzero structure throughout all timestepping iterations (although the
     values of the entries change). Thus, we can save some work in setting
     up the preconditioner (e.g., no need to redo symbolic factorization for
     ILU/ICC preconditioners).
      - If the nonzero structure of the matrix is different during
        successive linear solves, then the flag DIFFERENT_NONZERO_PATTERN
        must be used instead.  If you are unsure whether the matrix
        structure has changed or not, use the flag DIFFERENT_NONZERO_PATTERN.
      - Caution:  If you specify SAME_NONZERO_PATTERN, PETSc
        believes your assertion and does not check the structure
        of the matrix.  If you erroneously claim that the structure
        is the same when it actually is not, the new preconditioner
        will not function correctly.  Thus, use this optimization
        feature with caution!
  */
  *str = SAME_NONZERO_PATTERN;

  /*
     Set an option to indicate that we will never add a new nonzero location
     to the matrix. If we do, it will generate an error.
  */
  MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR);

  return 0;
}