Actual source code: ex4.c
/*$Id: ex4.c,v 1.26 2001/04/10 19:37:12 bsmith Exp $*/

/* Program usage:  mpirun -np <procs> ex4 [-help] [all PETSc options] */

static char help[] ="Solves a simple time-dependent linear PDE (the heat equation).\n\
Input parameters include:\n\
  -m <points>, where <points> = number of grid points\n\
  -time_dependent_rhs : Treat the problem as having a time-dependent right-hand side\n\
  -debug              : Activate debugging printouts\n\
  -nox                : Deactivate x-window graphics\n\n";
/*
   Concepts: TS^time-dependent linear problems
   Concepts: TS^heat equation
   Concepts: TS^diffusion equation
   Processors: n
*/

/* ------------------------------------------------------------------------

   This program solves the one-dimensional heat equation (also called the
   diffusion equation),
       u_t = u_xx,
   on the domain 0 <= x <= 1, with the boundary conditions
       u(t,0) = 0, u(t,1) = 0,
   and the initial condition
       u(0,x) = sin(6*pi*x) + 3*sin(2*pi*x).
   This is a linear, second-order, parabolic equation.

   We discretize the right-hand side using finite differences with
   uniform grid spacing h:
       u_xx = (u_{i+1} - 2u_{i} + u_{i-1})/(h^2)
   We then demonstrate time evolution using the various TS methods by
   running the program via
       mpirun -np <procs> ex4 -ts_type <timestepping solver>

   We compare the approximate solution with the exact solution, given by
       u_exact(x,t) = exp(-36*pi*pi*t) * sin(6*pi*x) +
                      3*exp(-4*pi*pi*t) * sin(2*pi*x)

   Notes:
   This code demonstrates the TS solver interface to two variants of
   linear problems, u_t = f(u,t), namely
     - time-dependent f:   f(u,t) is a function of t
     - time-independent f: f(u,t) is simply f(u)

   The uniprocessor version of this code is ts/examples/tutorials/ex3.c

  ------------------------------------------------------------------------- */
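
/*
   For orientation (an illustrative sketch, consistent with the matrix
   assembled in RHSMatrixHeat() below): after spatial discretization the
   problem becomes the linear ODE system

       du/dt = A u,    u(0) given,

   where, with k = 1/h^2, the interior rows of A carry the stencil
   [ k  -2k  k ] and the first and last rows (the Dirichlet boundary
   points) contain only a single diagonal entry.
*/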

/*
   Include "petscda.h" so that we can use distributed arrays (DAs) to manage
   the parallel grid.  Include "petscts.h" so that we can use TS solvers.
   Note that this file automatically includes:
     petsc.h       - base PETSc routines   petscvec.h  - vectors
     petscsys.h    - system routines       petscmat.h  - matrices
     petscis.h     - index sets            petscksp.h  - Krylov subspace methods
     petscviewer.h - viewers               petscpc.h   - preconditioners
     petscsles.h   - linear solvers        petscsnes.h - nonlinear solvers
*/

#include "petscda.h"
#include "petscts.h"

/*
   User-defined application context - contains data needed by the
   application-provided call-back routines.
*/
typedef struct {
  MPI_Comm    comm;            /* communicator */
  DA          da;              /* distributed array data structure */
  Vec         localwork;       /* local ghosted work vector */
  Vec         u_local;         /* local ghosted approximate solution vector */
  Vec         solution;        /* global exact solution vector */
  int         m;               /* total number of grid points */
  double      h;               /* mesh width h = 1/(m-1) */
  PetscTruth  debug;           /* flag (1 indicates activation of debugging printouts) */
  PetscViewer viewer1,viewer2; /* viewers for the solution and error */
  double      norm_2,norm_max; /* error norms */
} AppCtx;

/*
   User-defined routines
*/
extern int InitialConditions(Vec,AppCtx*);
extern int RHSMatrixHeat(TS,double,Mat*,Mat*,MatStructure*,void*);
extern int Monitor(TS,int,double,Vec,void*);
extern int ExactSolution(double,Vec,AppCtx*);

int main(int argc,char **argv)
{
  AppCtx     appctx;                 /* user-defined application context */
  TS         ts;                     /* timestepping context */
  Mat        A;                      /* matrix data structure */
  Vec        u;                      /* approximate solution vector */
  double     time_total_max = 100.0; /* default max total time */
  int        time_steps_max = 100;   /* default max timesteps */
  PetscDraw  draw;                   /* drawing context */
  int        ierr,steps,size,m;
  double     dt,ftime;
  PetscTruth flg;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Initialize program and set problem parameters
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscInitialize(&argc,&argv,(char*)0,help);
  appctx.comm = PETSC_COMM_WORLD;

  m    = 60;
  PetscOptionsGetInt(PETSC_NULL,"-m",&m,PETSC_NULL);
  PetscOptionsHasName(PETSC_NULL,"-debug",&appctx.debug);
  appctx.m        = m;
  appctx.h        = 1.0/(m-1.0);
  appctx.norm_2   = 0.0;
  appctx.norm_max = 0.0;
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
  PetscPrintf(PETSC_COMM_WORLD,"Solving a linear TS problem, number of processors = %d\n",size);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create vector data structures
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create distributed array (DA) to manage parallel grid and vectors
     and to set up the ghost point communication pattern.  There are M
     total grid values spread equally among all the processors.
  */
  DACreate1d(PETSC_COMM_WORLD,DA_NONPERIODIC,m,1,1,PETSC_NULL,&appctx.da);
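  /*
     A note on the arguments as used here (descriptive only): m global grid
     points, 1 degree of freedom per point, a stencil width of 1 (one ghost
     point on each side), and PETSC_NULL to let PETSc choose how many points
     each processor owns.
  */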

  /*
     Extract global and local vectors from DA; we use these to store the
     approximate solution.  Then duplicate these for remaining vectors that
     have the same types.
  */
  DACreateGlobalVector(appctx.da,&u);
  DACreateLocalVector(appctx.da,&appctx.u_local);

  /*
     Create local work vector for use in evaluating right-hand-side function;
     create global work vector for storing exact solution.
  */
  VecDuplicate(appctx.u_local,&appctx.localwork);
  VecDuplicate(u,&appctx.solution);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set up displays to show graphs of the solution and error
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscViewerDrawOpen(PETSC_COMM_WORLD,0,"",80,380,400,160,&appctx.viewer1);
  PetscViewerDrawGetDraw(appctx.viewer1,0,&draw);
  PetscDrawSetDoubleBuffer(draw);
  PetscViewerDrawOpen(PETSC_COMM_WORLD,0,"",80,0,400,160,&appctx.viewer2);
  PetscViewerDrawGetDraw(appctx.viewer2,0,&draw);
  PetscDrawSetDoubleBuffer(draw);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create timestepping solver context
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSCreate(PETSC_COMM_WORLD,TS_LINEAR,&ts);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set optional user-defined monitoring routine
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSSetMonitor(ts,Monitor,&appctx,PETSC_NULL);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create matrix data structure; set matrix evaluation routine.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  MatCreate(PETSC_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,m,m,&A);
  MatSetFromOptions(A);

  PetscOptionsHasName(PETSC_NULL,"-time_dependent_rhs",&flg);
  if (flg) {
    /*
       For linear problems with a time-dependent f(u,t) in the equation
       u_t = f(u,t), the user provides the discretized right-hand-side
       as a time-dependent matrix.
    */
    TSSetRHSMatrix(ts,A,A,RHSMatrixHeat,&appctx);
  } else {
    /*
       For linear problems with a time-independent f(u) in the equation
       u_t = f(u), the user provides the discretized right-hand-side
       as a matrix only once, and then sets a null matrix evaluation
       routine.
    */
    MatStructure A_structure;
    RHSMatrixHeat(ts,0.0,&A,&A,&A_structure,&appctx);
    TSSetRHSMatrix(ts,A,A,PETSC_NULL,&appctx);
  }

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Set solution vector and initial timestep
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  dt = appctx.h*appctx.h/2.0;
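  /*
     A brief note on this choice: h^2/2 is the classical stability limit for
     the explicit (forward) Euler scheme applied to u_t = u_xx, so it is a
     safe default; implicit methods such as backward Euler tolerate much
     larger steps.
  */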
  TSSetInitialTimeStep(ts,0.0,dt);
  TSSetSolution(ts,u);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Customize timestepping solver:
       - Set the solution method to be the Backward Euler method.
       - Set timestepping duration info
     Then set runtime options, which can override these defaults.
     For example,
         -ts_max_steps <maxsteps> -ts_max_time <maxtime>
     to override the defaults set by TSSetDuration().
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSSetDuration(ts,time_steps_max,time_total_max);
  TSSetFromOptions(ts);
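
  /*
     Illustrative invocations (a sketch; the option names are taken from the
     help string and the comments above, and the -ts_type value is just one
     plausible choice):

         mpirun -np 2 ex4 -ts_type beuler -ts_max_steps 10
         mpirun -np 4 ex4 -time_dependent_rhs -debug -nox
  */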

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve the problem
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Evaluate initial conditions
  */
  InitialConditions(u,&appctx);

  /*
     Run the timestepping solver
  */
  TSStep(ts,&steps,&ftime);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     View timestepping solver info
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscPrintf(PETSC_COMM_WORLD,"avg. error (2 norm) = %g, avg. error (max norm) = %g\n",
              appctx.norm_2/steps,appctx.norm_max/steps);
  TSView(ts,PETSC_VIEWER_STDOUT_WORLD);

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  TSDestroy(ts);
  MatDestroy(A);
  VecDestroy(u);
  PetscViewerDestroy(appctx.viewer1);
  PetscViewerDestroy(appctx.viewer2);
  VecDestroy(appctx.localwork);
  VecDestroy(appctx.solution);
  VecDestroy(appctx.u_local);
  DADestroy(appctx.da);

  /*
     Always call PetscFinalize() before exiting a program.  This routine
       - finalizes the PETSc libraries as well as MPI
       - provides summary and diagnostic information if certain runtime
         options are chosen (e.g., -log_summary).
  */
  PetscFinalize();
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   InitialConditions - Computes the solution at the initial time.

   Input Parameters:
   u      - uninitialized solution vector (global)
   appctx - user-defined application context

   Output Parameter:
   u - vector with solution at initial time (global)
*/
int InitialConditions(Vec u,AppCtx *appctx)
{
  Scalar *u_localptr,h = appctx->h;
  int    i,mybase,myend,ierr;

  /*
     Determine starting point of each processor's range of
     grid values.
  */
  VecGetOwnershipRange(u,&mybase,&myend);

  /*
     Get a pointer to vector data.
     - For default PETSc vectors, VecGetArray() returns a pointer to
       the data array.  Otherwise, the routine is implementation dependent.
     - You MUST call VecRestoreArray() when you no longer need access to
       the array.
     - Note that the Fortran interface to VecGetArray() differs from the
       C version.  See the users manual for details.
  */
  VecGetArray(u,&u_localptr);

  /*
     We initialize the solution array by simply writing the solution
     directly into the array locations.  Alternatively, we could use
     VecSetValues() or VecSetValuesLocal(); a sketch follows the loop below.
  */
  for (i=mybase; i<myend; i++) {
    u_localptr[i-mybase] = PetscSinScalar(PETSC_PI*i*6.*h) + 3.*PetscSinScalar(PETSC_PI*i*2.*h);
  }
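
  /*
     A sketch of the VecSetValues() alternative mentioned above (illustrative,
     not compiled here): set entries by global index, then assemble before the
     vector is used.

         for (i=mybase; i<myend; i++) {
           Scalar val = PetscSinScalar(PETSC_PI*i*6.*h) + 3.*PetscSinScalar(PETSC_PI*i*2.*h);
           VecSetValues(u,1,&i,&val,INSERT_VALUES);
         }
         VecAssemblyBegin(u);
         VecAssemblyEnd(u);
  */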

  /*
     Restore vector
  */
  VecRestoreArray(u,&u_localptr);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
     PetscPrintf(appctx->comm,"initial guess vector\n");
     VecView(u,PETSC_VIEWER_STDOUT_WORLD);
  }

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   ExactSolution - Computes the exact solution at a given time.

   Input Parameters:
   t        - current time
   solution - vector in which exact solution will be computed
   appctx   - user-defined application context

   Output Parameter:
   solution - vector with the newly computed exact solution
*/
int ExactSolution(double t,Vec solution,AppCtx *appctx)
{
  Scalar *s_localptr,h = appctx->h,ex1,ex2,sc1,sc2;
  int    i,mybase,myend,ierr;

  /*
     Determine starting and ending points of each processor's
     range of grid values
  */
  VecGetOwnershipRange(solution,&mybase,&myend);

  /*
     Get a pointer to vector data.
  */
  VecGetArray(solution,&s_localptr);

  /*
     Simply write the solution directly into the array locations.
     Alternatively, we could use VecSetValues() or VecSetValuesLocal().
  */
  ex1 = exp(-36.*PETSC_PI*PETSC_PI*t); ex2 = exp(-4.*PETSC_PI*PETSC_PI*t);
  sc1 = PETSC_PI*6.*h;                 sc2 = PETSC_PI*2.*h;
  for (i=mybase; i<myend; i++) {
    s_localptr[i-mybase] = PetscSinScalar(sc1*(double)i)*ex1 + 3.*PetscSinScalar(sc2*(double)i)*ex2;
  }
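
  /*
     Sanity check (descriptive note): at t = 0 we have ex1 = ex2 = 1, so this
     expression reduces to sin(6*pi*x) + 3*sin(2*pi*x), matching the initial
     condition set in InitialConditions().
  */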

  /*
     Restore vector
  */
  VecRestoreArray(solution,&s_localptr);
  return 0;
}
/* --------------------------------------------------------------------- */
/*
   Monitor - User-provided routine to monitor the solution computed at
   each timestep.  This example plots the solution and computes the
   error in two different norms.

   Input Parameters:
   ts   - the timestep context
   step - the count of the current step (with 0 meaning the
          initial condition)
   time - the current time
   u    - the solution at this timestep
   ctx  - the user-provided context for this monitoring routine.
          In this case we use the application context which contains
          information about the problem size, workspace and the exact
          solution.
*/
int Monitor(TS ts,int step,double time,Vec u,void *ctx)
{
  AppCtx *appctx = (AppCtx*) ctx;   /* user-defined application context */
  int    ierr;
  double norm_2,norm_max;
  Scalar mone = -1.0;

  /*
     View a graph of the current iterate
  */
  VecView(u,appctx->viewer2);

  /*
     Compute the exact solution
  */
  ExactSolution(time,appctx->solution,appctx);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
     PetscPrintf(appctx->comm,"Computed solution vector\n");
     VecView(u,PETSC_VIEWER_STDOUT_WORLD);
     PetscPrintf(appctx->comm,"Exact solution vector\n");
     VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
  }

  /*
     Compute the 2-norm and max-norm of the error
  */
  VecAXPY(&mone,u,appctx->solution);
  VecNorm(appctx->solution,NORM_2,&norm_2);
  norm_2 = sqrt(appctx->h)*norm_2;
  VecNorm(appctx->solution,NORM_MAX,&norm_max);
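  /*
     Notes (descriptive): the VecAXPY() call above overwrites appctx->solution
     with (exact solution - u), i.e., the error vector, and the sqrt(h) factor
     scales the discrete 2-norm so that it approximates the continuous L2 norm:
     ||e||_L2 ~ sqrt(h * sum_i e_i^2) = sqrt(h) * ||e||_2.
  */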

  /*
     PetscPrintf() causes only the first processor in this
     communicator to print the timestep information.
  */
  PetscPrintf(appctx->comm,"Timestep %d: time = %g, 2-norm error = %g, max norm error = %g\n",
              step,time,norm_2,norm_max);
  appctx->norm_2   += norm_2;
  appctx->norm_max += norm_max;

  /*
     View a graph of the error
  */
  VecView(appctx->solution,appctx->viewer1);

  /*
     Print debugging information if desired
  */
  if (appctx->debug) {
     PetscPrintf(appctx->comm,"Error vector\n");
     VecView(appctx->solution,PETSC_VIEWER_STDOUT_WORLD);
  }

  return 0;
}
/* --------------------------------------------------------------------- */
/*
   RHSMatrixHeat - User-provided routine to compute the right-hand-side
   matrix for the heat equation.

   Input Parameters:
   ts  - the TS context
   t   - current time
   ctx - optional user-defined context, as set by TSSetRHSMatrix()

   Output Parameters:
   AA  - Jacobian matrix
   BB  - optionally different preconditioning matrix
   str - flag indicating matrix structure

   Notes:
   RHSMatrixHeat computes entries for the locally owned part of the system.
    - Currently, all PETSc parallel matrix formats are partitioned by
      contiguous chunks of rows across the processors.
    - Each processor needs to insert only elements that it owns
      locally (but any non-local elements will be sent to the
      appropriate processor during matrix assembly).
    - Always specify global row and column indices of matrix entries when
      using MatSetValues(); we could alternatively use MatSetValuesLocal().
    - Here, we set all entries for a particular row at once.
    - Note that MatSetValues() uses 0-based row and column numbers
      in Fortran as well as in C.
*/
int RHSMatrixHeat(TS ts,double t,Mat *AA,Mat *BB,MatStructure *str,void *ctx)
{
  Mat    A = *AA;                /* Jacobian matrix */
  AppCtx *appctx = (AppCtx*)ctx; /* user-defined application context */
  int    ierr,i,mstart,mend,idx[3];
  Scalar v[3],stwo = -2./(appctx->h*appctx->h),sone = -.5*stwo;

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute entries for the locally owned part of the matrix
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  MatGetOwnershipRange(A,&mstart,&mend);

  /*
     Set matrix rows corresponding to boundary data
  */

  if (mstart == 0) {  /* first processor only */
    v[0] = 1.0;
    MatSetValues(A,1,&mstart,1,&mstart,v,INSERT_VALUES);
    mstart++;
  }

  if (mend == appctx->m) { /* last processor only */
    mend--;
    v[0] = 1.0;
    MatSetValues(A,1,&mend,1,&mend,v,INSERT_VALUES);
  }

  /*
     Set matrix rows corresponding to interior data.  We construct the
     matrix one row at a time.
  */
  v[0] = sone; v[1] = stwo; v[2] = sone;
  for (i=mstart; i<mend; i++) {
    idx[0] = i-1; idx[1] = i; idx[2] = i+1;
    MatSetValues(A,1,&i,3,idx,v,INSERT_VALUES);
  }
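
  /*
     For concreteness (an illustrative sketch): with m = 5 and k = 1/h^2
     (so k = sone and -2k = stwo), the entries set above are

         [  1                     ]
         [  k  -2k    k           ]
         [      k   -2k    k      ]
         [           k   -2k    k ]
         [                      1 ]

     i.e., the standard second-difference stencil in the interior rows and a
     single diagonal entry of 1.0 in each Dirichlet boundary row; positions
     left blank are never inserted.
  */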

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Complete the matrix assembly process and set some options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  /*
     Assemble matrix, using the 2-step process:
       MatAssemblyBegin(), MatAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);

  /*
     Set flag to indicate that the Jacobian matrix retains an identical
     nonzero structure throughout all timestepping iterations (although the
     values of the entries change).  Thus, we can save some work in setting
     up the preconditioner (e.g., no need to redo symbolic factorization for
     ILU/ICC preconditioners).
      - If the nonzero structure of the matrix is different during
        successive linear solves, then the flag DIFFERENT_NONZERO_PATTERN
        must be used instead.  If you are unsure whether the matrix
        structure has changed or not, use the flag DIFFERENT_NONZERO_PATTERN.
      - Caution:  If you specify SAME_NONZERO_PATTERN, PETSc
        believes your assertion and does not check the structure
        of the matrix.  If you erroneously claim that the structure
        is the same when it actually is not, the new preconditioner
        will not function correctly.  Thus, use this optimization
        feature with caution!
  */
  *str = SAME_NONZERO_PATTERN;

  /*
     Set an option to indicate that we will never add a new nonzero location
     to the matrix.  If we do, it will generate an error.
  */
  MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR);

  return 0;
}