Actual source code: ex2f.F
1: !
2: ! "$Id: ex2f.F,v 1.34 2001/01/15 21:48:36 bsmith Exp $";
3: !/*T
4: ! Concepts: TS^time-dependent nonlinear problems
5: ! Processors: n
6: !T*/
7: !
8: ! ------------------------------------------------------------------------
9: !
10: ! This program solves a simple time-dependent nonlinear PDE using implicit
11: ! timestepping:
12: !                     u * u_xx
13: !             u_t = -------------
14: !                    2*(t+1)^2
15: !
16: ! u(0,x) = 1 + x*x; u(t,0) = t + 1; u(t,1) = 2*t + 2
17: !
18: ! The exact solution is u(t,x) = (1 + x*x) * (1 + t).
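!    (Check: u_t = 1 + x*x and u_xx = 2*(1 + t), so
!     u*u_xx / (2*(t+1)^2) = (1 + x*x)*(1 + t) * 2*(1 + t) / (2*(t+1)^2)
!                          = 1 + x*x = u_t,
!     and the initial and boundary conditions above are satisfied as well.)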
19: !
20: ! Note that since the solution is linear in time and quadratic in x,
21: ! the finite difference scheme actually computes the "exact" solution.
22: !
23: ! We use the backward Euler method.
24: !
25: ! --------------------------------------------------------------------------
27: program main
28: implicit none
30: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
31: ! Include files
32: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
33: !
34: ! Each routine within this program uses the include file 'ex2f.h',
35: ! which itself includes the various PETSc include files as well as
36: ! problem-specific data in several common blocks.
37: !
38: ! This program uses CPP for preprocessing, as indicated by the use of
39: ! PETSc include files from the directory petsc/include/finclude. This
40: ! convention allows the #include statements that define PETSc objects
41: ! and variables to be handled by the CPP preprocessor.
42: !
43: ! Use of the conventional Fortran include statements is also supported.
44: ! In this case, the PETSc include files are located in the directory
45: ! petsc/include/foldinclude.
46: !
47: ! Since one must be very careful to include each file no more than once
48: ! in a Fortran routine, application programmers must explicitly list
49: ! each file needed for the various PETSc components within their
50: ! program (unlike the C/C++ interface).
51: !
52: ! See the Fortran section of the PETSc users manual for details.
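!
!  (For orientation only -- an illustrative sketch, NOT the actual
!   contents of ex2f.h: such a header typically #includes the PETSc
!   Fortran include files it needs from petsc/include/finclude, e.g.
!       #include "include/finclude/petscts.h"
!       #include "include/finclude/petscda.h"
!   and declares the shared problem data (m, debug, h, one_d0, two_d0,
!   four_d0, zero_d0, comm, rank, size, da, u_local, localwork,
!   solution) in common blocks.)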
54: #include "ex2f.h"
56: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
57: ! Variable declarations
58: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
59: !
60: ! Variables:
61: ! ts - timestepping solver
62: ! A - Jacobian matrix context
63: ! u_local - local vector
64: ! u - approximate solution vector
65: ! ftime - final time
66: ! time_total_max - default total max time
67: ! time_steps_max - default max timesteps
68: !
69: ! Note that vectors are declared as PETSc "Vec" objects. These vectors
70: ! are mathematical objects that contain more than just an array of
71: ! double precision numbers. I.e., vectors in PETSc are not just
72: ! double precision x(*).
73: ! However, local vector data can be easily accessed via VecGetArray().
74: ! See the Fortran section of the PETSc users manual for details.
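!
!  A minimal sketch of the Fortran VecGetArray() idiom used later in
!  this file (variable names here are illustrative only):
!
!      double precision x_array(1)
!      PetscOffset      i_x
!      call VecGetArray(u,x_array,i_x,ierr)
!      ...  x_array(i_x + k) is the k-th locally owned entry  ...
!      call VecRestoreArray(u,x_array,i_x,ierr)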
76: TS ts
77: Vec u
78: Mat A
79: integer flg,ierr
80: integer time_steps_max,steps
81: double precision dt,ftime,time_total_max
83: ! Note: Any user-defined Fortran routines (such as RHSFunction)
84: ! MUST be declared as external.
86: external Monitor,RHSFunction
87: external InitialConditions,RHSJacobian
89: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
90: ! Beginning of program
91: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
93: call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
94: comm = PETSC_COMM_WORLD
95: call MPI_Comm_size(comm,size,ierr)
96: call MPI_Comm_rank(comm,rank,ierr)
98: ! Initialize problem parameters
100: time_steps_max = 1000
101: time_total_max = 100.0
102: m = 60
103: debug = 0
104: call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
105: call PetscOptionsHasName(PETSC_NULL_CHARACTER,'-debug',debug,ierr)
106: one_d0 = 1.0d0
107: two_d0 = 2.0d0
108: four_d0 = 4.0d0
! zero_d0 is used in RHSJacobian, so initialize it here as well
zero_d0 = 0.0d0
109: h = one_d0/(m-one_d0)
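!  With this spacing the grid points are x = h*(i-1), i = 1,...,m, so the
!  first and last points fall on the boundaries x = 0 and x = 1.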
111: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
112: ! Create vector data structures
113: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
115: ! Create distributed array (DA) to manage parallel grid and vectors
116: ! Set up the ghost point communication pattern. There are m total
117: ! grid values spread equally among all the processors.
119: call DACreate1d(PETSC_COMM_WORLD,DA_NONPERIODIC,m,1,1,
120: & PETSC_NULL_INTEGER,da,ierr)
122: ! Extract global and local vectors from DA; then duplicate for remaining
123: ! vectors of the same type.
125: call DACreateGlobalVector(da,u,ierr)
126: call DACreateLocalVector(da,u_local,ierr)
128: ! Make local work vector for evaluating right-hand-side function
129: call VecDuplicate(u_local,localwork,ierr)
131: ! Make global work vector for storing exact solution
132: call VecDuplicate(u,solution,ierr)
134: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
135: ! Create timestepping solver context; set callback routine for
136: ! right-hand-side function evaluation.
137: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
139: call TSCreate(comm,TS_NONLINEAR,ts,ierr)
140: call TSSetRHSFunction(ts,RHSFunction,PETSC_NULL_OBJECT,ierr)
142: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
143: ! Set optional user-defined monitoring routine
144: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
146: call TSSetMonitor(ts,Monitor,PETSC_NULL_OBJECT,
147: & PETSC_NULL_FUNCTION,ierr)
149: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
150: ! For nonlinear problems, the user can provide a Jacobian evaluation
151: ! routine (or use a finite difference approximation).
152: !
153: ! Create matrix data structure; set Jacobian evaluation routine.
154: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
156: call MatCreate(comm,PETSC_DECIDE,PETSC_DECIDE,m,m,A,ierr)
157: call PetscOptionsHasName(PETSC_NULL_CHARACTER,'-fdjac',flg,ierr)
158: if (flg .eq. 1) then
159: call SetCRoutineFromFortran(ts,A,A,ierr)
160: else
161: call TSSetRHSJacobian(ts,A,A,RHSJacobian,PETSC_NULL_OBJECT, &
162: & ierr)
163: endif
165: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
166: ! Set solution vector and initial timestep
167: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
169: dt = h/two_d0
170: call TSSetInitialTimeStep(ts,0.d0,dt,ierr)
171: call TSSetSolution(ts,u,ierr)
173: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
174: ! Customize timestepping solver:
175: ! - Set the solution method to be the Backward Euler method.
176: ! - Set timestepping duration info
177: ! Then set runtime options, which can override these defaults.
178: ! For example,
179: ! -ts_max_steps <maxsteps> -ts_max_time <maxtime>
180: ! to override the defaults set by TSSetDuration().
181: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
183: call TSSetType(ts,TS_BEULER,ierr)
184: call TSSetDuration(ts,time_steps_max,time_total_max,ierr)
185: call TSSetFromOptions(ts,ierr)
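!  For example, an illustrative run (the command line will vary with the
!  local MPI installation) might be:
!      mpirun -np 2 ex2f -m 80 -ts_max_steps 10 -ts_max_time 10.0
!  which uses 2 processors, an 80-point grid, and overrides the duration
!  defaults set above.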
187: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
188: ! Solve the problem
189: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
191: ! Evaluate initial conditions
193: call InitialConditions(u)
195: ! Run the timestepping solver
197: call TSStep(ts,steps,ftime,ierr)
199: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
200: ! Free work space. All PETSc objects should be destroyed when they
201: ! are no longer needed.
202: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
204: call TSDestroy(ts,ierr)
205: call VecDestroy(localwork,ierr)
206: call VecDestroy(solution,ierr)
207: call VecDestroy(u_local,ierr)
208: call VecDestroy(u,ierr)
209: call DADestroy(da,ierr)
210: call MatDestroy(A,ierr)
212: ! Always call PetscFinalize() before exiting a program. This routine
213: ! - finalizes the PETSc libraries as well as MPI
214: ! - provides summary and diagnostic information if certain runtime
215: ! options are chosen (e.g., -log_summary).
217: call PetscFinalize(ierr)
218: end
220: ! ------------------------------------------------------------------------
221: !
222: ! InitialConditions - Computes the solution at the initial time.
223: !
224: ! Input Parameter:
225: ! u - uninitialized solution vector (global)
226: ! (problem data is passed via the common blocks in ex2f.h)
227: !
228: ! Output Parameter:
229: ! u - vector with solution at initial time (global)
230: !
231: subroutine InitialConditions(u)
232: implicit none
233: #include "ex2f.h"
235: ! Input/output parameters:
236: Vec u
238: ! Local variables:
239: double precision localptr(1),x
240: integer i,mybase,myend,ierr
241: PetscOffset idx
243: ! Determine starting and ending point of each processor's range of
244: ! grid values. Note that we shift by 1 to convert from the 0-based
245: ! C convention of starting indices to the 1-based Fortran convention.
247: call VecGetOwnershipRange(u,mybase,myend,ierr)
248: mybase = mybase + 1
250: ! Get a pointer to vector data.
251: ! - For default PETSc vectors, VecGetArray() returns a pointer to
252: ! the data array. Otherwise, the routine is implementation dependent.
253: ! - You MUST call VecRestoreArray() when you no longer need access to
254: ! the array.
255: ! - Note that the Fortran interface to VecGetArray() differs from the
256: ! C version. See the users manual for details.
258: call VecGetArray(u,localptr,idx,ierr)
260: ! We initialize the solution array by simply writing the solution
261: ! directly into the array locations. Alternatively, we could use
262: ! VecSetValues() or VecSetValuesLocal().
264: do 10, i=mybase,myend
265: ! x - current location in global grid
266: x = h*(i-1)
267: localptr(i-mybase+idx+1) = one_d0 + x*x
268: 10 continue
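!  (Sketch of the VecSetValues() alternative mentioned above, which uses
!   0-based GLOBAL indices and a vector assembly step instead of direct
!   array access:
!       call VecSetValues(u,1,i-1,one_d0+x*x,INSERT_VALUES,ierr)
!       ...
!       call VecAssemblyBegin(u,ierr)
!       call VecAssemblyEnd(u,ierr)
!   Here i-1 converts the 1-based loop index to a 0-based global index.)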
270: ! Restore vector
272: call VecRestoreArray(u,localptr,idx,ierr)
274: ! Print debugging information if desired
275: if (debug .eq. 1) then
276: if (rank .eq. 0) write(6,*) 'initial guess vector'
277: call VecView(u,PETSC_VIEWER_STDOUT_WORLD,ierr)
278: endif
280: return
281: end
283: ! ------------------------------------------------------------------------
284: !
285: ! ExactSolution - Computes the exact solution at a given time.
286: !
287: ! Input Parameter:
288: ! t - current time
290: !
291: ! Output Parameter:
292: ! ierr - error code
293: !
294: ! Note: The solution vector is stored in the common block!
295: !
296: subroutine ExactSolution(t,ierr)
297: implicit none
298: #include "ex2f.h"
300: ! Input/output parameters:
301: double precision t
302: integer ierr
304: ! Local variables:
305: double precision localptr(1),x
306: integer i,mybase,myend
307: PetscOffset idx
309: ! Determine starting and ending point of each processor's range of
310: ! grid values. Note that we shift by 1 to convert from the 0-based
311: ! C convention of starting indices to the 1-based Fortran convention.
313: call VecGetOwnershipRange(solution,mybase,myend,ierr)
314: mybase = mybase + 1
316: ! Get a pointer to vector data
317: call VecGetArray(solution,localptr,idx,ierr)
319: ! Simply write the solution directly into the array locations
320: ! Alternatively, we could use VecSetValues() or VecSetValuesLocal().
322: do 10, i=mybase,myend
323: ! x - current location in global grid
324: x = h*(i-1)
325: localptr(i-mybase+idx+1) = (t + one_d0)*(one_d0 + x*x)
326: 10 continue
328: ! Restore vector
330: call VecRestoreArray(solution,localptr,idx,ierr)
332: return
333: end
335: ! ------------------------------------------------------------------------
336: !
337: ! Monitor - A user-provided routine to monitor the solution computed at
338: ! each time-step. This example plots the solution and computes the
339: ! error in two different norms.
340: !
341: ! Input Parameters:
342: ! ts - the time-step context
343: ! step - the count of the current step (with 0 meaning the
344: ! initial condition)
345: ! time - the current time
346: ! u - the solution at this timestep
347: ! dummy - optional user-provided context for this monitoring
348: ! routine (not used here)
349: !
350: subroutine Monitor(ts,step,time,u,dummy,ierr)
351: implicit none
352: #include "ex2f.h"
354: ! Input/output parameters:
355: TS ts
356: integer step,dummy
357: double precision time
358: Vec u
359: integer ierr
361: ! Local variables:
362: double precision en2,en2s,enmax
363: double precision mone
364: PetscDraw draw
366: mone = -1.0d0
368: ! We use the default X windows viewer
369: ! PETSC_VIEWER_DRAW_WORLD
370: ! that is associated with the PETSC_COMM_WORLD communicator. This
371: ! saves the effort of calling PetscViewerDrawOpen() to create the window.
372: ! Note that if we wished to plot several items in separate windows we
373: ! would create each viewer with PetscViewerDrawOpen() and store them in
374: ! the application context, appctx.
375: !
376: ! Double buffering makes graphics look better.
378: call PetscViewerDrawGetDraw(PETSC_VIEWER_DRAW_WORLD,0,draw,ierr)
379: call PetscDrawSetDoubleBuffer(draw,ierr)
380: call VecView(u,PETSC_VIEWER_DRAW_WORLD,ierr)
382: ! Compute the exact solution at this time-step. Note that the
383: ! solution vector is passed via the user-defined common block.
384: call ExactSolution(time,ierr)
386: ! Print debugging information if desired
387: if (debug .eq. 1) then
388: if (rank .eq. 0) write(6,*) 'Computed solution vector'
389: call VecView(u,PETSC_VIEWER_STDOUT_WORLD,ierr)
390: if (rank .eq. 0) write(6,*) 'Exact solution vector'
391: call VecView(solution,PETSC_VIEWER_STDOUT_WORLD,ierr)
392: endif
394: ! Compute the 2-norm and max-norm of the error
395: call VecAXPY(mone,u,solution,ierr)
396: call VecNorm(solution,NORM_2,en2,ierr)
397: ! Scale the 2-norm by sqrt(h) so that it approximates the continuous L2 norm
398: en2s = dsqrt(h)*en2
399: call VecNorm(solution,NORM_MAX,enmax,ierr)
401: ! Print only from processor 0
402: if (rank .eq. 0) write(6,100) step,time,en2s,enmax
403: 100 format('Timestep = ',i5,', time = ',f8.3, &
404: & ' sec, error [2-norm] = ',e9.3, &
405: & ', error [max-norm] = ',e9.3)
407: ! Print debugging information if desired
408: if (debug .eq. 1) then
409: if (rank .eq. 0) write(6,*) 'Error vector'
410: call VecView(solution,PETSC_VIEWER_STDOUT_WORLD,ierr)
411: endif
413: return
414: end
416: ! ------------------------------------------------------------------------
417: !
418: ! RHSFunction - User-provided routine that evaluates the RHS function
419: ! in the ODE. This routine is set in the main program by calling
420: ! TSSetRHSFunction(). We compute:
421: ! global_out = F(global_in)
422: !
423: ! Input Parameters:
424: ! ts - timestep context
425: ! t - current time
426: ! global_in - input vector to function
427: ! dummy - (optional) user-provided context for function evaluation
428: ! (not used here because we use a common block instead)
429: !
430: ! Output Parameter:
431: ! global_out - value of function
432: !
433: subroutine RHSFunction(ts,t,global_in,global_out,dummy)
434: implicit none
435: #include "ex2f.h"
437: ! Input/output parameters:
438: TS ts
439: double precision t
440: Vec global_in,global_out
441: integer dummy
443: ! Local variables:
444: double precision localptr(1),copyptr(1),sc
445: integer i,il,ierr,localsize
446: PetscOffset idx_c,idx_l
448: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
449: ! Get ready for local function computations
450: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
452: ! Scatter ghost points to local vector, using the 2-step process
453: ! DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
454: ! By placing code between these two statements, computations can be
455: ! done while messages are in transition.
457: call DAGlobalToLocalBegin(da,global_in,INSERT_VALUES,u_local,ierr)
458: call DAGlobalToLocalEnd(da,global_in,INSERT_VALUES,u_local,ierr)
460: ! Access directly the values in our local INPUT work vector
462: call VecGetArray(u_local,localptr,idx_l,ierr)
464: ! Access directly the values in our local OUTPUT work vector
466: call VecGetArray(localwork,copyptr,idx_c,ierr)
468: sc = one_d0/(h*h*two_d0*(one_d0+t)*(one_d0+t))
470: ! Evaluate our function on the nodes owned by this processor
472: call VecGetLocalSize(u_local,localsize,ierr)
474: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
475: ! Compute entries for the locally owned part
476: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
478: ! Handle boundary conditions: This is done by using the boundary condition
479: ! u(t,boundary) = g(t,boundary)
480: ! for some function g. Now take the derivative with respect to t to obtain
481: ! u_{t}(t,boundary) = g_{t}(t,boundary)
482: !
483: ! In our case, u(t,0) = t + 1, so that u_{t}(t,0) = 1
484: ! and u(t,1) = 2*t + 2, so that u_{t}(t,1) = 2
486: if (rank .eq. 0) copyptr(1+idx_c) = one_d0
487: if (rank .eq. size-1) copyptr(localsize+idx_c) = two_d0
489: ! Handle the interior nodes where the PDE is replaced by finite
490: ! difference operators.
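!  For each such node the discrete right-hand side is
!      F_i = u_i * (u_{i+1} - 2*u_i + u_{i-1}) / (2*h^2*(1+t)^2)
!  i.e., sc times the three-point stencil evaluated in the loop below.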
492: do 10, i=2,localsize-1
493: il = i + idx_l
494: copyptr(i+idx_c) = localptr(il) * sc * &
495: & (localptr(il+1) + localptr(il-1) - two_d0*localptr(il))
496: 10 continue
498: call VecRestoreArray(u_local,localptr,idx_l,ierr)
499: call VecRestoreArray(localwork,copyptr,idx_c,ierr)
501: ! Return the values from our local OUTPUT vector into our global
502: ! output vector.
504: call DALocalToGlobal(da,localwork,INSERT_VALUES,global_out,ierr)
506: ! Print debugging information if desired
507: if (debug .eq. 1) then
508: if (rank .eq. 0) write(6,*) 'RHS function vector'
509: call VecView(global_out,PETSC_VIEWER_STDOUT_WORLD,ierr)
510: endif
512: return
513: end
515: ! ------------------------------------------------------------------------
516: !
517: ! RHSJacobian - User-provided routine to compute the Jacobian of the
518: ! right-hand-side function.
519: !
520: ! Input Parameters:
521: ! ts - the TS context
522: ! t - current time
523: ! global_in - global input vector
524: ! dummy - optional user-defined context, as set by TSSetRHSJacobian()
525: !
526: ! Output Parameters:
527: ! A - Jacobian matrix
528: ! B - optionally different preconditioning matrix
529: ! str - flag indicating matrix structure
530: !
531: ! Notes:
532: ! RHSJacobian computes entries for the locally owned part of the Jacobian.
533: ! - Currently, all PETSc parallel matrix formats are partitioned by
534: ! contiguous chunks of rows across the processors. The row index
535: ! i used in the loop below is the global row number corresponding
536: ! to each locally owned grid point.
537: ! - Each processor needs to insert only elements that it owns
538: ! locally (but any non-local elements will be sent to the
539: ! appropriate processor during matrix assembly).
540: ! - Always specify global row and columns of matrix entries.
541: ! - Here, we set all entries for a particular row at once.
542: ! - Note that MatSetValues() uses 0-based row and column numbers
543: ! in Fortran as well as in C.
545: subroutine RHSJacobian(ts,t,global_in,A,B,str,dummy)
546: implicit none
547: #include "ex2f.h"
549: ! Input/output parameters:
550: TS ts
551: double precision t
552: Vec global_in
553: Mat A,B
554: MatStructure str
555: integer dummy
557: ! Local variables:
558: double precision localptr(1),sc,v(3)
559: integer i,ierr,col(3),is
560: integer mstart,mend,mstarts,mends
561: PetscOffset idx
563: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
564: ! Get ready for local Jacobian computations
565: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
567: ! Scatter ghost points to local vector, using the 2-step process
568: ! DAGlobalToLocalBegin(), DAGlobalToLocalEnd().
569: ! By placing code between these two statements, computations can be
570: ! done while messages are in transition.
572: call DAGlobalToLocalBegin(da,global_in,INSERT_VALUES,u_local,ierr)
573: call DAGlobalToLocalEnd(da,global_in,INSERT_VALUES,u_local,ierr)
575: ! Get pointer to vector data
577: call VecGetArray(u_local,localptr,idx,ierr)
579: ! Get starting and ending locally owned rows of the matrix
581: call MatGetOwnershipRange(A,mstarts,mends,ierr)
582: mstart = mstarts
583: mend = mends
585: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
586: ! Compute entries for the locally owned part of the Jacobian.
587: ! - Currently, all PETSc parallel matrix formats are partitioned by
588: ! contiguous chunks of rows across the processors.
589: ! - Each processor needs to insert only elements that it owns
590: ! locally (but any non-local elements will be sent to the
591: ! appropriate processor during matrix assembly).
592: ! - Here, we set all entries for a particular row at once.
593: ! - We can set matrix entries using either
594: ! MatSetValuesLocal() or MatSetValues().
595: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
597: ! Set matrix rows corresponding to boundary data
599: if (mstart .eq. 0) then
600: v(1) = zero_d0
601: call MatSetValues(A,1,mstart,1,mstart,v,INSERT_VALUES,ierr)
602: mstart = mstart + 1
603: endif
604: if (mend .eq. m) then
605: mend = mend - 1
606: v(1) = zero_d0
607: call MatSetValues(A,1,mend,1,mend,v,INSERT_VALUES,ierr)
608: endif
610: ! Set matrix rows corresponding to interior data.
611: ! We construct the matrix one row at a time.
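!  For the interior stencil F_i = sc*u_i*(u_{i+1} + u_{i-1} - 2*u_i),
!  the nonzero Jacobian entries in row i are
!      dF_i/du_{i-1} = sc*u_i
!      dF_i/du_i     = sc*(u_{i+1} + u_{i-1} - 4*u_i)
!      dF_i/du_{i+1} = sc*u_i
!  which are the values v(1), v(2), and v(3) set below.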
613: sc = one_d0/(h*h*two_d0*(one_d0+t)*(one_d0+t))
614: do 10, i=mstart,mend-1
615: col(1) = i-1
616: col(2) = i
617: col(3) = i+1
618: is = i - mstart + 1 + idx + 1
619: v(1) = sc*localptr(is)
620: v(2) = sc*(localptr(is+1) + localptr(is-1) - &
621: & four_d0*localptr(is))
622: v(3) = sc*localptr(is)
623: call MatSetValues(A,1,i,3,col,v,INSERT_VALUES,ierr)
624: 10 continue
626: ! Restore vector
628: call VecRestoreArray(u_local,localptr,idx,ierr)
630: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
631: ! Complete the matrix assembly process and set some options
632: ! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
634: ! Assemble matrix, using the 2-step process:
635: ! MatAssemblyBegin(), MatAssemblyEnd()
636: ! Computations can be done while messages are in transition,
637: ! by placing code between these two statements.
639: call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr)
640: call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr)
642: ! Set flag to indicate that the Jacobian matrix retains an identical
643: ! nonzero structure throughout all timestepping iterations (although the
644: ! values of the entries change). Thus, we can save some work in setting
645: ! up the preconditioner (e.g., no need to redo symbolic factorization for
646: ! ILU/ICC preconditioners).
647: ! - If the nonzero structure of the matrix is different during
648: ! successive linear solves, then the flag DIFFERENT_NONZERO_PATTERN
649: ! must be used instead. If you are unsure whether the matrix
650: ! structure has changed or not, use the flag DIFFERENT_NONZERO_PATTERN.
651: ! - Caution: If you specify SAME_NONZERO_PATTERN, PETSc
652: ! believes your assertion and does not check the structure
653: ! of the matrix. If you erroneously claim that the structure
654: ! is the same when it actually is not, the new preconditioner
655: ! will not function correctly. Thus, use this optimization
656: ! feature with caution!
658: str = SAME_NONZERO_PATTERN
660: ! Set an option to indicate that we will never add a new nonzero location
661: ! to the matrix. If we do, it will generate an error.
663: call MatSetOption(A,MAT_NEW_NONZERO_LOCATION_ERR,ierr)
665: return
666: end