Actual source code: ex2.c
2: /* Program usage: mpiexec -np <procs> ex2 [-help] [all PETSc options] */
4: static char help[] = "Solves a linear system in parallel with KSP.\n\
5: Input parameters include:\n\
6: -random_exact_sol : use a random exact solution vector\n\
7: -view_exact_sol : write exact solution vector to stdout\n\
8: -m <mesh_x> : number of mesh points in x-direction\n\
9: -n <mesh_n> : number of mesh points in y-direction\n\n";
11: /*T
12: Concepts: KSP^basic parallel example;
13: Concepts: KSP^Laplacian, 2d
14: Concepts: Laplacian, 2d
15: Processors: n
16: T*/
18: /*
19: Include "petscksp.h" so that we can use KSP solvers. Note that this file
20: automatically includes:
21: petsc.h - base PETSc routines petscvec.h - vectors
22: petscsys.h - system routines petscmat.h - matrices
23: petscis.h - index sets petscksp.h - Krylov subspace methods
24: petscviewer.h - viewers petscpc.h - preconditioners
25: */
26: #include petscksp.h
30: int main(int argc,char **args)
31: {
32: Vec x,b,u; /* approx solution, RHS, exact solution */
33: Mat A; /* linear system matrix */
34: KSP ksp; /* linear solver context */
35: PetscRandom rctx; /* random number generator context */
36: PetscReal norm; /* norm of solution error */
37: PetscInt i,j,Ii,J,Istart,Iend,m = 8,n = 7,its;
39: PetscTruth flg;
40: PetscScalar v,one = 1.0,neg_one = -1.0;
42: PetscInitialize(&argc,&args,(char *)0,help);
43: PetscOptionsGetInt(PETSC_NULL,"-m",&m,PETSC_NULL);
44: PetscOptionsGetInt(PETSC_NULL,"-n",&n,PETSC_NULL);
45: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
46: Compute the matrix and right-hand-side vector that define
47: the linear system, Ax = b.
48: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
49: /*
50: Create parallel matrix, specifying only its global dimensions.
51: When using MatCreate(), the matrix format can be specified at
52: runtime. Also, the parallel partitioning of the matrix is
53: determined by PETSc at runtime.
55: Performance tuning note: For problems of substantial size,
56: preallocation of matrix memory is crucial for attaining good
57: performance. See the matrix chapter of the users manual for details.
58: */
59: MatCreate(PETSC_COMM_WORLD,&A);
60: MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
61: MatSetType(A, MATAIJ);
62: MatSetFromOptions(A);
63: MatMPIAIJSetPreallocation(A,5,PETSC_NULL,5,PETSC_NULL);
64: MatSeqAIJSetPreallocation(A,5,PETSC_NULL);
66: /*
67: Currently, all PETSc parallel matrix formats are partitioned by
68: contiguous chunks of rows across the processors. Determine which
69: rows of the matrix are locally owned.
70: */
71: MatGetOwnershipRange(A,&Istart,&Iend);
73: /*
74: Set matrix elements for the 2-D, five-point stencil in parallel.
75: - Each processor needs to insert only elements that it owns
76: locally (but any non-local elements will be sent to the
77: appropriate processor during matrix assembly).
78: - Always specify global rows and columns of matrix entries.
80: Note: this uses the less common natural ordering that orders first
81: all the unknowns for x = h then for x = 2h etc; Hence you see J = Ii +- n
82: instead of J = I +- m as you might expect. The more standard ordering
83: would first do all variables for y = h, then y = 2h etc.
85: */
86: for (Ii=Istart; Ii<Iend; Ii++) {
87: v = -1.0; i = Ii/n; j = Ii - i*n;
88: if (i>0) {J = Ii - n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
89: if (i<m-1) {J = Ii + n; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
90: if (j>0) {J = Ii - 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
91: if (j<n-1) {J = Ii + 1; MatSetValues(A,1,&Ii,1,&J,&v,INSERT_VALUES);}
92: v = 4.0; MatSetValues(A,1,&Ii,1,&Ii,&v,INSERT_VALUES);
93: }
95: /*
96: Assemble matrix, using the 2-step process:
97: MatAssemblyBegin(), MatAssemblyEnd()
98: Computations can be done while messages are in transition
99: by placing code between these two statements.
100: */
101: MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);
102: MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);
104: /*
105: Create parallel vectors.
106: - We form 1 vector from scratch and then duplicate as needed.
107: - When using VecCreate(), VecSetSizes and VecSetFromOptions()
108: in this example, we specify only the
109: vector's global dimension; the parallel partitioning is determined
110: at runtime.
111: - When solving a linear system, the vectors and matrices MUST
112: be partitioned accordingly. PETSc automatically generates
113: appropriately partitioned matrices and vectors when MatCreate()
114: and VecCreate() are used with the same communicator.
115: - The user can alternatively specify the local vector and matrix
116: dimensions when more sophisticated partitioning is needed
117: (replacing the PETSC_DECIDE argument in the VecSetSizes() statement
118: below).
119: */
120: VecCreate(PETSC_COMM_WORLD,&u);
121: VecSetSizes(u,PETSC_DECIDE,m*n);
122: VecSetFromOptions(u);
123: VecDuplicate(u,&b);
124: VecDuplicate(b,&x);
126: /*
127: Set exact solution; then compute right-hand-side vector.
128: By default we use an exact solution of a vector with all
129: elements of 1.0; Alternatively, using the runtime option
130: -random_sol forms a solution vector with random components.
131: */
132: PetscOptionsHasName(PETSC_NULL,"-random_exact_sol",&flg);
133: if (flg) {
134: PetscRandomCreate(PETSC_COMM_WORLD,&rctx);
135: PetscRandomSetFromOptions(rctx);
136: VecSetRandom(u,rctx);
137: PetscRandomDestroy(rctx);
138: } else {
139: VecSet(u,one);
140: }
141: MatMult(A,u,b);
143: /*
144: View the exact solution vector if desired
145: */
146: PetscOptionsHasName(PETSC_NULL,"-view_exact_sol",&flg);
147: if (flg) {VecView(u,PETSC_VIEWER_STDOUT_WORLD);}
149: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
150: Create the linear solver and set various options
151: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
153: /*
154: Create linear solver context
155: */
156: KSPCreate(PETSC_COMM_WORLD,&ksp);
158: /*
159: Set operators. Here the matrix that defines the linear system
160: also serves as the preconditioning matrix.
161: */
162: KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
164: /*
165: Set linear solver defaults for this problem (optional).
166: - By extracting the KSP and PC contexts from the KSP context,
167: we can then directly call any KSP and PC routines to set
168: various options.
169: - The following two statements are optional; all of these
170: parameters could alternatively be specified at runtime via
171: KSPSetFromOptions(). All of these defaults can be
172: overridden at runtime, as indicated below.
173: */
174: KSPSetTolerances(ksp,1.e-2/((m+1)*(n+1)),1.e-50,PETSC_DEFAULT,
175: PETSC_DEFAULT);
177: /*
178: Set runtime options, e.g.,
179: -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
180: These options will override those specified above as long as
181: KSPSetFromOptions() is called _after_ any other customization
182: routines.
183: */
184: KSPSetFromOptions(ksp);
186: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
187: Solve the linear system
188: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
190: KSPSolve(ksp,b,x);
192: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
193: Check solution and clean up
194: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
196: /*
197: Check the error
198: */
199: VecAXPY(x,neg_one,u);
200: VecNorm(x,NORM_2,&norm);
201: KSPGetIterationNumber(ksp,&its);
202: /* Scale the norm */
203: /* norm *= sqrt(1.0/((m+1)*(n+1))); */
205: /*
206: Print convergence information. PetscPrintf() produces a single
207: print statement from all processes that share a communicator.
208: An alternative is PetscFPrintf(), which prints to a file.
209: */
210: PetscPrintf(PETSC_COMM_WORLD,"Norm of error %A iterations %D\n",
211: norm,its);
213: /*
214: Free work space. All PETSc objects should be destroyed when they
215: are no longer needed.
216: */
217: KSPDestroy(ksp);
218: VecDestroy(u); VecDestroy(x);
219: VecDestroy(b); MatDestroy(A);
221: /*
222: Always call PetscFinalize() before exiting a program. This routine
223: - finalizes the PETSc libraries as well as MPI
224: - provides summary and diagnostic information if certain runtime
225: options are chosen (e.g., -log_summary).
226: */
227: PetscFinalize();
228: return 0;
229: }