/*
   Example 11

   Interface:    Linear-Algebraic (IJ)

   Compile with: make ex11

   Sample run:   mpirun -np 4 ex11

   Description:  This example solves the 2-D Laplacian eigenvalue
                 problem with zero boundary conditions on an nxn grid.
                 The number of unknowns is N=n^2. The standard 5-point
                 stencil is used, and we solve for the interior nodes
                 only.

                 We use the same matrix as in Examples 3 and 5.
                 The eigensolver is LOBPCG with AMG preconditioner.
*/
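
/* For reference: with zero Dirichlet boundary conditions, the discrete
   operator assembled below has the known eigenvalues

      lambda_{k,l} = (2 - 2*cos(k*pi*h)) + (2 - 2*cos(l*pi*h)),  k,l = 1,...,n,

   where h = 1/(n+1), so the values returned by LOBPCG can be checked
   against the smallest of these. */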

#include <math.h>
#include "_hypre_utilities.h"
#include "krylov.h"
#include "HYPRE.h"
#include "HYPRE_parcsr_ls.h"

/* headers for the LOBPCG eigensolver */
#include "HYPRE_lobpcg.h"
#include "interpreter.h"
#include "HYPRE_MatvecFunctions.h"
#include "temp_multivector.h"

int main (int argc, char *argv[])
{
   int i;
   int myid, num_procs;
   int N, n;
   int blockSize;

   int ilower, iupper;
   int local_size, extra;

   int print_solution;

   double h, h2;

   HYPRE_IJMatrix A;
   HYPRE_ParCSRMatrix parcsr_A;
   HYPRE_IJVector b;
   HYPRE_ParVector par_b;
   HYPRE_IJVector x;
   HYPRE_ParVector par_x;
   HYPRE_ParVector* pvx;

   HYPRE_Solver precond, lobpcg_solver;
   mv_InterfaceInterpreter* interpreter;
   HYPRE_MatvecFunctions matvec_fn;

   /* Initialize MPI */
   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);

   /* Default problem parameters */
   n = 33;
   blockSize = 10;
   print_solution = 0;

   /* Parse command line */
   {
      int arg_index = 0;
      int print_usage = 0;

      while (arg_index < argc)
      {
         if ( strcmp(argv[arg_index], "-n") == 0 )
         {
            arg_index++;
            n = atoi(argv[arg_index++]);
         }
         else if ( strcmp(argv[arg_index], "-blockSize") == 0 )
         {
            arg_index++;
            blockSize = atoi(argv[arg_index++]);
         }
         else if ( strcmp(argv[arg_index], "-print_solution") == 0 )
         {
            arg_index++;
            print_solution = 1;
         }
         else if ( strcmp(argv[arg_index], "-help") == 0 )
         {
            print_usage = 1;
            break;
         }
         else
         {
            arg_index++;
         }
      }

      if ((print_usage) && (myid == 0))
      {
         printf("\n");
         printf("Usage: %s [<options>]\n", argv[0]);
         printf("\n");
         printf("  -n <n>           : problem size in each direction (default: 33)\n");
         printf("  -blockSize <n>   : eigenproblem block size (default: 10)\n");
         printf("  -print_solution  : print the solution vector\n");
         printf("\n");
      }

      if (print_usage)
      {
         MPI_Finalize();
         return (0);
      }
   }

   /* Preliminaries: want at least one processor per row */
   if (n*n < num_procs) n = sqrt(num_procs) + 1;
   N = n*n; /* global number of rows */
   h = 1.0/(n+1); /* mesh size */
   h2 = h*h;

   /* Each processor knows only of its own rows - the range is denoted by ilower
      and iupper.  Here we partition the rows. We account for the fact that
      N may not divide evenly by the number of processors. */
   local_size = N/num_procs;
   extra = N - local_size*num_procs;

   ilower = local_size*myid;
   ilower += hypre_min(myid, extra);

   iupper = local_size*(myid+1);
   iupper += hypre_min(myid+1, extra);
   iupper = iupper - 1;

   /* How many rows do I have? */
   local_size = iupper - ilower + 1;
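
   /* Example: with N = 100 rows and 3 processes, N/num_procs = 33 and
      extra = 1, so process 0 owns rows 0..33 (34 rows) while processes 1
      and 2 own 33 rows each - together the ranges cover 0..N-1 exactly. */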

   /* Create the matrix.
      Note that this is a square matrix, so we indicate the row partition
      size twice (since number of rows = number of cols) */
   HYPRE_IJMatrixCreate(MPI_COMM_WORLD, ilower, iupper, ilower, iupper, &A);

   /* Choose a parallel csr format storage (see the User's Manual) */
   HYPRE_IJMatrixSetObjectType(A, HYPRE_PARCSR);

   /* Initialize before setting coefficients */
   HYPRE_IJMatrixInitialize(A);

   /* Now go through my local rows and set the matrix entries.
      Each row has at most 5 entries. For example, if n=3:

      A = [M -I 0; -I M -I; 0 -I M]
      M = [4 -1 0; -1 4 -1; 0 -1 4]

      Note that here we are setting one row at a time, though
      one could set all the rows together (see the User's Manual).
   */
   {
      int nnz;
      double values[5];
      int cols[5];

      for (i = ilower; i <= iupper; i++)
      {
         nnz = 0;

         /* The left identity block: position i-n */
         if ((i-n) >= 0)
         {
            cols[nnz] = i-n;
            values[nnz] = -1.0;
            nnz++;
         }

         /* The left -1: position i-1 */
         if (i%n)
         {
            cols[nnz] = i-1;
            values[nnz] = -1.0;
            nnz++;
         }

         /* Set the diagonal: position i */
         cols[nnz] = i;
         values[nnz] = 4.0;
         nnz++;

         /* The right -1: position i+1 */
         if ((i+1)%n)
         {
            cols[nnz] = i+1;
            values[nnz] = -1.0;
            nnz++;
         }

         /* The right identity block: position i+n */
         if ((i+n) < N)
         {
            cols[nnz] = i+n;
            values[nnz] = -1.0;
            nnz++;
         }

         /* Set the values for row i */
         HYPRE_IJMatrixSetValues(A, 1, &nnz, &i, cols, values);
      }
   }

   /* Assemble after setting the coefficients */
   HYPRE_IJMatrixAssemble(A);

   /* Get the parcsr matrix object to use */
   HYPRE_IJMatrixGetObject(A, (void**) &parcsr_A);

   /* Create sample rhs and solution vectors */
   HYPRE_IJVectorCreate(MPI_COMM_WORLD, ilower, iupper, &b);
   HYPRE_IJVectorSetObjectType(b, HYPRE_PARCSR);
   HYPRE_IJVectorInitialize(b);
   HYPRE_IJVectorAssemble(b);
   HYPRE_IJVectorGetObject(b, (void **) &par_b);

   HYPRE_IJVectorCreate(MPI_COMM_WORLD, ilower, iupper, &x);
   HYPRE_IJVectorSetObjectType(x, HYPRE_PARCSR);
   HYPRE_IJVectorInitialize(x);
   HYPRE_IJVectorAssemble(x);
   HYPRE_IJVectorGetObject(x, (void **) &par_x);
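
   /* Note: no entries are ever set in b and x; they are assembled empty and
      only carry the parallel partitioning, which is used below as a template
      for the eigenvector multivector and in the LOBPCG/AMG setup calls. */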

   /* Create a preconditioner and solve the eigenproblem */

   /* AMG preconditioner */
   {
      HYPRE_BoomerAMGCreate(&precond);
      HYPRE_BoomerAMGSetPrintLevel(precond, 1); /* print amg solution info */
      HYPRE_BoomerAMGSetCoarsenType(precond, 6);
      HYPRE_BoomerAMGSetRelaxType(precond, 6);  /* Sym G.S./Jacobi hybrid */
      HYPRE_BoomerAMGSetNumSweeps(precond, 1);
      HYPRE_BoomerAMGSetTol(precond, 0.0);      /* conv. tolerance zero */
      HYPRE_BoomerAMGSetMaxIter(precond, 1);    /* do only one iteration! */
   }
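
   /* With the tolerance set to zero and the maximum number of iterations set
      to one, each application of the preconditioner inside LOBPCG performs a
      single AMG cycle rather than iterating to convergence. */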

   /* LOBPCG eigensolver */
   {
      int time_index;

      int maxIterations = 100; /* maximum number of iterations */
      int pcgMode = 1;         /* use rhs as initial guess for inner pcg iterations */
      int verbosity = 1;       /* print iterations info */
      double tol = 1.e-8;      /* absolute tolerance (all eigenvalues) */
      int lobpcgSeed = 775;    /* random seed */

      mv_MultiVectorPtr eigenvectors = NULL;
      mv_MultiVectorPtr constraints = NULL;
      double *eigenvalues = NULL;

      if (myid != 0)
         verbosity = 0;

      /* define an interpreter for the ParCSR interface */
      interpreter = hypre_CTAlloc(mv_InterfaceInterpreter,1);
      HYPRE_ParCSRSetupInterpreter(interpreter);
      HYPRE_ParCSRSetupMatvec(&matvec_fn);
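
      /* The interpreter and matvec_fn supply the abstract (multi)vector and
         matrix-vector operations for ParCSR objects; this is how the generic
         LOBPCG code is connected to the IJ/ParCSR data structures. */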

      /* eigenvectors - create a multivector */
      eigenvectors =
         mv_MultiVectorCreateFromSampleVector(interpreter, blockSize, par_x);
      mv_MultiVectorSetRandom (eigenvectors, lobpcgSeed);

      /* eigenvectors - get a pointer */
      {
         mv_TempMultiVector* tmp = mv_MultiVectorGetData(eigenvectors);
         pvx = (HYPRE_ParVector*)(tmp -> vector);
      }

      /* eigenvalues - allocate space */
      eigenvalues = (double*) calloc( blockSize, sizeof(double) );

      HYPRE_LOBPCGCreate(interpreter, &matvec_fn, &lobpcg_solver);
      HYPRE_LOBPCGSetMaxIter(lobpcg_solver, maxIterations);
      HYPRE_LOBPCGSetPrecondUsageMode(lobpcg_solver, pcgMode);
      HYPRE_LOBPCGSetTol(lobpcg_solver, tol);
      HYPRE_LOBPCGSetPrintLevel(lobpcg_solver, verbosity);

      /* use a preconditioner */
      HYPRE_LOBPCGSetPrecond(lobpcg_solver,
                             (HYPRE_PtrToSolverFcn) HYPRE_BoomerAMGSolve,
                             (HYPRE_PtrToSolverFcn) HYPRE_BoomerAMGSetup,
                             precond);

      HYPRE_LOBPCGSetup(lobpcg_solver, (HYPRE_Matrix)parcsr_A,
                        (HYPRE_Vector)par_b, (HYPRE_Vector)par_x);
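
      /* par_b and par_x serve only as setup arguments here (they are passed
         on to the preconditioner setup); the eigenvector block itself is
         supplied to HYPRE_LOBPCGSolve below. */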

      time_index = hypre_InitializeTiming("LOBPCG Solve");
      hypre_BeginTiming(time_index);

      HYPRE_LOBPCGSolve(lobpcg_solver, constraints, eigenvectors, eigenvalues);

      hypre_EndTiming(time_index);
      hypre_PrintTiming("Solve phase times", MPI_COMM_WORLD);
      hypre_FinalizeTiming(time_index);
      hypre_ClearTiming();
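
      /* On return, eigenvalues[0..blockSize-1] hold approximations to the
         blockSize smallest eigenvalues, and the vectors in eigenvectors
         (accessible through pvx) are the corresponding eigenvectors. */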

      /* clean-up */
      HYPRE_BoomerAMGDestroy(precond);
      HYPRE_LOBPCGDestroy(lobpcg_solver);
      hypre_TFree(eigenvalues);
      hypre_TFree(interpreter);
   }

   /* Print one of the computed eigenvectors (the last one in the block) */
   if (print_solution)
      HYPRE_ParVectorPrint(pvx[blockSize-1], "ij.out.x");

   /* Clean up */
   HYPRE_IJMatrixDestroy(A);
   HYPRE_IJVectorDestroy(b);
   HYPRE_IJVectorDestroy(x);

   /* Finalize MPI */
   MPI_Finalize();

   return(0);
}