Actual source code: ex10.c
1: /*$Id: ex10.c,v 1.25 2001/04/10 19:37:07 bsmith Exp $*/
3: /*
4: Program usage: mpirun -np <procs> usg [-help] [all PETSc options]
5: */
7: #if !defined(PETSC_USE_COMPLEX)
9: static char help[] = "An Unstructured Grid Example.\n\
10: This example demonstrates how to solve a nonlinear system in parallel\n\
11: with SNES for an unstructured mesh. The mesh and partitioning information\n\
12: is read in an application-defined ordering, which is later transformed\n\
13: into another convenient ordering (called the local ordering). The local\n\
14: ordering, apart from being efficient on cpu cycles and memory, allows\n\
15: the use of the SPMD model of parallel programming. After partitioning\n\
16: is done, scatters are created between local (sequential) and global\n\
17: (distributed) vectors. Finally, we set up the nonlinear solver context\n\
18: in the usual way, as for a structured grid (see\n\
19: petsc/src/snes/examples/tutorials/ex5.c).\n\
20: The command line options include:\n\
21: -vert <Nv>, where Nv is the global number of nodes\n\
22: -elem <Ne>, where Ne is the global number of elements\n\
23: -nl_par <lambda>, where lambda is the multiplier for the nonlinear term (u*u)\n\
24: -lin_par <alpha>, where alpha is the multiplier for the linear term (u)\n";
26: /*T
27: Concepts: SNES^unstructured grid
28: Concepts: AO^application to PETSc ordering or vice versa;
29: Concepts: VecScatter^using vector scatter operations;
30: Processors: n
31: T*/
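/*
   A sample invocation (illustrative only; the options below are the ones
   documented in the help string above, plus standard PETSc runtime options):

       mpirun -np 2 usg -vert 16 -elem 18 -nl_par 0.06 -lin_par -1.0 -snes_monitor
*/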
33: /* ------------------------------------------------------------------------
35: PDE Solved : L(u) + lambda*u*u + alpha*u = 0 where L(u) is the Laplacian.
37: The Laplacian is approximated in the following way: each edge is given a weight
38: of one, so the diagonal term for a node equals the degree of that node and
39: each off-diagonal (neighbor) term gets a weight of -1.
41: -----------------------------------------------------------------------*/
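/*
   Concretely, for a vertex i with degree d(i) the row of the approximate
   Laplacian reads

       (L u)_i = d(i)*u_i - sum_{j adjacent to i} u_j

   e.g. a vertex with three neighbors contributes a diagonal entry of 3 and
   an entry of -1 in each of its three neighbor columns.
*/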
43: /*
44: Include petscao.h so that we can use AO (Application Ordering) object's services.
45: Include "petscsnes.h" so that we can use SNES solvers. Note that this
46: file automatically includes:
47: petsc.h - base PETSc routines petscvec.h - vectors
48: petscsys.h - system routines petscmat.h - matrices
49: petscis.h - index sets petscksp.h - Krylov subspace methods
50: petscviewer.h - viewers petscpc.h - preconditioners
51: petscsles.h - linear solvers
52: */
53: #include "petscao.h"
54: #include "petscsnes.h"
57: #define MAX_ELEM 500 /* Maximum number of elements */
58: #define MAX_VERT 100 /* Maximum number of vertices */
59: #define MAX_VERT_ELEM 3 /* Vertices per element */
61: /*
62: Application-defined context for problem specific data
63: */
64: typedef struct {
65: int Nvglobal,Nvlocal; /* global and local number of vertices */
66: int Neglobal,Nelocal; /* global and local number of elements */
67: int AdjM[MAX_VERT][50]; /* adjacency list of a vertex */
68: int itot[MAX_VERT]; /* total number of neighbors for a vertex */
69: int icv[MAX_ELEM][MAX_VERT_ELEM]; /* vertices belonging to an element */
70: int v2p[MAX_VERT]; /* processor number for a vertex */
71: int *locInd,*gloInd; /* local and global orderings for a node */
72: Vec localX,localF; /* local solution (u) and f(u) vectors */
73: double non_lin_param; /* nonlinear parameter for the PDE */
74: double lin_param; /* linear parameter for the PDE */
75: VecScatter scatter; /* scatter context for the local and
76: distributed vectors */
77: } AppCtx;
79: /*
80: User-defined routines
81: */
82: int FormJacobian(SNES,Vec,Mat*,Mat*,MatStructure*,void*),
83: FormFunction(SNES,Vec,Vec,void*),
84: FormInitialGuess(AppCtx*,Vec);
86: int main(int argc,char **argv)
87: {
88: SNES snes; /* SNES context */
89: SNESType type = SNESEQLS; /* default nonlinear solution method */
90: Vec x,r; /* solution, residual vectors */
91: Mat Jac; /* Jacobian matrix */
92: AppCtx user; /* user-defined application context */
93: AO ao; /* Application Ordering object */
94: IS isglobal,islocal; /* global and local index sets */
95: int rank,size; /* rank of a process, number of processors */
96: int rstart; /* starting index of PETSc ordering for a processor */
97: int nfails; /* number of unsuccessful Newton steps */
98: int bs = 1; /* block size for multicomponent systems */
99: int nvertices; /* number of local plus ghost nodes of a processor */
100: int *pordering; /* PETSc ordering */
101: int *vertices; /* list of all vertices (incl. ghost ones)
102: on a processor */
103: int *verticesmask,*svertices;
104: int *tmp;
105: int i,j,jstart,inode,nb,nbrs,Nvneighborstotal = 0;
106: int ierr,its,N;
107: Scalar *xx;
108: char str[256],form[256],part_name[256];
109: FILE *fptr,*fptr1;
110: ISLocalToGlobalMapping isl2g;
111: #if defined (UNUSED_VARIABLES)
112: PetscDraw draw; /* drawing context */
113: Scalar *ff,*gg;
114: double tiny = 1.0e-10,zero = 0.0,one = 1.0,big = 1.0e+10;
115: int *tmp1,*tmp2;
116: #endif
117: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
118: Initialize program
119: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
121: PetscInitialize(&argc,&argv,"options.inf",help);
122: MPI_Comm_rank(MPI_COMM_WORLD,&rank);
123: MPI_Comm_size(MPI_COMM_WORLD,&size);
125: /* The current input file options.inf is for 2 proc run only */
126: if (size != 2) SETERRQ(1,"This example currently runs on 2 procs only.");
128: /*
129: Initialize problem parameters
130: */
131: user.Nvglobal = 16; /*Global # of vertices */
132: user.Neglobal = 18; /*Global # of elements */
133: PetscOptionsGetInt(PETSC_NULL,"-vert",&user.Nvglobal,PETSC_NULL);
134: PetscOptionsGetInt(PETSC_NULL,"-elem",&user.Neglobal,PETSC_NULL);
135: user.non_lin_param = 0.06;
136: PetscOptionsGetDouble(PETSC_NULL,"-nl_par",&user.non_lin_param,PETSC_NULL);
137: user.lin_param = -1.0;
138: PetscOptionsGetDouble(PETSC_NULL,"-lin_par",&user.lin_param,PETSC_NULL);
139: user.Nvlocal = 0;
140: user.Nelocal = 0;
142: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
143: Read the mesh and partitioning information
144: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
145:
146: /*
147: Read the mesh and partitioning information from 'adj.in'.
148: The file format is as follows.
149: For each line the first entry is the processor rank where the
150: current node belongs. The second entry is the number of
151: neighbors of a node. The rest of the line is the adjacency
152: list of a node. Currently this file is set up to work on two
153: processors.
155: This is not a very good example of reading input. In the future,
156: we will put an example that shows the style that should be
157: used in a real application, where partitioning will be done
158: dynamically by calling partitioning routines (at present, we have
159: a ready interface to ParMeTiS).
160: */
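/*
   For illustration only (the actual data is in the adj.in file shipped with
   this example): a line such as

       1  3  0 2 5

   would mean that the vertex belongs to processor 1 and has 3 neighbors,
   namely the vertices 0, 2 and 5.
*/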
161: fptr = fopen("adj.in","r");
162: if (!fptr) {
163: SETERRQ(0,"Could not open adj.in")
164: }
165:
166: /*
167: Each processor writes to the file output.<rank> where rank is the
168: processor's rank.
169: */
170: sprintf(part_name,"output.%d",rank);
171: fptr1 = fopen(part_name,"w");
172: if (!fptr1) {
173: SETERRQ(0,"Could not open output file");
174: }
175: PetscMalloc(user.Nvglobal*sizeof(int),&user.gloInd);
176: fprintf(fptr1,"Rank is %d\n",rank);
177: for (inode = 0; inode < user.Nvglobal; inode++) {
178: fgets(str,256,fptr);
179: sscanf(str,"%d",&user.v2p[inode]);
180: if (user.v2p[inode] == rank) {
181: fprintf(fptr1,"Node %d belongs to processor %d\n",inode,user.v2p[inode]);
182: user.gloInd[user.Nvlocal] = inode;
183: sscanf(str,"%*d %d",&nbrs);
184: fprintf(fptr1,"Number of neighbors for the vertex %d is %d\n",inode,nbrs);
185: user.itot[user.Nvlocal] = nbrs;
186: Nvneighborstotal += nbrs;
187: for (i = 0; i < user.itot[user.Nvlocal]; i++){
188: form[0]='\0';
189: for (j=0; j < i+2; j++){
190: PetscStrcat(form,"%*d ");
191: }
192: PetscStrcat(form,"%d");
193: sscanf(str,form,&user.AdjM[user.Nvlocal][i]);
194: fprintf(fptr1,"%d ",user.AdjM[user.Nvlocal][i]);
195: }
196: fprintf(fptr1,"\n");
197: user.Nvlocal++;
198: }
199: }
200: fprintf(fptr1,"Total # of Local Vertices is %d \n",user.Nvlocal);
202: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
203: Create different orderings
204: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
206: /*
207: Create the local ordering list for vertices. First a list using the PETSc global
208: ordering is created. Then we use the AO object to get the PETSc-to-application and
209: application-to-PETSc mappings. Each vertex also gets a local index (stored in the
210: locInd array).
211: */
212: MPI_Scan(&user.Nvlocal,&rstart,1,MPI_INT,MPI_SUM,MPI_COMM_WORLD);
213: rstart -= user.Nvlocal;
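/*
   MPI_Scan returns the inclusive prefix sum of Nvlocal over the ranks;
   subtracting this rank's own Nvlocal leaves the number of vertices owned by
   the lower ranks, i.e. the first PETSc global index owned by this rank.
*/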
214: PetscMalloc(user.Nvlocal*sizeof(int),&pordering);
216: for (i=0; i < user.Nvlocal; i++) {
217: pordering[i] = rstart + i;
218: }
220: /*
221: Create the AO object
222: */
223: AOCreateBasic(MPI_COMM_WORLD,user.Nvlocal,user.gloInd,pordering,&ao);
224: PetscFree(pordering);
225:
226: /*
227: Keep the global indices for later use
228: */
229: PetscMalloc(user.Nvlocal*sizeof(int),&user.locInd);
230: PetscMalloc(Nvneighborstotal*sizeof(int),&tmp);
231:
232: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
233: Demonstrate the use of AO functionality
234: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
236: fprintf(fptr1,"Before AOApplicationToPetsc, local indices are : \n");
237: for (i=0; i < user.Nvlocal; i++) {
238: fprintf(fptr1," %d ",user.gloInd[i]);
239: user.locInd[i] = user.gloInd[i];
240: }
241: fprintf(fptr1,"\n");
242: jstart = 0;
243: for (i=0; i < user.Nvlocal; i++) {
244: fprintf(fptr1,"Neighbors of local vertex %d are : ",user.gloInd[i]);
245: for (j=0; j < user.itot[i]; j++) {
246: fprintf(fptr1,"%d ",user.AdjM[i][j]);
247: tmp[j + jstart] = user.AdjM[i][j];
248: }
249: jstart += user.itot[i];
250: fprintf(fptr1,"\n");
251: }
253: /*
254: Now map the local vertex list and the neighbor lists to the PETSc ordering
255: */
256: AOApplicationToPetsc(ao,user.Nvlocal,user.locInd);
257: AOApplicationToPetsc(ao,Nvneighborstotal,tmp);
258:
259: fprintf(fptr1,"After AOApplicationToPetsc, local indices are : \n");
260: for (i=0; i < user.Nvlocal; i++) {
261: fprintf(fptr1," %d ",user.locInd[i]);
262: }
263: fprintf(fptr1,"\n");
265: jstart = 0;
266: for (i=0; i < user.Nvlocal; i++) {
267: fprintf(fptr1,"Neighbors of local vertex %d are : ",user.locInd[i]);
268: for (j=0; j < user.itot[i]; j++) {
269: user.AdjM[i][j] = tmp[j+jstart];
270: fprintf(fptr1,"%d ",user.AdjM[i][j]);
271: }
272: jstart += user.itot[i];
273: fprintf(fptr1,"\n");
274: }
276: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
277: Extract the ghost vertex information for each processor
278: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
279: /*
280: Next, we need to generate a list of vertices required for this processor
281: and a local numbering scheme for all vertices required on this processor.
282: vertices - integer array of all vertices needed on this processor in PETSc
283: global numbering; this list consists of first the "locally owned"
284: vertices followed by the ghost vertices.
285: verticesmask - integer array that for each global vertex lists its local
286: vertex number (in vertices) + 1. If the global vertex is not
287: represented on this processor, then the corresponding
288: entry in verticesmask is zero
289:
290: Note: vertices and verticesmask are both Nvglobal in length; this may
291: sound terribly non-scalable, but in fact is not so bad for a reasonable
292: number of processors. Importantly, it allows us to use NO SEARCHING
293: in setting up the data structures.
294: */
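/*
   A small example of the convention above: if this processor ends up with
   vertices[] = {3,7,2}, then verticesmask[3] = 1, verticesmask[7] = 2,
   verticesmask[2] = 3, and every other entry of verticesmask stays 0. The
   "+1" offset lets 0 double as the "not on this processor" flag.
*/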
295: ierr = PetscMalloc(user.Nvglobal*sizeof(int),&vertices);
296: ierr = PetscMalloc(user.Nvglobal*sizeof(int),&verticesmask);
297: ierr = PetscMemzero(verticesmask,user.Nvglobal*sizeof(int));
298: nvertices = 0;
299:
300: /*
301: First load "owned vertices" into list
302: */
303: for (i=0; i < user.Nvlocal; i++) {
304: vertices[nvertices++] = user.locInd[i];
305: verticesmask[user.locInd[i]] = nvertices;
306: }
307:
308: /*
309: Now load ghost vertices into list
310: */
311: for (i=0; i < user.Nvlocal; i++) {
312: for (j=0; j < user.itot[i]; j++) {
313: nb = user.AdjM[i][j];
314: if (!verticesmask[nb]) {
315: vertices[nvertices++] = nb;
316: verticesmask[nb] = nvertices;
317: }
318: }
319: }
321: fprintf(fptr1,"\n");
322: fprintf(fptr1,"The array vertices is :\n");
323: for (i=0; i < nvertices; i++) {
324: fprintf(fptr1,"%d ",vertices[i]);
325: }
326: fprintf(fptr1,"\n");
327:
328: /*
329: Map the vertices listed in the neighbor lists from the global (PETSc)
330: ordering they have held so far to the new local contiguous numbering.
331: */
332: fprintf(fptr1,"\n");
333: fprintf(fptr1,"After mapping neighbors in the local contiguous ordering\n");
334: for (i=0; i<user.Nvlocal; i++) {
335: fprintf(fptr1,"Neighbors of local vertex %d are :\n",i);
336: for (j = 0; j < user.itot[i]; j++) {
337: nb = user.AdjM[i][j];
338: user.AdjM[i][j] = verticesmask[nb] - 1;
339: fprintf(fptr1,"%d ",user.AdjM[i][j]);
340: }
341: fprintf(fptr1,"\n");
342: }
344: N = user.Nvglobal;
345:
346: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
347: Create vector and matrix data structures
348: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
350: /*
351: Create vector data structures
352: */
353: VecCreateMPI(MPI_COMM_WORLD,user.Nvlocal,N,&x);
354: VecDuplicate(x,&r);
355: VecCreateSeq(MPI_COMM_SELF,bs*nvertices,&user.localX);
356: VecDuplicate(user.localX,&user.localF);
358: /*
359: Create the scatter between the global representation and the
360: local representation
361: */
362: ISCreateStride(MPI_COMM_SELF,bs*nvertices,0,1,&islocal);
363: PetscMalloc(nvertices*sizeof(int),&svertices);
364: for (i=0; i<nvertices; i++) svertices[i] = bs*vertices[i];
365: ISCreateBlock(MPI_COMM_SELF,bs,nvertices,svertices,&isglobal);
366: PetscFree(svertices);
367: VecScatterCreate(x,isglobal,user.localX,islocal,&user.scatter);
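/*
   The scatter is kept in the user context; FormFunction() and FormJacobian()
   reuse it with VecScatterBegin()/VecScatterEnd() to pull the current global
   solution, including the ghost values, into user.localX.
*/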
369: /*
370: Create matrix data structure; just to keep the example simple, we have not done any
371: preallocation of memory for the matrix. In real application code with big matrices,
372: preallocation should always be done to expedite the matrix creation.
373: */
374: MatCreate(MPI_COMM_WORLD,PETSC_DECIDE,PETSC_DECIDE,N,N,&Jac);
375: MatSetFromOptions(Jac);
377: /*
378: The following routine allows us to set the matrix values in local ordering
379: */
380: ISLocalToGlobalMappingCreate(MPI_COMM_SELF,bs*nvertices,vertices,&isl2g);
381: MatSetLocalToGlobalMapping(Jac,isl2g);
383: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
384: Create nonlinear solver context
385: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
387: SNESCreate(MPI_COMM_WORLD,SNES_NONLINEAR_EQUATIONS,&snes);
388: SNESSetType(snes,type);
390: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
391: Set routines for function and Jacobian evaluation
392: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
394: FormInitialGuess(&user,x);
395: SNESSetFunction(snes,r,FormFunction,(void *)&user);
396: SNESSetJacobian(snes,Jac,Jac,FormJacobian,(void *)&user);
398: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
399: Customize nonlinear solver; set runtime options
400: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
402: SNESSetFromOptions(snes);
404: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
405: Evaluate initial guess; then solve nonlinear system
406: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
408: /*
409: Note: The user should initialize the vector, x, with the initial guess
410: for the nonlinear solver prior to calling SNESSolve(). In particular,
411: to employ an initial guess of zero, the user should explicitly set
412: this vector to zero by calling VecSet().
413: */
414: FormInitialGuess(&user,x);
416: /*
417: Print the initial guess
418: */
419: VecGetArray(x,&xx);
420: for (inode = 0; inode < user.Nvlocal; inode++)
421: fprintf(fptr1,"Initial Solution at node %d is %f \n",inode,xx[inode]);
422: VecRestoreArray(x,&xx);
424: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
425: Now solve the nonlinear system
426: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
428: SNESSolve(snes,x,&its);
429: SNESGetNumberUnsuccessfulSteps(snes,&nfails);
430:
431: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
432: Print the output : solution vector and other information
433: Each processor writes to the file output.<rank> where rank is the
434: processor's rank.
435: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
437: VecGetArray(x,&xx);
438: for (inode = 0; inode < user.Nvlocal; inode++)
439: fprintf(fptr1,"Solution at node %d is %f \n",inode,xx[inode]);
440: VecRestoreArray(x,&xx);
441: fclose(fptr1);
442: PetscPrintf(MPI_COMM_WORLD,"number of Newton iterations = %d, ",its);
443: PetscPrintf(MPI_COMM_WORLD,"number of unsuccessful steps = %d\n",nfails);
445: /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
446: Free work space. All PETSc objects should be destroyed when they
447: are no longer needed.
448: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
450: VecDestroy(x);
451: VecDestroy(r);
452: VecDestroy(user.localX);
453: VecDestroy(user.localF);
454: MatDestroy(Jac); SNESDestroy(snes);
455: /*PetscDrawDestroy(draw);*/
456: PetscFinalize();
458: return 0;
459: }
460: /* -------------------- Form initial approximation ----------------- */
462: /*
463: FormInitialGuess - Forms initial approximation.
465: Input Parameters:
466: user - user-defined application context
467: X - vector
469: Output Parameter:
470: X - vector
471: */
472: int FormInitialGuess(AppCtx *user,Vec X)
473: {
474: int i,Nvlocal,ierr;
475: int *gloInd;
476: Scalar *x;
477: #if defined (UNUSED_VARIABLES)
478: double temp1,temp,hx,hy,hxdhy,hydhx,sc;
479: int Neglobal,Nvglobal,j,row;
480: double alpha,lambda;
482: Nvglobal = user->Nvglobal;
483: Neglobal = user->Neglobal;
484: lambda = user->non_lin_param;
485: alpha = user->lin_param;
486: #endif
488: Nvlocal = user->Nvlocal;
489: gloInd = user->gloInd;
491: /*
492: Get a pointer to vector data.
493: - For default PETSc vectors, VecGetArray() returns a pointer to
494: the data array. Otherwise, the routine is implementation dependent.
495: - You MUST call VecRestoreArray() when you no longer need access to
496: the array.
497: */
498: VecGetArray(X,&x);
500: /*
501: Compute initial guess over the locally owned part of the grid
502: */
503: for (i=0; i < Nvlocal; i++) {
504: x[i] = (double)gloInd[i];
505: }
507: /*
508: Restore vector
509: */
510: VecRestoreArray(X,&x);
511: return 0;
512: }
513: /* -------------------- Evaluate Function F(x) --------------------- */
514: /*
515: FormFunction - Evaluates nonlinear function, F(x).
517: Input Parameters:
518: . snes - the SNES context
519: . X - input vector
520: . ptr - optional user-defined context, as set by SNESSetFunction()
522: Output Parameter:
523: . F - function vector
524: */
525: int FormFunction(SNES snes,Vec X,Vec F,void *ptr)
526: {
527: AppCtx *user = (AppCtx*)ptr;
528: int ierr,i,j,Nvlocal;
529: double alpha,lambda;
530: Scalar *x,*f;
531: VecScatter scatter;
532: Vec localX = user->localX;
533: #if defined (UNUSED_VARIABLES)
534: Scalar ut,ub,ul,ur,u,*g,sc,uyy,uxx;
535: double hx,hy,hxdhy,hydhx;
536: double two = 2.0,one = 1.0;
537: int Nvglobal,Neglobal,row;
538: int *gloInd;
540: Nvglobal = user->Nvglobal;
541: Neglobal = user->Neglobal;
542: gloInd = user->gloInd;
543: #endif
545: Nvlocal = user->Nvlocal;
546: lambda = user->non_lin_param;
547: alpha = user->lin_param;
548: scatter = user->scatter;
550: /*
551: PDE : L(u) + lambda*u*u +alpha*u = 0 where L(u) is the approximate Laplacian as
552: described in the beginning of this code
553:
554: First scatter the distributed vector X into the local vector localX (which
555: includes values for the ghost nodes). If we wish, we can put some other work
556: between VecScatterBegin() and VecScatterEnd() to overlap the communication
557: with computation.
558: */
559: VecScatterBegin(X,localX,INSERT_VALUES,SCATTER_FORWARD,scatter);
560: VecScatterEnd(X,localX,INSERT_VALUES,SCATTER_FORWARD,scatter);
562: /*
563: Get pointers to vector data
564: */
565: VecGetArray(localX,&x);
566: VecGetArray(F,&f);
568: /*
569: Now compute f(x). As mentioned earlier, the computed Laplacian is just an
570: approximate one chosen for illustrative purposes only. Another point to notice
571: is that this is a local (completely parallel) calculation. In practical application
572: codes, function evaluation time is a dominant portion of the overall execution time.
573: */
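/*
   The residual assembled below is, for each locally owned vertex i,

       f_i = (d(i) - alpha)*u_i - lambda*u_i*u_i - sum_{j adjacent to i} u_j

   where d(i) = user->itot[i] is the degree of the vertex.
*/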
574: for (i=0; i < Nvlocal; i++) {
575: f[i] = (user->itot[i] - alpha)*x[i] - lambda*x[i]*x[i];
576: for (j = 0; j < user->itot[i]; j++) {
577: f[i] -= x[user->AdjM[i][j]];
578: }
579: }
581: /*
582: Restore vectors
583: */
584: VecRestoreArray(localX,&x);
585: VecRestoreArray(F,&f);
586: /*VecView(F,PETSC_VIEWER_STDOUT_WORLD);*/
588: return 0;
589: }
591: /* -------------------- Evaluate Jacobian F'(x) -------------------- */
592: /*
593: FormJacobian - Evaluates Jacobian matrix.
595: Input Parameters:
596: . snes - the SNES context
597: . X - input vector
598: . ptr - optional user-defined context, as set by SNESSetJacobian()
600: Output Parameters:
601: . A - Jacobian matrix
602: . B - optionally different preconditioning matrix
603: . flag - flag indicating matrix structure
605: */
606: int FormJacobian(SNES snes,Vec X,Mat *J,Mat *B,MatStructure *flag,void *ptr)
607: {
608: AppCtx *user = (AppCtx*)ptr;
609: Mat jac = *B;
610: int i,j,Nvlocal,col[50],ierr;
611: Scalar alpha,lambda,value[50];
612: Vec localX = user->localX;
613: VecScatter scatter;
614: Scalar *x;
615: #if defined (UNUSED_VARIABLES)
616: Scalar two = 2.0,one = 1.0;
617: int row,Nvglobal,Neglobal;
618: int *gloInd;
620: Nvglobal = user->Nvglobal;
621: Neglobal = user->Neglobal;
622: gloInd = user->gloInd;
623: #endif
624:
625: /*printf("Entering into FormJacobian \n");*/
626: Nvlocal = user->Nvlocal;
627: lambda = user->non_lin_param;
628: alpha = user->lin_param;
629: scatter = user->scatter;
631: /*
632: PDE : L(u) + lambda*u*u +alpha*u = 0 where L(u) is the approximate Laplacian as
633: described in the beginning of this code
634:
635: First scatter the distributed vector X into the local vector localX (which
636: includes values for the ghost nodes). If we wish, we can put some other work between
637: VecScatterBegin() and VecScatterEnd() to overlap the communication with
638: computation.
639: */
640: VecScatterBegin(X,localX,INSERT_VALUES,SCATTER_FORWARD,scatter);
641: VecScatterEnd(X,localX,INSERT_VALUES,SCATTER_FORWARD,scatter);
642:
643: /*
644: Get pointer to vector data
645: */
646: VecGetArray(localX,&x);
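/*
   Each Jacobian row set below follows directly from the residual assembled
   in FormFunction():

       dF_i/du_i = d(i) - 2*lambda*u_i - alpha      (diagonal entry)
       dF_i/du_j = -1  for each neighbor j of i     (off-diagonal entries)
*/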
648: for (i=0; i < Nvlocal; i++) {
649: col[0] = i;
650: value[0] = user->itot[i] - 2.0*lambda*x[i] - alpha;
651: for (j = 0; j < user->itot[i]; j++) {
652: col[j+1] = user->AdjM[i][j];
653: value[j+1] = -1.0;
654: }
656: /*
657: Set the matrix values in the local ordering. Note that in order to use this
658: feature we must call the routine MatSetLocalToGlobalMapping() after the
659: matrix has been created.
660: */
661: MatSetValuesLocal(jac,1,&i,1+user->itot[i],col,value,INSERT_VALUES);
662: }
664: /*
665: Assemble matrix, using the 2-step process:
666: MatAssemblyBegin(), MatAssemblyEnd().
667: Between these two calls, the pointer to vector data is restored to
668: demonstrate the overlapping of communication with computation.
669: */
670: MatAssemblyBegin(jac,MAT_FINAL_ASSEMBLY);
671: VecRestoreArray(localX,&x);
672: MatAssemblyEnd(jac,MAT_FINAL_ASSEMBLY);
674: /*
675: Set flag to indicate that the Jacobian matrix retains an identical
676: nonzero structure throughout all nonlinear iterations (although the
677: values of the entries change). Thus, we can save some work in setting
678: up the preconditioner (e.g., no need to redo symbolic factorization for
679: ILU/ICC preconditioners).
680: - If the nonzero structure of the matrix is different during
681: successive linear solves, then the flag DIFFERENT_NONZERO_PATTERN
682: must be used instead. If you are unsure whether the matrix
683: structure has changed or not, use the flag DIFFERENT_NONZERO_PATTERN.
684: - Caution: If you specify SAME_NONZERO_PATTERN, PETSc
685: believes your assertion and does not check the structure
686: of the matrix. If you erroneously claim that the structure
687: is the same when it actually is not, the new preconditioner
688: will not function correctly. Thus, use this optimization
689: feature with caution!
690: */
691: *flag = SAME_NONZERO_PATTERN;
693: /*
694: Tell the matrix we will never add a new nonzero location to the
695: matrix. If we do, it will generate an error.
696: */
697: MatSetOption(jac,MAT_NEW_NONZERO_LOCATION_ERR);
698: /* MatView(jac,PETSC_VIEWER_STDOUT_SELF); */
699: return 0;
700: }
701: #else
702: #include <stdio.h>
703: int main(int argc,char **args)
704: {
705: fprintf(stdout,"This example does not work for complex numbers.\n");
706: return 0;
707: }
708: #endif