Actual source code: pinit.c
1: /*$Id: pinit.c,v 1.58 2001/08/10 03:28:54 bsmith Exp $*/
2: /*
3: This file defines the initialization of PETSc, including PetscInitialize()
4: */
6: #include "petsc.h"
7: #include "petscsys.h"
9: EXTERN int PetscLogBegin_Private(void);
11: /* -----------------------------------------------------------------------------------------*/
13: extern FILE *petsc_history;
15: EXTERN int PetscInitialize_DynamicLibraries(void);
16: EXTERN int PetscFinalize_DynamicLibraries(void);
17: EXTERN int PetscFListDestroyAll(void);
18: EXTERN int PetscSequentialPhaseBegin_Private(MPI_Comm,int);
19: EXTERN int PetscSequentialPhaseEnd_Private(MPI_Comm,int);
20: EXTERN int PetscLogCloseHistoryFile(FILE **);
22: /* this is used by the _, __, and ___ macros (see include/petscerror.h) */
23: int __g0;
25: /*
26: Checks the options database for initializations related to the
27: PETSc components
28: */
31: int PetscOptionsCheckInitial_Components(void)
32: {
33: MPI_Comm comm = PETSC_COMM_WORLD;
34: PetscTruth flg1;
35: int ierr;
38: /*
39: Publishing to the AMS
40: */
41: #if defined(PETSC_HAVE_AMS)
42: PetscOptionsHasName(PETSC_NULL,"-ams_publish_objects",&flg1);
43: if (flg1) {
44: PetscAMSPublishAll = PETSC_TRUE;
45: }
46: PetscOptionsHasName(PETSC_NULL,"-ams_publish_stack",&flg1);
47: if (flg1) {
48: PetscStackPublish();
49: }
50: #endif
52: PetscOptionsHasName(PETSC_NULL,"-help",&flg1);
53: if (flg1) {
54: #if defined (PETSC_USE_LOG)
55: (*PetscHelpPrintf)(comm,"------Additional PETSc component options--------\n");
56: (*PetscHelpPrintf)(comm," -log_summary_exclude: <vec,mat,sles,snes>\n");
57: (*PetscHelpPrintf)(comm," -log_info_exclude: <null,vec,mat,sles,snes,ts>\n");
58: (*PetscHelpPrintf)(comm,"-----------------------------------------------\n");
59: #endif
60: }
61: return(0);
62: }
66: /*@C
67: PetscInitializeNoArguments - Calls PetscInitialize() from C/C++ without
68: the command line arguments.
70: Collective
71:
72: Level: advanced
74: .seealso: PetscInitialize(), PetscInitializeFortran()
75: @*/
76: int PetscInitializeNoArguments(void)
77: {
78: int ierr,argc = 0;
79: char **args = 0;
82: ierr = PetscInitialize(&argc,&args,PETSC_NULL,PETSC_NULL);
83: PetscFunctionReturn(ierr);
84: }
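/*
   Editor's illustrative sketch (not part of the original pinit.c): one way a library
   routine might guarantee PETSc is up when its caller never provides argc/argv.  The
   routine name MyLibrarySetup is hypothetical; PetscInitializeCalled and
   PetscInitializeNoArguments() are the real global/routine used in this file.
*/
#if 0
static int MyLibrarySetup(void)
{
  int ierr;

  if (!PetscInitializeCalled) {                       /* PETSc not started by the caller */
    ierr = PetscInitializeNoArguments();CHKERRQ(ierr);
  }
  return 0;
}
#endif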
86: EXTERN int PetscOptionsCheckInitial_Private(void);
87: extern PetscTruth PetscBeganMPI;
89: /*
90: This function is the MPI reduction operation used to compute the max of the
91: first half of the datatype and the sum of the second half.
92: */
93: MPI_Op PetscMaxSum_Op = 0;
95: EXTERN_C_BEGIN
98: void PetscMaxSum_Local(void *in,void *out,int *cnt,MPI_Datatype *datatype)
99: {
100: int *xin = (int *)in,*xout = (int*)out,i,count = *cnt;
103: if (*datatype != MPI_2INT) {
104: (*PetscErrorPrintf)("Can only handle MPI_2INT data types");
105: MPI_Abort(MPI_COMM_WORLD,1);
106: }
108: for (i=0; i<count; i++) {
109: xout[2*i] = PetscMax(xout[2*i],xin[2*i]);
110: xout[2*i+1] += xin[2*i+1];
111: }
112: PetscStackPop;
113: return;
114: }
115: EXTERN_C_END
117: /*
118: Returns the max of the first entry owned by this processor and the
119: sum of the second entry.
120: */
123: int PetscMaxSum(MPI_Comm comm,const int nprocs[],int *max,int *sum)
124: {
125: int size,rank,ierr,*work;
126:
128: MPI_Comm_size(comm,&size);
129: MPI_Comm_rank(comm,&rank);
130: PetscMalloc(2*size*sizeof(int),&work);
131: MPI_Allreduce((void*)nprocs,work,size,MPI_2INT,PetscMaxSum_Op,comm);
132: *max = work[2*rank];
133: *sum = work[2*rank+1];
134: PetscFree(work);
135: return(0);
136: }
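/*
   Editor's illustrative sketch (not part of the original pinit.c): the kind of message
   counting PetscMaxSum() is intended for.  Each process records, for every other process i,
   how many values it will send to i; the reduction then tells each process the length of
   the longest single message it will receive and how many processes will send to it.  The
   routine name and the nsends_to[] argument are hypothetical.
*/
#if 0
static int PetscMaxSumUsageSketch(MPI_Comm comm,const int nsends_to[])
{
  int i,ierr,size,*pairs,longest,nsenders;

  MPI_Comm_size(comm,&size);
  ierr = PetscMalloc(2*size*sizeof(int),&pairs);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    pairs[2*i]   = nsends_to[i];          /* first entry of each pair is maxed   */
    pairs[2*i+1] = (nsends_to[i] > 0);    /* second entry of each pair is summed */
  }
  ierr = PetscMaxSum(comm,pairs,&longest,&nsenders);CHKERRQ(ierr);
  /* longest  = longest single message this process will receive
     nsenders = number of processes that will send it something   */
  ierr = PetscFree(pairs);CHKERRQ(ierr);
  return 0;
}
#endif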
138: /* ----------------------------------------------------------------------------*/
139: MPI_Op PetscADMax_Op = 0;
141: EXTERN_C_BEGIN
144: void PetscADMax_Local(void *in,void *out,int *cnt,MPI_Datatype *datatype)
145: {
146: PetscScalar *xin = (PetscScalar *)in,*xout = (PetscScalar*)out;
147: int i,count = *cnt;
150: if (*datatype != MPIU_2SCALAR) {
151: (*PetscErrorPrintf)("Can only handle MPIU_2SCALAR data (i.e. double or complex) types");
152: MPI_Abort(MPI_COMM_WORLD,1);
153: }
155: for (i=0; i<count; i++) {
156: if (PetscRealPart(xout[2*i]) < PetscRealPart(xin[2*i])) {
157: xout[2*i] = xin[2*i];
158: xout[2*i+1] = xin[2*i+1];
159: }
160: }
162: PetscStackPop;
163: return;
164: }
165: EXTERN_C_END
167: MPI_Op PetscADMin_Op = 0;
169: EXTERN_C_BEGIN
172: void PetscADMin_Local(void *in,void *out,int *cnt,MPI_Datatype *datatype)
173: {
174: PetscScalar *xin = (PetscScalar *)in,*xout = (PetscScalar*)out;
175: int i,count = *cnt;
178: if (*datatype != MPIU_2SCALAR) {
179: (*PetscErrorPrintf)("Can only handle MPIU_2SCALAR data (i.e. double or complex) types");
180: MPI_Abort(MPI_COMM_WORLD,1);
181: }
183: for (i=0; i<count; i++) {
184: if (PetscRealPart(xout[2*i]) > PetscRealPart(xin[2*i])) {
185: xout[2*i] = xin[2*i];
186: xout[2*i+1] = xin[2*i+1];
187: }
188: }
190: PetscStackPop;
191: return;
192: }
193: EXTERN_C_END
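/*
   Editor's illustrative sketch (not part of the original pinit.c): PetscADMax_Op reduces
   pairs of PetscScalars, keeping the pair whose first entry has the largest real part and
   carrying its second entry along.  Here the second entry holds the owning rank, so the
   reduction yields the global maximum together with who owns it.  The routine name and the
   (value, rank) pairing are illustrative choices; MPIU_2SCALAR and PetscADMax_Op are
   created in PetscInitialize() below.
*/
#if 0
static int PetscADMaxUsageSketch(MPI_Comm comm,PetscReal localval,PetscReal *globalval,int *owner)
{
  PetscScalar in[2],out[2];
  int         rank,ierr;

  MPI_Comm_rank(comm,&rank);
  in[0] = localval;            /* the value being compared            */
  in[1] = (PetscScalar)rank;   /* auxiliary data carried with the max */
  ierr  = MPI_Allreduce(in,out,1,MPIU_2SCALAR,PetscADMax_Op,comm);CHKERRQ(ierr);
  *globalval = PetscRealPart(out[0]);
  *owner     = (int)PetscRealPart(out[1]);
  return 0;
}
#endif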
194: /* ---------------------------------------------------------------------------------------*/
196: #if defined(PETSC_USE_COMPLEX)
197: MPI_Op PetscSum_Op = 0;
199: EXTERN_C_BEGIN
202: void PetscSum_Local(void *in,void *out,int *cnt,MPI_Datatype *datatype)
203: {
204: PetscScalar *xin = (PetscScalar *)in,*xout = (PetscScalar*)out;
205: int i,count = *cnt;
208: if (*datatype != MPIU_SCALAR) {
209: (*PetscErrorPrintf)("Can only handle MPIU_SCALAR data (i.e. double or complex) types");
210: MPI_Abort(MPI_COMM_WORLD,1);
211: }
213: for (i=0; i<count; i++) {
214: xout[i] += xin[i];
215: }
217: PetscStackPop;
218: return;
219: }
220: EXTERN_C_END
221: #endif
223: static int PetscGlobalArgc = 0;
224: static char **PetscGlobalArgs = 0;
228: /*@C
229: PetscGetArgs - Allows you to access the raw command line arguments anywhere
230: after PetscInitialize() is called but before PetscFinalize().
232: Not Collective
234: Output Parameters:
235: + argc - count of number of command line arguments
236: - args - the command line arguments
238: Level: intermediate
240: Notes:
241: This is usually used to pass the command line arguments into other libraries
242: that are called internally deep in PETSc or the application.
244: Concepts: command line arguments
245:
246: .seealso: PetscFinalize(), PetscInitializeFortran()
248: @*/
249: int PetscGetArgs(int *argc,char ***args)
250: {
252: if (!PetscGlobalArgs) {
253: SETERRQ(1,"You must call this routine after PetscInitialize() but before PetscFinalize()");
254: }
255: *argc = PetscGlobalArgc;
256: *args = PetscGlobalArgs;
257: return(0);
258: }
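/*
   Editor's illustrative sketch (not part of the original pinit.c): forwarding the raw
   command line to another package after PETSc has been initialized, as the Notes above
   describe.  OtherPackageInitialize() is a hypothetical third-party routine standing in
   for whatever initializer the application actually needs to call.
*/
#if 0
static int ForwardArgsSketch(void)
{
  int  ierr,argc;
  char **args;

  ierr = PetscGetArgs(&argc,&args);CHKERRQ(ierr);
  OtherPackageInitialize(argc,args);   /* hypothetical third-party initializer */
  return 0;
}
#endif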
262: /*@C
263: PetscInitialize - Initializes the PETSc database and MPI.
264: PetscInitialize() calls MPI_Init() if that has yet to be called,
265: so this routine should always be called near the beginning of
266: your program -- usually the very first line!
268: Collective on MPI_COMM_WORLD or PETSC_COMM_WORLD if it has been set
270: Input Parameters:
271: + argc - count of number of command line arguments
272: . args - the command line arguments
273: . file - [optional] PETSc database file, defaults to ~username/.petscrc
274: (use PETSC_NULL for default)
275: - help - [optional] Help message to print, use PETSC_NULL for no message
277: Options Database Keys:
278: + -start_in_debugger [noxterm,dbx,xdb,gdb,...] - Starts program in debugger
279: . -on_error_attach_debugger [noxterm,dbx,xdb,gdb,...] - Starts debugger when error detected
280: . -on_error_emacs <machinename> - causes emacsclient to jump to error file
281: . -debugger_nodes [node1,node2,...] - Indicates nodes to start in debugger
282: . -debugger_pause [sleeptime] (in seconds) - Pauses debugger
283: . -stop_for_debugger - Prints message on how to attach debugger manually to
284: process and waits (-debugger_pause) seconds for attachment
285: . -trmalloc - Indicates use of PETSc error-checking malloc
286: . -trmalloc no - Indicates not to use error-checking malloc
287: . -fp_trap - Stops on floating point exceptions (Note that on the
288: IBM RS6000 this slows code by at least a factor of 10.)
289: . -no_signal_handler - Indicates not to trap error signals
290: . -shared_tmp - indicates /tmp directory is shared by all processors
291: . -not_shared_tmp - each processor has own /tmp
292: . -tmp - alternative name of /tmp directory
293: . -get_total_flops - returns total flops done by all processors
294: - -get_resident_set_size - Print memory usage at end of run
296: Options Database Keys for Profiling:
297: See the Profiling chapter of the users manual for details.
298: + -log_trace [filename] - Print traces of all PETSc calls
299: to the screen (useful to determine where a program
300: hangs without running in the debugger). See PetscLogTraceBegin().
301: . -log_info <optional filename> - Prints verbose information to the screen
302: - -log_info_exclude <null,vec,mat,sles,snes,ts> - Excludes some of the verbose messages
304: Environmental Variables:
305: + PETSC_TMP - alternative tmp directory
306: . PETSC_SHARED_TMP - tmp is shared by all processes
307: . PETSC_NOT_SHARED_TMP - each process has its own private tmp
308: . PETSC_VIEWER_SOCKET_PORT - socket number to use for socket viewer
309: - PETSC_VIEWER_SOCKET_MACHINE - machine to use for socket viewer to connect to
312: Level: beginner
314: Notes:
315: If for some reason you must call MPI_Init() separately, call
316: it before PetscInitialize().
318: Fortran Version:
319: In Fortran this routine has the format
320: $ call PetscInitialize(file,ierr)
322: + ierr - error return code
323: - file - [optional] PETSc database file name, defaults to
324: ~username/.petscrc (use PETSC_NULL_CHARACTER for default)
325:
326: Important Fortran Note:
327: In Fortran, you MUST use PETSC_NULL_CHARACTER to indicate a
328: null character string; you CANNOT just use PETSC_NULL as
329: in the C version. See the users manual for details.
332: Concepts: initializing PETSc
333:
334: .seealso: PetscFinalize(), PetscInitializeFortran(), PetscGetArgs()
336: @*/
337: int PetscInitialize(int *argc,char ***args,char file[],const char help[])
338: {
339: int ierr,flag,dummy_tag,size;
340: PetscTruth flg;
341: char hostname[256];
344: if (PetscInitializeCalled) return(0);
346: PetscOptionsCreate();
348: /*
349: We initialize the program name here (before MPI_Init()) because MPICH has a bug
350: that sets args[0] on all processors to args[0] on the first processor.
351: */
352: if (argc && *argc) {
353: PetscSetProgramName(**args);
354: } else {
355: PetscSetProgramName("Unknown Name");
356: }
359: MPI_Initialized(&flag);
360: if (!flag) {
361: MPI_Init(argc,args);
362: PetscBeganMPI = PETSC_TRUE;
363: }
364: if (argc && args) {
365: PetscGlobalArgc = *argc;
366: PetscGlobalArgs = *args;
367: }
368: PetscInitializeCalled = PETSC_TRUE;
370: /* Also set the initial datestamp. Done after init due to a bug in MPICH-GM? */
371: PetscSetInitialDate();
373: if (!PETSC_COMM_WORLD) {
374: PETSC_COMM_WORLD = MPI_COMM_WORLD;
375: }
377: MPI_Comm_rank(MPI_COMM_WORLD,&PetscGlobalRank);
378: MPI_Comm_size(MPI_COMM_WORLD,&PetscGlobalSize);
380: #if defined(PETSC_USE_COMPLEX)
381: /*
382: Initialize the global complex variable; this is needed because with
383: shared libraries the constructors for global variables
384: are not called, at least on IRIX.
385: */
386: {
387: PetscScalar ic(0.0,1.0);
388: PETSC_i = ic;
389: }
390: MPI_Type_contiguous(2,MPIU_REAL,&MPIU_COMPLEX);
391: MPI_Type_commit(&MPIU_COMPLEX);
392: MPI_Op_create(PetscSum_Local,1,&PetscSum_Op);
393: #endif
395: /*
396: Create the PETSc MPI reduction operator that maxes the first
397: half of the entries and sums the second half.
398: */
399: MPI_Op_create(PetscMaxSum_Local,1,&PetscMaxSum_Op);
401: MPI_Type_contiguous(2,MPIU_SCALAR,&MPIU_2SCALAR);
402: MPI_Type_commit(&MPIU_2SCALAR);
403: MPI_Op_create(PetscADMax_Local,1,&PetscADMax_Op);
404: MPI_Op_create(PetscADMin_Local,1,&PetscADMin_Op);
406: /*
407: Build the options database and check for user setup requests
408: */
409: PetscOptionsInsert(argc,args,file);
411: /*
412: Print main application help message
413: */
414: PetscOptionsHasName(PETSC_NULL,"-help",&flg);
415: if (help && flg) {
416: PetscPrintf(PETSC_COMM_WORLD,help);
417: }
418: PetscOptionsCheckInitial_Private();
420: /* SHOULD PUT IN GUARDS: Make sure logging is initialized, even if we do not print it out */
421: PetscLogBegin_Private();
423: /*
424: Initialize PETSC_COMM_SELF and PETSC_COMM_WORLD as MPI_Comms with the PETSc attribute.
425:
426: We delay this until here because PetscMalloc() may not have been
427: set up before this point.
428: */
429: PetscCommDuplicate_Private(MPI_COMM_SELF,&PETSC_COMM_SELF,&dummy_tag);
430: PetscCommDuplicate_Private(PETSC_COMM_WORLD,&PETSC_COMM_WORLD,&dummy_tag);
432: /*
433: Load the dynamic libraries (on machines that support them); this registers all
434: the solvers etc. (On non-dynamic machines this initializes the PetscDraw and PetscViewer classes)
435: */
436: PetscInitialize_DynamicLibraries();
438: /*
439: Initialize all the default viewers
440: */
441: MPI_Comm_size(PETSC_COMM_WORLD,&size);
442: PetscLogInfo(0,"PetscInitialize:PETSc successfully started: number of processors = %d\n",size);
443: PetscGetHostName(hostname,256);
444: PetscLogInfo(0,"PetscInitialize:Running on machine: %s\n",hostname);
446: ierr = PetscOptionsCheckInitial_Components();
448: PetscFunctionReturn(ierr);
449: }
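/*
   Editor's illustrative sketch (not part of the original pinit.c): the usual shape of a
   PETSc main program, calling PetscInitialize() as the first executable statement and
   PetscFinalize() as the last, with a help string passed so that -help prints it.
*/
#if 0
static char help[] = "Minimal PETSc program.\n";

int main(int argc,char **argv)
{
  int ierr;

  ierr = PetscInitialize(&argc,&argv,PETSC_NULL,help);CHKERRQ(ierr);
  /* ... create objects, solve, destroy objects here ... */
  ierr = PetscFinalize();
  return ierr;
}
#endif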
454: /*@C
455: PetscFinalize - Checks the options database for actions to be taken at the conclusion
456: of the program and calls MPI_Finalize().
458: Collective on PETSC_COMM_WORLD
460: Options Database Keys:
461: + -options_table - Calls PetscOptionsPrint()
462: . -options_left - Prints unused options that remain in the database
463: . -options_left no - Does not print unused options that remain in the database
464: . -mpidump - Calls PetscMPIDump()
465: . -trdump - Calls PetscTrDump()
466: . -trinfo - Prints total memory usage
467: . -trdebug - Calls malloc_debug(2) to activate memory
468: allocation diagnostics (used by PETSC_ARCH=sun4,
469: BOPT=[g,g_c++,g_complex] only!)
470: - -trmalloc_log - Prints summary of memory usage
472: Options Database Keys for Profiling:
473: See the Profiling chapter of the users manual for details.
474: + -log_summary [filename] - Prints summary of flop and timing
475: information to screen. If the filename is specified the
476: summary is written to the file. (for code compiled with
477: PETSC_USE_LOG). See PetscLogPrintSummary().
478: . -log_all [filename] - Logs extensive profiling information
479: (for code compiled with PETSC_USE_LOG). See PetscLogDump().
480: . -log [filename] - Logs basic profiling information (for
481: code compiled with PETSC_USE_LOG). See PetscLogDump().
482: . -log_sync - Log the synchronization in scatters, inner products
483: and norms
484: - -log_mpe [filename] - Creates a logfile viewable by the
485: utility Upshot/Nupshot (in MPICH distribution)
487: Level: beginner
489: Note:
490: See PetscInitialize() for more general runtime options.
492: .seealso: PetscInitialize(), PetscOptionsPrint(), PetscTrDump(), PetscMPIDump(), PetscEnd()
493: @*/
494: int PetscFinalize(void)
495: {
496: int ierr,rank,nopt;
497: PetscLogDouble rss;
498: PetscTruth flg1,flg2,flg3;
499:
502: if (!PetscInitializeCalled) {
503: (*PetscErrorPrintf)("PETSc ERROR: PetscInitialize() must be called before PetscFinalize()\n");
504: return(0);
505: }
506: /* Destroy auxiliary packages */
507: PetscViewerMathematicaFinalizePackage();
508: PetscPLAPACKFinalizePackage();
510: /*
511: Destroy all the function registration lists created
512: */
513: PetscFinalize_DynamicLibraries();
516: PetscOptionsHasName(PETSC_NULL,"-get_resident_set_size",&flg1);
517: MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
518: if (flg1) {
519: PetscGetResidentSetSize(&rss);
520: PetscPrintf(PETSC_COMM_SELF,"[%d] Size of entire process memory %d\n",rank,(int)rss);
521: }
523: #if defined(PETSC_USE_LOG)
524: PetscOptionsHasName(PETSC_NULL,"-get_total_flops",&flg1);
525: if (flg1) {
526: PetscLogDouble flops = 0;
527: MPI_Reduce(&_TotalFlops,&flops,1,MPI_DOUBLE,MPI_SUM,0,PETSC_COMM_WORLD);
528: PetscPrintf(PETSC_COMM_WORLD,"Total flops over all processors %g\n",flops);
529: }
530: #endif
532: /*
533: Free all objects registered with PetscObjectRegisterDestroy() such as
534: PETSC_VIEWER_XXX_().
535: */
536: PetscObjectRegisterDestroyAll();
538: #if defined(PETSC_USE_STACK)
539: if (PetscStackActive) {
540: PetscStackDestroy();
541: }
542: #endif
544: #if defined(PETSC_USE_LOG)
545: {
546: char mname[PETSC_MAX_PATH_LEN];
547: #if defined(PETSC_HAVE_MPE)
548: mname[0] = 0;
549: PetscOptionsGetString(PETSC_NULL,"-log_mpe",mname,PETSC_MAX_PATH_LEN,&flg1);
550: if (flg1){
551: if (mname[0]) {PetscLogMPEDump(mname);}
552: else {PetscLogMPEDump(0);}
553: }
554: #endif
555: mname[0] = 0;
556: PetscOptionsGetString(PETSC_NULL,"-log_summary",mname,PETSC_MAX_PATH_LEN,&flg1);
557: if (flg1) {
558: if (mname[0]) {PetscLogPrintSummary(PETSC_COMM_WORLD,mname);}
559: else {PetscLogPrintSummary(PETSC_COMM_WORLD,0);}
560: }
562: mname[0] = 0;
563: PetscOptionsGetString(PETSC_NULL,"-log_all",mname,PETSC_MAX_PATH_LEN,&flg1);
564: PetscOptionsGetString(PETSC_NULL,"-log",mname,PETSC_MAX_PATH_LEN,&flg2);
565: if (flg1 || flg2){
566: if (mname[0]) PetscLogDump(mname);
567: else PetscLogDump(0);
568: }
569: PetscLogDestroy();
570: }
571: #endif
572: PetscOptionsHasName(PETSC_NULL,"-no_signal_handler",&flg1);
573: if (!flg1) { PetscPopSignalHandler();}
574: PetscOptionsHasName(PETSC_NULL,"-mpidump",&flg1);
575: if (flg1) {
576: PetscMPIDump(stdout);
577: }
578: PetscOptionsHasName(PETSC_NULL,"-trdump",&flg1);
579: PetscOptionsHasName(PETSC_NULL,"-options_table",&flg2);
580: if (flg2) {
581: if (!rank) {PetscOptionsPrint(stdout);}
582: }
584: /* to prevent PETSc -options_left from warning */
585: PetscOptionsHasName(PETSC_NULL,"-nox_warning",&flg1);
586: PetscOptionsHasName(PETSC_NULL,"-error_output_stderr",&flg1);
588: PetscOptionsGetLogical(PETSC_NULL,"-options_left",&flg2,&flg1);
589: PetscOptionsAllUsed(&nopt);
590: if (flg2) {
591: PetscOptionsPrint(stdout);
592: if (!nopt) {
593: PetscPrintf(PETSC_COMM_WORLD,"There are no unused options.\n");
594: } else if (nopt == 1) {
595: PetscPrintf(PETSC_COMM_WORLD,"There is one unused database option. It is:\n");
596: } else {
597: PetscPrintf(PETSC_COMM_WORLD,"There are %d unused database options. They are:\n",nopt);
598: }
599: }
600: #if defined(PETSC_USE_BOPT_g)
601: if (nopt && !flg1 && !flg2) {
602: PetscPrintf(PETSC_COMM_WORLD,"WARNING! There are options you set that were not used!\n");
603: PetscPrintf(PETSC_COMM_WORLD,"WARNING! could be spelling mistake, etc!\n");
604: PetscOptionsLeft();
605: } else if (nopt && flg2) {
606: #else
607: if (nopt && flg2) {
608: #endif
609: PetscOptionsLeft();
610: }
612: PetscOptionsHasName(PETSC_NULL,"-log_history",&flg1);
613: if (flg1) {
614: PetscLogCloseHistoryFile(&petsc_history);
615: petsc_history = 0;
616: }
619: /*
620: Destroy PETSC_COMM_SELF/WORLD as a MPI_Comm with the PETSc
621: attribute.
622: */
623: PetscCommDestroy_Private(&PETSC_COMM_SELF);
624: PetscCommDestroy_Private(&PETSC_COMM_WORLD);
626: /*
627: Free all the registered create functions, such as KSPList, VecList, SNESList, etc
628: */
629: PetscFListDestroyAll();
631: PetscOptionsHasName(PETSC_NULL,"-trdump",&flg1);
632: PetscOptionsHasName(PETSC_NULL,"-trinfo",&flg2);
633: PetscOptionsHasName(PETSC_NULL,"-trmalloc_log",&flg3);
634: if (flg1) {
635: char fname[256];
636: FILE *fd;
637:
638: fname[0] = 0;
639: PetscOptionsGetString(PETSC_NULL,"-trdump",fname,250,&flg1);
640: if (flg1 && fname[0]) {
641: char sname[256];
643: sprintf(sname,"%s_%d",fname,rank);
644: fd = fopen(sname,"w"); if (!fd) SETERRQ1(1,"Cannot open log file: %s",sname);
645: PetscTrDump(fd);
646: fclose(fd);
647: } else {
648: MPI_Comm local_comm;
650: MPI_Comm_dup(MPI_COMM_WORLD,&local_comm);
651: PetscSequentialPhaseBegin_Private(local_comm,1);
652: PetscTrDump(stdout);
653: PetscSequentialPhaseEnd_Private(local_comm,1);
654: MPI_Comm_free(&local_comm);
655: }
656: } else if (flg2) {
657: MPI_Comm local_comm;
658: PetscLogDouble maxm;
660: MPI_Comm_dup(MPI_COMM_WORLD,&local_comm);
661: PetscTrSpace(PETSC_NULL,PETSC_NULL,&maxm);
662: PetscSequentialPhaseBegin_Private(local_comm,1);
663: printf("[%d] Maximum memory used %g\n",rank,maxm);
664: PetscSequentialPhaseEnd_Private(local_comm,1);
665: MPI_Comm_free(&local_comm);
666: }
667: if (flg3) {
668: char fname[256];
669: FILE *fd;
670:
671: fname[0] = 0;
672: PetscOptionsGetString(PETSC_NULL,"-trmalloc_log",fname,250,&flg1);
673: if (flg1 && fname[0]) {
674: char sname[256];
676: sprintf(sname,"%s_%d",fname,rank);
677: fd = fopen(sname,"w"); if (!fd) SETERRQ1(1,"Cannot open log file: %s",sname);
678: PetscTrLogDump(fd);
679: fclose(fd);
680: } else {
681: PetscTrLogDump(stdout);
682: }
683: }
684: /* Can be destroyed only after all the options are used */
685: PetscOptionsDestroy();
687: PetscGlobalArgc = 0;
688: PetscGlobalArgs = 0;
690: PetscLogInfo(0,"PetscFinalize:PETSc successfully ended!\n");
691: if (PetscBeganMPI) {
692: MPI_Finalize();
693: }
695: /*
697: Note: In certain cases PETSC_COMM_WORLD is never MPI_Comm_free()ed because
698: the communicator has some outstanding requests on it. Specifically, if the
699: flag PETSC_HAVE_BROKEN_REQUEST_FREE is set (for the IBM MPI implementation). See
700: src/vec/utils/vpscat.c. Because of this, the memory allocated in PetscCommDuplicate_Private()
701: is never freed as it should be. Thus one may see messages of the form
702: [ 1] 8 bytes PetscCommDuplicate_Private() line 645 in src/sys/src/mpiu.c indicating the
703: memory was not freed.
705: */
706: ierr = PetscClearMalloc();
707: PetscInitializeCalled = PETSC_FALSE;
708: PetscFunctionReturn(ierr);
709: }
711: /*
712: These may be used in code that is to be processed with ADIC
713: */
717: /*@C
718: PetscGlobalMax - Computes the maximum value over several processors
720: Collective on MPI_Comm
722: Input Parameters:
723: + local - the local value
724: - comm - the processors that find the maximum
726: Output Parameter:
727: . result - the maximum value
728:
729: Level: intermediate
731: Notes:
732: These functions are to be used inside user functions that are to be processed with
733: ADIC. PETSc will automatically provide differentiated versions of these functions
735: .seealso: PetscGlobalMin(), PetscGlobalSum()
736: @*/
737: int PetscGlobalMax(PetscReal* local,PetscReal* result,MPI_Comm comm)
738: {
739: return MPI_Allreduce(local,result,1,MPIU_REAL,MPI_MAX,comm);
740: }
744: /*@C
745: PetscGlobalMin - Computes the minimum value over several processors
747: Collective on MPI_Comm
749: Input Parameters:
750: + local - the local value
751: - comm - the processors that find the minimum
753: Output Parameter:
754: . result - the minimum value
755:
756: Level: intermediate
758: Notes:
759: These functions are to be used inside user functions that are to be processed with
760: ADIC. PETSc will automatically provide differentiated versions of these functions
762: .seealso: PetscGlobalMax(), PetscGlobalSum()
763: @*/
764: int PetscGlobalMin(PetscReal* local,PetscReal* result,MPI_Comm comm)
765: {
766: return MPI_Allreduce(local,result,1,MPIU_REAL,MPI_MIN,comm);
767: }
771: /*@C
772: PetscGlobalSum - Computes the sum over several processors
774: Collective on MPI_Comm
776: Input Parameters:
777: + local - the local value
778: - comm - the processors that find the sum
780: Output Parameter:
781: . result - the sum
782:
783: Level: intermediate
785: Notes:
786: These functions are to be used inside user functions that are to be processed with
787: ADIC. PETSc will automatically provide differentiated versions of these functions
789: .seealso: PetscGlobalMin(), PetscGlobalMax()
790: @*/
791: int PetscGlobalSum(PetscScalar* local,PetscScalar* result,MPI_Comm comm)
792: {
793: return MPI_Allreduce(local,result,1,MPIU_SCALAR,PetscSum_Op,comm);
794: }
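/*
   Editor's illustrative sketch (not part of the original pinit.c): using the ADIC-safe
   reductions above inside a user routine, as the Notes suggest.  The routine name and its
   arguments are hypothetical; localerr stands for a per-process error measure and localdot
   for a per-process partial inner product.
*/
#if 0
static int GlobalReductionSketch(MPI_Comm comm,PetscReal localerr,PetscScalar localdot)
{
  PetscReal   maxerr;
  PetscScalar dot;
  int         ierr;

  ierr = PetscGlobalMax(&localerr,&maxerr,comm);CHKERRQ(ierr);  /* largest error on any process */
  ierr = PetscGlobalSum(&localdot,&dot,comm);CHKERRQ(ierr);     /* sum of the partial products  */
  return 0;
}
#endif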