Actual source code: shvec.c
/*$Id: shvec.c,v 1.50 2001/03/23 23:21:29 balay Exp $*/

/*
   This file contains routines for parallel vector operations that use shared memory.
*/
#include "src/vec/impls/mpi/pvecimpl.h"

/*
   Could not get the include files to work properly on the SGI with
   the C++ compiler.
*/
#if defined(PETSC_USE_SHARED_MEMORY) && !defined(__cplusplus)

EXTERN int PetscSharedMalloc(MPI_Comm,int,int,void**);
int VecDuplicate_Shared(Vec win,Vec *v)
{
  int     ierr;
  Vec_MPI *vw,*w = (Vec_MPI *)win->data;
  Scalar  *array;

  PetscFunctionBegin;

  /* first processor allocates entire array and sends its address to the others */
  ierr = PetscSharedMalloc(win->comm,win->n*sizeof(Scalar),win->N*sizeof(Scalar),(void**)&array);CHKERRQ(ierr);

  ierr = VecCreate(win->comm,win->n,win->N,v);CHKERRQ(ierr);
  ierr = VecCreate_MPI_Private(*v,w->nghost,array,win->map);CHKERRQ(ierr);
  vw   = (Vec_MPI *)(*v)->data;

  /* New vector should inherit stashing property of parent */
  vw->donotstash = w->donotstash;

  ierr = PetscOListDuplicate(win->olist,&(*v)->olist);CHKERRQ(ierr);
  ierr = PetscFListDuplicate(win->qlist,&(*v)->qlist);CHKERRQ(ierr);

  if (win->mapping) {
    (*v)->mapping = win->mapping;
    ierr = PetscObjectReference((PetscObject)win->mapping);CHKERRQ(ierr);
  }
  (*v)->ops->duplicate = VecDuplicate_Shared;
  (*v)->bs             = win->bs;
  (*v)->bstash.bs      = win->bstash.bs;
  PetscFunctionReturn(0);
}
EXTERN_C_BEGIN
int VecCreate_Shared(Vec vv)
{
  int    ierr;
  Scalar *array;

  PetscFunctionBegin;
  ierr = PetscSplitOwnership(vv->comm,&vv->n,&vv->N);CHKERRQ(ierr);
  ierr = PetscSharedMalloc(vv->comm,vv->n*sizeof(Scalar),vv->N*sizeof(Scalar),(void**)&array);CHKERRQ(ierr);

  ierr = VecCreate_MPI_Private(vv,0,array,PETSC_NULL);CHKERRQ(ierr);
  vv->ops->duplicate = VecDuplicate_Shared;

  PetscFunctionReturn(0);
}
EXTERN_C_END
/* ----------------------------------------------------------------------------------------
     Code to manage shared memory allocation under the SGI with MPI.

   We associate with a communicator a shared memory "arena" from which memory may be shmalloced.
*/
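/*
   Summary of the scheme implemented below: PetscSharedInitialize() has process 0 pick a
   temporary file name, broadcasts it, and every process calls usinit() on that file so they
   all attach to one common arena; the arena pointer is cached on the communicator as an MPI
   attribute.  PetscSharedMalloc() then has process 0 allocate the full global block from the
   arena, broadcasts the base address, and each process adds a prefix sum of the local lengths
   to locate the start of its own portion.
*/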
#include "petscsys.h"
#include "petscfix.h"
#if defined(PETSC_HAVE_PWD_H)
#include <pwd.h>
#endif
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#if defined(PETSC_HAVE_UNISTD_H)
#include <unistd.h>
#endif
#if defined(PETSC_HAVE_STDLIB_H)
#include <stdlib.h>
#endif
#if !defined(PARCH_win32)
#include <sys/param.h>
#include <sys/utsname.h>
#endif
#if defined(PARCH_win32)
#include <windows.h>
#include <io.h>
#include <direct.h>
#endif
#if defined(PARCH_win32_gnu)
#include <windows.h>
#endif
#include <fcntl.h>
#include <time.h>
#if defined(PETSC_HAVE_SYS_SYSTEMINFO_H)
#include <sys/systeminfo.h>
#endif
#include "petscfix.h"
static int Petsc_Shared_keyval = MPI_KEYVAL_INVALID;
static int Petsc_Shared_size   = 100000000;

/*
   Private routine to delete internal storage when a communicator is freed.
   This is called by MPI, not by users.

   The binding for the first argument changed from MPI 1.0 to 1.1; in 1.0
   it was MPI_Comm *comm.
*/
static int Petsc_DeleteShared(MPI_Comm comm,int keyval,void* attr_val,void* extra_state)
{
  int ierr;

  PetscFunctionBegin;
  ierr = PetscFree(attr_val);CHKERRQ(ierr);
  PetscFunctionReturn(MPI_SUCCESS);
}

int PetscSharedMemorySetSize(int s)
{
  PetscFunctionBegin;
  Petsc_Shared_size = s;
  PetscFunctionReturn(0);
}
130: #include "petscfix.h"
132: #include <ulocks.h>
134: int PetscSharedInitialize(MPI_Comm comm)
135: {
136: int rank,len,ierr,flag;
137: char filename[256];
138: usptr_t **arena;
142: if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
143: /*
144: The calling sequence of the 2nd argument to this function changed
145: between MPI Standard 1.0 and the revisions 1.1 Here we match the
146: new standard, if you are using an MPI implementation that uses
147: the older version you will get a warning message about the next line;
148: it is only a warning message and should do no harm.
149: */
150: MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DeleteShared,&Petsc_Shared_keyval,0);
151: }
153: MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);
155: if (!flag) {
156: /* This communicator does not yet have a shared memory areana */
157: PetscMalloc(sizeof(usptr_t*),&arena);
159: MPI_Comm_rank(comm,&rank);
160: if (!rank) {
161: PetscStrcpy(filename,"/tmp/PETScArenaXXXXXX");
162: mktemp(filename);
163: PetscStrlen(filename,&len);
164: }
165: ierr = MPI_Bcast(&len,1,MPI_INT,0,comm);
166: ierr = MPI_Bcast(filename,len+1,MPI_CHAR,0,comm);
167: ierr = PetscOptionsGetInt(PETSC_NULL,"-shared_size",&Petsc_Shared_size,&flag);
168: usconfig(CONF_INITSIZE,Petsc_Shared_size);
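    /* every process calls usinit() on the same broadcast file name, so they all attach to one shared arena */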
    *arena = usinit(filename);
    ierr   = MPI_Attr_put(comm,Petsc_Shared_keyval,arena);CHKERRQ(ierr);
  }

  PetscFunctionReturn(0);
}
int PetscSharedMalloc(MPI_Comm comm,int llen,int len,void **result)
{
  char    *value;
  int     ierr,shift,rank,flag;
  usptr_t **arena;

  PetscFunctionBegin;

  *result = 0;
  if (Petsc_Shared_keyval == MPI_KEYVAL_INVALID) {
    ierr = PetscSharedInitialize(comm);CHKERRQ(ierr);
  }
  ierr = MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);CHKERRQ(ierr);
  if (!flag) {
    ierr = PetscSharedInitialize(comm);CHKERRQ(ierr);
    ierr = MPI_Attr_get(comm,Petsc_Shared_keyval,(void**)&arena,&flag);CHKERRQ(ierr);
    if (!flag) SETERRQ(1,"Unable to initialize shared memory");
  }
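  /* exclusive prefix sum of the local lengths gives this process's byte offset into the shared block */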
  ierr   = MPI_Scan(&llen,&shift,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
  shift -= llen;
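  /* only the first process allocates from the arena; the base address is then broadcast to the others */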
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank) {
    value = (char*)usmalloc((size_t)len,*arena);
    if (!value) {
      (*PetscErrorPrintf)("PETSC ERROR: Unable to allocate shared memory location\n");
      (*PetscErrorPrintf)("PETSC ERROR: Run with option -shared_size <size>\n");
      (*PetscErrorPrintf)("PETSC ERROR: with size > %d\n",(int)(1.2*(Petsc_Shared_size+len)));
      SETERRQ(1,"Unable to malloc shared memory");
    }
  }
  ierr    = MPI_Bcast(&value,8,MPI_BYTE,0,comm);CHKERRQ(ierr);
  value  += shift;
  *result = (void*)value;

  PetscFunctionReturn(0);
}
#else

EXTERN_C_BEGIN
int VecCreate_Shared(MPI_Comm comm,int n,int N,Vec *vv)
{
  int ierr,size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    SETERRQ(1,"Not supported for shared memory vector objects on this machine");
  }
  ierr = VecCreateSeq(comm,PetscMax(n,N),vv);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
EXTERN_C_END

#endif
/*@C
   VecCreateShared - Creates a parallel vector that uses shared memory.

   Collective on MPI_Comm

   Input Parameters:
+  comm - the MPI communicator to use
.  n - local vector length (or PETSC_DECIDE to have it calculated if N is given)
-  N - global vector length (or PETSC_DECIDE to have it calculated if n is given)

   Output Parameter:
.  v - the vector

   Notes:
   Currently VecCreateShared() is available only on the SGI; otherwise,
   this routine is the same as VecCreateMPI().

   Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the
   same type as an existing vector.  (A short usage sketch follows the routine below.)

   Level: advanced

   Concepts: vectors^creating with shared memory

.seealso: VecCreateSeq(), VecCreate(), VecCreateMPI(), VecDuplicate(), VecDuplicateVecs(),
          VecCreateGhost(), VecCreateMPIWithArray(), VecCreateGhostWithArray()

@*/
int VecCreateShared(MPI_Comm comm,int n,int N,Vec *v)
{
  int ierr;

  PetscFunctionBegin;
  ierr = VecCreate(comm,n,N,v);CHKERRQ(ierr);
  ierr = VecSetType(*v,VEC_SHARED);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
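/*
   A minimal usage sketch, not part of the original shvec.c: the function name
   VecCreateSharedExample is hypothetical, PETSc is assumed to be initialized already,
   and only routines whose calling sequences appear in or match the era of this file are
   used.  The block is guarded with #if 0 so that it is never compiled here.
*/
#if 0
#include "petscvec.h"

int VecCreateSharedExample(MPI_Comm comm)
{
  int ierr;
  Vec x,y;

  PetscFunctionBegin;
  /* global length 100; PETSC_DECIDE lets PETSc pick each process's local length */
  ierr = VecCreateShared(comm,PETSC_DECIDE,100,&x);CHKERRQ(ierr);
  /* duplicates of a shared vector are again shared (see VecDuplicate_Shared above) */
  ierr = VecDuplicate(x,&y);CHKERRQ(ierr);
  ierr = VecDestroy(y);CHKERRQ(ierr);
  ierr = VecDestroy(x);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif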