Actual source code: mpi.h

petsc-3.12.5 2020-03-29
/*
   This is a special set of bindings for uni-processor use of MPI by the PETSc library.

   NOT ALL THE MPI CALLS ARE IMPLEMENTED CORRECTLY! Only those needed in PETSc.

   For example,
   * Does not implement send to self.
   * Does not implement attributes correctly.
*/

/*
  The following info is a response to one of the petsc-maint questions
  regarding MPIUNI.

  MPIUNI was developed with the aim of getting PETSc compiled and
  usable in the absence of a full MPI implementation. With this, we
  were able to provide PETSc on Windows and Windows64 even before any
  MPI implementation was available on these platforms. [Or with certain
  compilers - like Borland - that do not have a usable MPI
  implementation.]

  However, providing a sequential, standards-compliant MPI
  implementation is *not* the goal of MPIUNI. The development strategy
  was to make enough changes to it so that PETSc sources and examples
  compile without errors and run in uni-processor mode. This is
  the reason the functions are not individually documented.

  PETSc usage of MPIUNI is primarily from C. However, a minimal Fortran
  interface is also provided - to get PETSc Fortran examples with a
  few MPI calls working.

  One of the optimizations in MPIUNI is to avoid function call
  overhead when possible. Hence most of the C functions are
  implemented as macros. However, the function calls cannot be avoided
  with Fortran usage.

  Most PETSc objects have both sequential and parallel
  implementations, which are separate. For example, we have two types of
  sparse matrix storage formats - SeqAIJ and MPIAIJ. Some MPI
  routines are used in the Seq part, but most of them are used in the
  MPI part. The send/receive calls can be found mostly in the MPI
  part.

  When MPIUNI is used, only the Seq versions of the PETSc objects are
  used, even though the MPI variants of the objects are compiled. Since
  there are no send/receive calls in the Seq variant, PETSc works fine
  with MPIUNI in seq mode.

  The reason some send/receive functions are defined to abort() is to
  detect sections of code that use send/receive functions and get
  executed in sequential mode (which shouldn't happen in the case of
  PETSc).

  A proper implementation of send/receive would involve writing a
  function for each of them. Inside each of these functions, we would
  have to check whether the send is to self or the receive is from self,
  and then do the buffering accordingly (until the receive is called) -
  or, if a nonblocking receive is called, do a copy, etc. Handling
  the buffering aspects might be complicated enough that in this
  case a proper implementation of MPI might as well be used. This is
  the reason send to self is not implemented in MPIUNI, and never
  will be.

  Proper implementations of MPI [for example MPICH & OpenMPI] are
  available for most machines. When these packages are available, it is
  generally preferable to use one of them instead of MPIUNI - even if
  the user is using PETSc sequentially.

    - MPIUNI does not support all MPI functions [or functionality].
    Hence it might not work with external packages or user code that
    might have MPI calls in it.

    - MPIUNI is not a standards-compliant implementation for np=1.
    For example, if the user code has a send/recv to self, then it will
    abort. [There are similar issues with a number of other MPI
    features.] However, MPICH & OpenMPI are correct implementations of
    the MPI standard for np=1.

    - When user code uses multiple MPI-based packages that have their
    own *internal* stubs equivalent to MPIUNI - in sequential mode,
    invariably these multiple implementations of MPI for np=1 conflict
    with each other. The correct thing to do is to make all such
    packages use the *same* MPI implementation for np=1. MPICH/OpenMPI
    satisfy this requirement correctly [and hence are the correct choice].

    - Using MPICH/OpenMPI sequentially has minimal disadvantages: for
    example, these binaries can be run without mpirun/mpiexec as
    ./executable, without requiring any extra configuration for
    ssh/rsh/daemons etc. This should not be a reason to avoid these
    packages for sequential use.

*/

#if !defined(MPIUNI_H)
#define MPIUNI_H

/* Required by abort() in mpi.c & for win64 */
#include <petscconf.h>
#include <stddef.h>

/* This is reproduced from petscsys.h so that mpi.h can be used standalone, without first including petscsys.h */
#if defined(_WIN32) && defined(PETSC_USE_SHARED_LIBRARIES)
#  define MPIUni_ __declspec(dllexport)
#  define MPIUni_PETSC_DLLIMPORT __declspec(dllimport)
#elif defined(PETSC_USE_VISIBILITY_CXX) && defined(__cplusplus)
#  define MPIUni_ __attribute__((visibility ("default")))
#  define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
#elif defined(PETSC_USE_VISIBILITY_C) && !defined(__cplusplus)
#  define MPIUni_ __attribute__((visibility ("default")))
#  define MPIUni_PETSC_DLLIMPORT __attribute__((visibility ("default")))
#else
#  define MPIUni_
#  define MPIUni_PETSC_DLLIMPORT
#endif

#if defined(petsc_EXPORTS)
#  define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_
#else  /* Win32 users need this to import symbols from petsc.dll */
#  define MPIUni_PETSC_VISIBILITY_PUBLIC MPIUni_PETSC_DLLIMPORT
#endif

#if defined(__cplusplus)
#define MPIUni_PETSC_EXTERN extern "C" MPIUni_PETSC_VISIBILITY_PUBLIC
#else
#define MPIUni_PETSC_EXTERN extern MPIUni_PETSC_VISIBILITY_PUBLIC
#endif

#if defined(__cplusplus)
extern "C" {
#endif

/* MPI_Aint has to be a signed integral type large enough to hold a pointer */
typedef ptrdiff_t MPI_Aint;

/* old 32-bit MS compiler does not support long long */
#if defined(PETSC_SIZEOF_LONG_LONG)
typedef long long MPIUNI_INT64;
typedef unsigned long long MPIUNI_UINT64;
#elif defined(PETSC_HAVE___INT64)
typedef __int64 MPIUNI_INT64;
typedef unsigned __int64 MPIUNI_UINT64;
#else
#error "cannot determine MPIUNI_INT64, MPIUNI_UINT64 types"
#endif

/*
  MPIUNI_ARG is used in the macros below only to stop various C/C++ compilers
  from generating warning messages about unused variables while compiling PETSc.
*/
MPIUni_PETSC_EXTERN void *MPIUNI_TMP;
#define MPIUNI_ARG(arg) (MPIUNI_TMP = (void *)(MPI_Aint) (arg))
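
/*
  As an illustration of how MPIUNI_ARG() is used: a do-nothing stub such as
  MPI_Bcast() below chains one MPIUNI_ARG() per parameter inside a comma
  expression whose last operand is the return value, so

      MPI_Bcast(buffer,count,datatype,root,comm);

  expands to

      (MPIUNI_TMP = (void *)(MPI_Aint)(buffer),
       MPIUNI_TMP = (void *)(MPI_Aint)(count),
       MPIUNI_TMP = (void *)(MPI_Aint)(datatype),
       MPIUNI_TMP = (void *)(MPI_Aint)(root),
       MPIUNI_TMP = (void *)(MPI_Aint)(comm),
       MPI_SUCCESS);

  Every argument is "used", so the compiler has nothing to warn about, and the
  whole call still evaluates to MPI_SUCCESS with no function-call overhead.
*/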

#define MPI_IDENT            0
#define MPI_CONGRUENT        1
#define MPI_SIMILAR          2
#define MPI_UNEQUAL          3

#define MPI_BOTTOM   ((void *) 0)
#define MPI_IN_PLACE ((void *)-1)

#define MPI_PROC_NULL      (-1)
#define MPI_ANY_SOURCE     (-2)
#define MPI_ANY_TAG        (-1)
#define MPI_UNDEFINED  (-32766)

#define MPI_SUCCESS          0
#define MPI_ERR_OTHER       17
#define MPI_ERR_UNKNOWN     18
#define MPI_ERR_INTERN      21

#define MPI_KEYVAL_INVALID   0
#define MPI_TAG_UB           0

#define MPI_MAX_PROCESSOR_NAME 1024
#define MPI_MAX_ERROR_STRING   2056

typedef int MPI_Comm;
#define MPI_COMM_NULL  0
#define MPI_COMM_SELF  1
#define MPI_COMM_WORLD 2
#define MPI_COMM_TYPE_SHARED 1

typedef int MPI_Info;
#define MPI_INFO_NULL 0

typedef struct {int MPI_SOURCE,MPI_TAG,MPI_ERROR;} MPI_Status;
#define MPI_STATUS_IGNORE   (MPI_Status *)0
#define MPI_STATUSES_IGNORE (MPI_Status *)0

/* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
/* Any changes here must also be reflected in mpif.h */
typedef int MPI_Datatype;
#define MPI_DATATYPE_NULL      0
#define MPI_PACKED             0

#define MPI_FLOAT              (1 << 20 | 1 << 8 | (int)sizeof(float))
#define MPI_DOUBLE             (1 << 20 | 1 << 8 | (int)sizeof(double))
#define MPI_LONG_DOUBLE        (1 << 20 | 1 << 8 | (int)sizeof(long double))

#define MPI_COMPLEX            (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
#define MPI_C_COMPLEX          (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
#define MPI_C_FLOAT_COMPLEX    (2 << 20 | 1 << 8 | 2*(int)sizeof(float))
#define MPI_DOUBLE_COMPLEX     (2 << 20 | 1 << 8 | 2*(int)sizeof(double))
#define MPI_C_DOUBLE_COMPLEX   (2 << 20 | 1 << 8 | 2*(int)sizeof(double))

#define MPI_CHAR               (3 << 20 | 1 << 8 | (int)sizeof(char))
#define MPI_BYTE               (3 << 20 | 1 << 8 | (int)sizeof(char))
#define MPI_SIGNED_CHAR        (3 << 20 | 1 << 8 | (int)sizeof(signed char))
#define MPI_UNSIGNED_CHAR      (3 << 20 | 1 << 8 | (int)sizeof(unsigned char))

#define MPI_SHORT              (4 << 20 | 1 << 8 | (int)sizeof(short))
#define MPI_INT                (4 << 20 | 1 << 8 | (int)sizeof(int))
#define MPI_LONG               (4 << 20 | 1 << 8 | (int)sizeof(long))
#define MPI_LONG_LONG          (4 << 20 | 1 << 8 | (int)sizeof(MPIUNI_INT64))
#define MPI_LONG_LONG_INT      MPI_LONG_LONG
#define MPI_INTEGER8           MPI_LONG_LONG

#define MPI_UNSIGNED_SHORT     (5 << 20 | 1 << 8 | (int)sizeof(unsigned short))
#define MPI_UNSIGNED           (5 << 20 | 1 << 8 | (int)sizeof(unsigned))
#define MPI_UNSIGNED_LONG      (5 << 20 | 1 << 8 | (int)sizeof(unsigned long))
#define MPI_UNSIGNED_LONG_LONG (5 << 20 | 1 << 8 | (int)sizeof(MPIUNI_UINT64))

#define MPI_FLOAT_INT          (10 << 20 | 1 << 8 | (int)(sizeof(float) + sizeof(int)))
#define MPI_DOUBLE_INT         (11 << 20 | 1 << 8 | (int)(sizeof(double) + sizeof(int)))
#define MPI_LONG_INT           (12 << 20 | 1 << 8 | (int)(sizeof(long) + sizeof(int)))
#define MPI_SHORT_INT          (13 << 20 | 1 << 8 | (int)(sizeof(short) + sizeof(int)))
#define MPI_2INT               (14 << 20 | 1 << 8 | (int)(2*sizeof(int)))
#define MPI_2DOUBLE            (15 << 20 | 1 << 8 | (int)(2*sizeof(double)))

/* Fortran datatypes; Jed Brown says they should be defined here */
#define MPI_INTEGER MPI_INT
#define MPI_DOUBLE_PRECISION MPI_DOUBLE
#define MPI_COMPLEX16 MPI_C_DOUBLE_COMPLEX
#define MPI_2DOUBLE_PRECISION MPI_2DOUBLE

#define MPI_ORDER_C            0
#define MPI_ORDER_FORTRAN      1

#define MPI_sizeof_default(datatype) ((((datatype) >> 8) & 0xfff) * ((datatype) & 0xff))
#if defined(PETSC_USE_REAL___FP16)
MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FP16;
#define MPI_sizeof(datatype) ((datatype == MPIU___FP16) ? (int)(2*sizeof(char)) : MPI_sizeof_default(datatype))
#elif defined(PETSC_USE_REAL___FLOAT128)
MPIUni_PETSC_EXTERN MPI_Datatype MPIU___FLOAT128;
#define MPI_sizeof(datatype) ((datatype == MPIU___FLOAT128) ? (int)(2*sizeof(double)) : MPI_sizeof_default(datatype))
#else
#define MPI_sizeof(datatype) (MPI_sizeof_default(datatype))
#endif
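
/*
  As an illustration of the packing scheme above, on a machine where
  sizeof(int) == 4 (an assumption, not something this header requires):

      MPI_INT == 4 << 20 | 1 << 8 | 4
              == [combiner:0 (named) | type-index:4 | count:1 | base-bytes:4]

  and MPI_sizeof() just multiplies the count and base-bytes fields:

      MPI_sizeof(MPI_INT) == ((MPI_INT >> 8) & 0xfff) * (MPI_INT & 0xff)
                          == 1 * 4 == 4

  so no lookup table is needed to find the size of a datatype.
*/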

MPIUni_PETSC_EXTERN int MPIUNI_Memcpy(void*,const void*,int);

typedef int MPI_Request;
#define MPI_REQUEST_NULL 0

typedef int MPI_Group;
#define MPI_GROUP_NULL  0
#define MPI_GROUP_EMPTY 0

typedef int MPI_Op;
#define MPI_OP_NULL    0
#define MPI_SUM        1
#define MPI_MAX        2
#define MPI_MIN        3
#define MPI_REPLACE    4
#define MPI_PROD       5
#define MPI_LAND       6
#define MPI_BAND       7
#define MPI_LOR        8
#define MPI_BOR        9
#define MPI_LXOR       10
#define MPI_BXOR       11
#define MPI_MAXLOC     12
#define MPI_MINLOC     13

typedef void (MPI_User_function)(void*, void *, int *, MPI_Datatype *);

typedef int MPI_Errhandler;
#define MPI_ERRHANDLER_NULL  0
#define MPI_ERRORS_RETURN    0
#define MPI_ERRORS_ARE_FATAL 0
#define MPI_ERR_LASTCODE     0x3fffffff
typedef void (MPI_Handler_function)(MPI_Comm *, int *, ...);

/*
  Prototypes of some functions which are implemented in mpi.c
*/
typedef int (MPI_Copy_function)(MPI_Comm,int,void *,void *,void *,int *);
typedef int (MPI_Delete_function)(MPI_Comm,int,void *,void *);
#define MPI_NULL_COPY_FN   (MPI_Copy_function*)0
#define MPI_NULL_DELETE_FN (MPI_Delete_function*)0

/*
  To enable linking PETSc+MPIUNI with any other package that might have its
  own MPIUNI (equivalent implementation), we need to avoid using the 'MPI'
  namespace for MPIUNI functions that go into the PETSc library.

  For the C functions below (that get compiled into the PETSc library), we
  map the 'MPI' functions into the 'Petsc_MPI' namespace.

  With Fortran we use a similar mapping - thus requiring the use of the
  C preprocessor with mpif.h.
*/
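
/*
  As an illustration of the mapping below: a user call such as

      ierr = MPI_Comm_rank(MPI_COMM_WORLD,&rank);

  is turned by the preprocessor into

      ierr = Petsc_MPI_Comm_rank(2,&rank);

  so the object code never references the symbol MPI_Comm_rank and cannot
  collide with another package's own np=1 MPI stubs.
*/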
#define MPI_Abort         Petsc_MPI_Abort
#define MPIUni_Abort      Petsc_MPIUni_Abort
#define MPI_Attr_get      Petsc_MPI_Attr_get
#define MPI_Keyval_free   Petsc_MPI_Keyval_free
#define MPI_Attr_put      Petsc_MPI_Attr_put
#define MPI_Attr_delete   Petsc_MPI_Attr_delete
#define MPI_Keyval_create Petsc_MPI_Keyval_create
#define MPI_Comm_free     Petsc_MPI_Comm_free
#define MPI_Comm_dup      Petsc_MPI_Comm_dup
#define MPI_Comm_create   Petsc_MPI_Comm_create
#define MPI_Init          Petsc_MPI_Init
#define MPI_Finalize      Petsc_MPI_Finalize
#define MPI_Initialized   Petsc_MPI_Initialized
#define MPI_Finalized     Petsc_MPI_Finalized
#define MPI_Comm_size     Petsc_MPI_Comm_size
#define MPI_Comm_rank     Petsc_MPI_Comm_rank
#define MPI_Wtime         Petsc_MPI_Wtime
#define MPI_Type_get_envelope Petsc_MPI_Type_get_envelope
#define MPI_Type_get_contents Petsc_MPI_Type_get_contents
#define MPI_Add_error_class   Petsc_MPI_Add_error_class
#define MPI_Add_error_code    Petsc_MPI_Add_error_code

/* identical C bindings */
#define MPI_Comm_copy_attr_function   MPI_Copy_function
#define MPI_Comm_delete_attr_function MPI_Delete_function
#define MPI_COMM_NULL_COPY_FN         MPI_NULL_COPY_FN
#define MPI_COMM_NULL_DELETE_FN       MPI_NULL_DELETE_FN
#define MPI_Comm_create_keyval        Petsc_MPI_Keyval_create
#define MPI_Comm_free_keyval          Petsc_MPI_Keyval_free
#define MPI_Comm_get_attr             Petsc_MPI_Attr_get
#define MPI_Comm_set_attr             Petsc_MPI_Attr_put
#define MPI_Comm_delete_attr          Petsc_MPI_Attr_delete

MPIUni_PETSC_EXTERN int    MPIUni_Abort(MPI_Comm,int);
MPIUni_PETSC_EXTERN int    MPI_Abort(MPI_Comm,int);
MPIUni_PETSC_EXTERN int    MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag);
MPIUni_PETSC_EXTERN int    MPI_Keyval_free(int*);
MPIUni_PETSC_EXTERN int    MPI_Attr_put(MPI_Comm,int,void *);
MPIUni_PETSC_EXTERN int    MPI_Attr_delete(MPI_Comm,int);
MPIUni_PETSC_EXTERN int    MPI_Keyval_create(MPI_Copy_function *,MPI_Delete_function *,int *,void *);
MPIUni_PETSC_EXTERN int    MPI_Comm_free(MPI_Comm*);
MPIUni_PETSC_EXTERN int    MPI_Comm_dup(MPI_Comm,MPI_Comm *);
MPIUni_PETSC_EXTERN int    MPI_Comm_create(MPI_Comm,MPI_Group,MPI_Comm *);
MPIUni_PETSC_EXTERN int    MPI_Init(int *, char ***);
MPIUni_PETSC_EXTERN int    MPI_Finalize(void);
MPIUni_PETSC_EXTERN int    MPI_Initialized(int*);
MPIUni_PETSC_EXTERN int    MPI_Finalized(int*);
MPIUni_PETSC_EXTERN int    MPI_Comm_size(MPI_Comm,int*);
MPIUni_PETSC_EXTERN int    MPI_Comm_rank(MPI_Comm,int*);
MPIUni_PETSC_EXTERN double MPI_Wtime(void);

MPIUni_PETSC_EXTERN int MPI_Type_get_envelope(MPI_Datatype,int*,int*,int*,int*);
MPIUni_PETSC_EXTERN int MPI_Type_get_contents(MPI_Datatype,int,int,int,int*,MPI_Aint*,MPI_Datatype*);
MPIUni_PETSC_EXTERN int MPI_Add_error_class(int*);
MPIUni_PETSC_EXTERN int MPI_Add_error_code(int,int*);

/*
    Routines we have replaced with macros that do nothing.
    Some return error codes; others return MPI_SUCCESS.
*/

typedef int MPI_Fint;
#define MPI_Comm_f2c(comm) (MPI_Comm)(comm)
#define MPI_Comm_c2f(comm) (MPI_Fint)(comm)
#define MPI_Type_f2c(type) (MPI_Datatype)(type)
#define MPI_Type_c2f(type) (MPI_Fint)(type)
#define MPI_Op_f2c(op)     (MPI_Op)(op)
#define MPI_Op_c2f(op)     (MPI_Fint)(op)

#define MPI_Send(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Recv(buf,count,datatype,source,tag,comm,status) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(source),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(status),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Get_count(status,datatype,count) \
     (MPIUNI_ARG(status),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(count),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Bsend(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Ssend(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Rsend(buf,count,datatype,dest,tag,comm) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Buffer_attach(buffer,size) \
     (MPIUNI_ARG(buffer),\
      MPIUNI_ARG(size),\
      MPI_SUCCESS)
#define MPI_Buffer_detach(buffer,size) \
     (MPIUNI_ARG(buffer),\
      MPIUNI_ARG(size),\
      MPI_SUCCESS)
#define MPI_Ibsend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Issend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Irsend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Irecv(buf,count,datatype,source,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(source),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Isend(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Wait(request,status) \
     (MPIUNI_ARG(request),\
      MPIUNI_ARG(status),\
      MPI_SUCCESS)
#define MPI_Test(request,flag,status) \
     (MPIUNI_ARG(request),\
      MPIUNI_ARG(status),\
      *(flag) = 0,\
      MPI_SUCCESS)
#define MPI_Request_free(request) \
     (MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Waitany(count,array_of_requests,index,status) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(array_of_requests),\
      MPIUNI_ARG(status),\
      (*(status)).MPI_SOURCE = 0,\
      *(index) = 0,\
      MPI_SUCCESS)
#define MPI_Testany(a,b,c,d,e) \
     (MPIUNI_ARG(a),\
      MPIUNI_ARG(b),\
      MPIUNI_ARG(c),\
      MPIUNI_ARG(d),\
      MPIUNI_ARG(e),\
      MPI_SUCCESS)
#define MPI_Waitall(count,array_of_requests,array_of_statuses) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(array_of_requests),\
      MPIUNI_ARG(array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Testall(count,array_of_requests,flag,array_of_statuses) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(array_of_requests),\
      MPIUNI_ARG(flag),\
      MPIUNI_ARG(array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Waitsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) \
     (MPIUNI_ARG(incount),\
      MPIUNI_ARG(array_of_requests),\
      MPIUNI_ARG(outcount),\
      MPIUNI_ARG(array_of_indices),\
      MPIUNI_ARG(array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Comm_group(comm,group) \
     (MPIUNI_ARG(comm),\
      *group = 1,\
      MPI_SUCCESS)
#define MPI_Group_incl(group,n,ranks,newgroup) \
     (MPIUNI_ARG(group),\
      MPIUNI_ARG(n),\
      MPIUNI_ARG(ranks),\
      MPIUNI_ARG(newgroup),\
      MPI_SUCCESS)
#define MPI_Testsome(incount,array_of_requests,outcount,\
                     array_of_indices,array_of_statuses) \
     (MPIUNI_ARG(incount),\
      MPIUNI_ARG(array_of_requests),\
      MPIUNI_ARG(outcount),\
      MPIUNI_ARG(array_of_indices),\
      MPIUNI_ARG(array_of_statuses),\
      MPI_SUCCESS)
#define MPI_Iprobe(source,tag,comm,flag,status) \
     (MPIUNI_ARG(source),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      *(flag)=0,\
      MPIUNI_ARG(status),\
      MPI_SUCCESS)
#define MPI_Probe(source,tag,comm,status) \
     (MPIUNI_ARG(source),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(status),\
      MPI_SUCCESS)
#define MPI_Cancel(request) \
     (MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Test_cancelled(status,flag) \
     (MPIUNI_ARG(status),\
      *(flag)=0,\
      MPI_SUCCESS)
#define MPI_Send_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Bsend_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Ssend_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Rsend_init(buf,count,datatype,dest,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Recv_init(buf,count,datatype,source,tag,comm,request) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(source),\
      MPIUNI_ARG(tag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Start(request) \
     (MPIUNI_ARG(request),\
      MPI_SUCCESS)
#define MPI_Startall(count,array_of_requests) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(array_of_requests),\
      MPI_SUCCESS)
#define MPI_Sendrecv(sendbuf,sendcount,sendtype,\
                     dest,sendtag,recvbuf,recvcount,\
                     recvtype,source,recvtag,\
                     comm,status) \
     (MPIUNI_ARG(dest),\
      MPIUNI_ARG(sendtag),\
      MPIUNI_ARG(recvcount),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(source),\
      MPIUNI_ARG(recvtag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(status),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
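/*
  Note that, unlike MPI_Send()/MPI_Recv() above (which abort because a send to
  self would require buffering until the matching receive), MPI_Sendrecv() can
  be supported on a single process: both buffers are available in the same
  call, so the "communication" is simply a copy. As an illustration,

      double in[3] = {1.,2.,3.}, out[3];
      MPI_Sendrecv(in,3,MPI_DOUBLE,0,0,out,3,MPI_DOUBLE,0,0,
                   MPI_COMM_WORLD,MPI_STATUS_IGNORE);

  expands to MPIUNI_Memcpy(out,in,3*MPI_sizeof(MPI_DOUBLE)) plus the usual
  MPIUNI_ARG() uses, leaving out == {1.,2.,3.}.
*/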
#define MPI_Sendrecv_replace(buf,count,datatype,dest,sendtag,\
                             source,recvtag,comm,status) \
     (MPIUNI_ARG(buf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(dest),\
      MPIUNI_ARG(sendtag),\
      MPIUNI_ARG(source),\
      MPIUNI_ARG(recvtag),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(status),\
      MPI_SUCCESS)

#define MPI_COMBINER_NAMED      0
#define MPI_COMBINER_DUP        1
#define MPI_COMBINER_CONTIGUOUS 2
/* 32-bit packing scheme: [combiner:4 | type-index:8 | count:12 | base-bytes:8] */
#define MPI_Type_dup(oldtype,newtype) \
     (*(newtype) = oldtype, MPI_SUCCESS)
#define MPI_Type_contiguous(count,oldtype,newtype) \
     (*(newtype) = (MPI_COMBINER_CONTIGUOUS<<28)|((oldtype)&0x0ff00000)|(((oldtype)>>8&0xfff)*(count))<<8|((oldtype)&0xff), MPI_SUCCESS)
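/*
  As an illustration of the packing above (assuming sizeof(double) == 8):
  MPI_Type_contiguous(3,MPI_DOUBLE,&t) builds t with

      combiner   = MPI_COMBINER_CONTIGUOUS   [top 4 bits]
      type-index = that of MPI_DOUBLE        [bits 20-27, copied from oldtype]
      count      = 1 * 3 = 3                 [bits 8-19, oldtype count times count]
      base-bytes = 8                         [bits 0-7, copied from oldtype]

  so MPI_sizeof(t) == 3 * 8 == 24, and the combiner and constituent type can
  later be recovered from the value of t alone.
*/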
#define MPI_Type_vector(count,blocklength,stride,oldtype,newtype) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(blocklength),\
      MPIUNI_ARG(stride),\
      MPIUNI_ARG(oldtype),\
      MPIUNI_ARG(newtype),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_hvector(count,blocklength,stride,oldtype,newtype) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(blocklength),\
      MPIUNI_ARG(stride),\
      MPIUNI_ARG(oldtype),\
      MPIUNI_ARG(newtype),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_indexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(array_of_blocklengths),\
      MPIUNI_ARG(array_of_displacements),\
      MPIUNI_ARG(oldtype),\
      MPIUNI_ARG(newtype),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_hindexed(count,array_of_blocklengths,array_of_displacements,oldtype,newtype) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(array_of_blocklengths),\
      MPIUNI_ARG(array_of_displacements),\
      MPIUNI_ARG(oldtype),\
      MPIUNI_ARG(newtype),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Type_struct(count,array_of_blocklengths,array_of_displacements,array_of_types,newtype) \
     (MPIUNI_ARG(count),\
      MPIUNI_ARG(array_of_blocklengths),\
      MPIUNI_ARG(array_of_displacements),\
      MPIUNI_ARG(array_of_types),\
      MPIUNI_ARG(newtype),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Address(location,address) \
     (*(address) = (MPI_Aint)((char *)(location)), MPI_SUCCESS)
#define MPI_Type_size(datatype,size) (*(size) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_lb(datatype,lb) (MPIUNI_ARG(datatype), *(lb) = 0, MPI_SUCCESS)
#define MPI_Type_ub(datatype,ub) (*(ub) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_extent(datatype,extent) \
     (*(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_get_extent(datatype,lb,extent) \
     (*(lb) = 0, *(extent) = MPI_sizeof((datatype)), MPI_SUCCESS)
#define MPI_Type_commit(datatype) (MPIUNI_ARG(datatype), MPI_SUCCESS)
#define MPI_Type_free(datatype) (*(datatype) = MPI_DATATYPE_NULL, MPI_SUCCESS)
#define MPI_Get_elements(status,datatype,count) \
     (MPIUNI_ARG(status),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(count),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Pack(inbuf,incount,datatype,outbuf,outsize,position,comm) \
     (MPIUNI_ARG(inbuf),\
      MPIUNI_ARG(incount),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(outbuf),\
      MPIUNI_ARG(outsize),\
      MPIUNI_ARG(position),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Unpack(inbuf,insize,position,outbuf,outcount,datatype,comm) \
     (MPIUNI_ARG(inbuf),\
      MPIUNI_ARG(insize),\
      MPIUNI_ARG(position),\
      MPIUNI_ARG(outbuf),\
      MPIUNI_ARG(outcount),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Pack_size(incount,datatype,comm,size) \
     (MPIUNI_ARG(incount),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(comm),\
      MPIUNI_ARG(size),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Barrier(comm) \
     (MPIUNI_ARG(comm),\
      MPI_SUCCESS)
#define MPI_Bcast(buffer,count,datatype,root,comm) \
     (MPIUNI_ARG(buffer),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(root),\
      MPIUNI_ARG(comm),\
      MPI_SUCCESS)
#define MPI_Gather(sendbuf,sendcount,sendtype,\
                   recvbuf,recvcount,recvtype,\
                   root,comm) \
     (MPIUNI_ARG(recvcount),\
      MPIUNI_ARG(root),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Gatherv(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcounts,displs,\
                    recvtype,root,comm) \
     (MPIUNI_ARG(recvcounts),\
      MPIUNI_ARG(displs),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(root),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Scatter(sendbuf,sendcount,sendtype,\
                    recvbuf,recvcount,recvtype,\
                    root,comm) \
     (MPIUNI_ARG(sendcount),\
      MPIUNI_ARG(sendtype),\
      MPIUNI_ARG(recvbuf),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(root),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
#define MPI_Scatterv(sendbuf,sendcounts,displs,\
                     sendtype,recvbuf,recvcount,\
                     recvtype,root,comm) \
     (MPIUNI_ARG(displs),\
      MPIUNI_ARG(sendtype),\
      MPIUNI_ARG(sendcounts),\
      MPIUNI_ARG(root),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(recvcount)*MPI_sizeof(recvtype)))
#define MPI_Allgather(sendbuf,sendcount,sendtype,\
                      recvbuf,recvcount,recvtype,comm) \
     (MPIUNI_ARG(recvcount),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Allgatherv(sendbuf,sendcount,sendtype,\
                       recvbuf,recvcounts,displs,recvtype,comm) \
     (MPIUNI_ARG(recvcounts),\
      MPIUNI_ARG(displs),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Alltoall(sendbuf,sendcount,sendtype,\
                     recvbuf,recvcount,recvtype,comm) \
     (MPIUNI_ARG(recvcount),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(sendcount)*MPI_sizeof(sendtype)))
#define MPI_Alltoallv(sendbuf,sendcounts,sdispls,sendtype,\
                      recvbuf,recvcounts,rdispls,recvtype,comm) \
     (MPIUNI_ARG(sendbuf),\
      MPIUNI_ARG(sendcounts),\
      MPIUNI_ARG(sdispls),\
      MPIUNI_ARG(sendtype),\
      MPIUNI_ARG(recvbuf),\
      MPIUNI_ARG(recvcounts),\
      MPIUNI_ARG(rdispls),\
      MPIUNI_ARG(recvtype),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Alltoallw(sendbuf,sendcounts,sdispls,sendtypes,\
                      recvbuf,recvcounts,rdispls,recvtypes,comm) \
     (MPIUNI_ARG(sendbuf),\
      MPIUNI_ARG(sendcounts),\
      MPIUNI_ARG(sdispls),\
      MPIUNI_ARG(sendtypes),\
      MPIUNI_ARG(recvbuf),\
      MPIUNI_ARG(recvcounts),\
      MPIUNI_ARG(rdispls),\
      MPIUNI_ARG(recvtypes),\
      MPIUNI_ARG(comm),\
      MPIUni_Abort(MPI_COMM_WORLD,0))
#define MPI_Reduce(sendbuf,recvbuf,count,datatype,op,root,comm) \
     (MPIUNI_ARG(op),\
      MPIUNI_ARG(root),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
#define MPI_Allreduce(sendbuf,recvbuf,count,datatype,op,comm) \
     (MPIUNI_ARG(op),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
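/*
  With a single process, the reduction of one contribution under any op is
  that contribution itself, so the op argument can be ignored and
  MPI_Reduce()/MPI_Allreduce() degenerate to a copy. As an illustration,

      double local = 4.2, global;
      MPI_Allreduce(&local,&global,1,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD);

  leaves global == 4.2, exactly the np=1 result a full MPI implementation
  would produce.
*/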
#define MPI_Scan(sendbuf,recvbuf,count,datatype,op,comm) \
     (MPIUNI_ARG(op),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(count)*MPI_sizeof(datatype)))
#define MPI_Exscan(sendbuf,recvbuf,count,datatype,op,comm) \
     (MPIUNI_ARG(sendbuf),\
      MPIUNI_ARG(recvbuf),\
      MPIUNI_ARG(count),\
      MPIUNI_ARG(datatype),\
      MPIUNI_ARG(op),\
      MPIUNI_ARG(comm),\
      MPI_SUCCESS)
#define MPI_Reduce_scatter(sendbuf,recvbuf,recvcounts,datatype,op,comm) \
     (MPIUNI_ARG(op),\
      MPIUNI_ARG(comm),\
      MPIUNI_Memcpy(recvbuf,sendbuf,(*recvcounts)*MPI_sizeof(datatype)))
#define MPI_Op_create(function,commute,op) \
     (MPIUNI_ARG(function),\
      MPIUNI_ARG(commute),\
      MPIUNI_ARG(op),\
      MPI_SUCCESS)
#define MPI_Op_free(op) \
     (*(op) = MPI_OP_NULL, MPI_SUCCESS)

#define MPI_Group_size(group,size) \
  (MPIUNI_ARG(group),\
   *(size)=1,\
   MPI_SUCCESS)
#define MPI_Group_rank(group,rank) \
  (MPIUNI_ARG(group),\
   *(rank)=0,\
   MPI_SUCCESS)
#define MPI_Group_translate_ranks(group1,n,ranks1,group2,ranks2) \
     (MPIUNI_ARG(group1),\
      MPIUNI_ARG(group2),\
      MPIUNI_Memcpy((ranks2),(ranks1),(n)*sizeof(int)))
#define MPI_Group_compare(group1,group2,result) \
    (MPIUNI_ARG(group1),\
     MPIUNI_ARG(group2),\
     *(result)=1,\
     MPI_SUCCESS)
#define MPI_Group_union(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_intersection(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_difference(group1,group2,newgroup) MPI_SUCCESS
#define MPI_Group_excl(group,n,ranks,newgroup) MPI_SUCCESS
#define MPI_Group_range_incl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_range_excl(group,n,ranges,newgroup) MPI_SUCCESS
#define MPI_Group_free(group) \
     (*(group) = MPI_GROUP_NULL, MPI_SUCCESS)

#define MPI_Comm_compare(comm1,comm2,result) \
     (MPIUNI_ARG(comm1),\
      MPIUNI_ARG(comm2),\
      *(result)=MPI_IDENT,\
      MPI_SUCCESS)
#define MPI_Comm_split(comm,color,key,newcomm) \
     (MPIUNI_ARG(color),\
      MPIUNI_ARG(key),\
      MPI_Comm_dup(comm,newcomm))
#define MPI_Comm_split_type(comm,color,key,info,newcomm) \
     (MPIUNI_ARG(color),\
      MPIUNI_ARG(key),\
      MPIUNI_ARG(info),\
      MPI_Comm_dup(comm,newcomm))
#define MPI_Comm_test_inter(comm,flag) (*(flag)=1, MPI_SUCCESS)
#define MPI_Comm_remote_size(comm,size) (*(size)=1, MPI_SUCCESS)
#define MPI_Comm_remote_group(comm,group) MPI_SUCCESS
#define MPI_Intercomm_create(local_comm,local_leader,peer_comm,\
     remote_leader,tag,newintercomm) MPI_SUCCESS
#define MPI_Intercomm_merge(intercomm,high,newintracomm) MPI_SUCCESS
#define MPI_Topo_test(comm,flag) MPI_SUCCESS
#define MPI_Cart_create(comm_old,ndims,dims,periods,\
     reorder,comm_cart) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Dims_create(nnodes,ndims,dims) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_create(comm,a,b,c,d,e) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graphdims_get(comm,nnodes,nedges) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_get(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cartdim_get(comm,ndims) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_get(comm,maxdims,dims,periods,coords) \
     MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_rank(comm,coords,rank) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_coords(comm,rank,maxdims,coords) \
     MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors_count(comm,rank,nneighbors) \
     MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_neighbors(comm,rank,maxneighbors,neighbors) \
     MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_shift(comm,direction,disp,rank_source,rank_dest) \
     MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_sub(comm,remain_dims,newcomm) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Cart_map(comm,ndims,dims,periods,newrank) MPIUni_Abort(MPI_COMM_WORLD,0)
#define MPI_Graph_map(comm,a,b,c,d) MPIUni_Abort(MPI_COMM_WORLD,0)

#define MPI_Get_processor_name(name,result_len) \
     (*(result_len) = 9,MPIUNI_Memcpy(name,"localhost",10*sizeof(char)))
#define MPI_Errhandler_create(function,errhandler) \
     (MPIUNI_ARG(function),\
      *(errhandler) = MPI_ERRORS_RETURN,\
      MPI_SUCCESS)
#define MPI_Errhandler_set(comm,errhandler) \
     (MPIUNI_ARG(comm),\
      MPIUNI_ARG(errhandler),\
      MPI_SUCCESS)
#define MPI_Errhandler_get(comm,errhandler) \
     (MPIUNI_ARG(comm),\
      (*errhandler) = MPI_ERRORS_RETURN,\
      MPI_SUCCESS)
#define MPI_Errhandler_free(errhandler) \
     (*(errhandler) = MPI_ERRHANDLER_NULL,\
      MPI_SUCCESS)
#define MPI_Error_string(errorcode,string,result_len) \
     (MPIUNI_ARG(errorcode),\
      *(result_len) = 9,\
      MPIUNI_Memcpy(string,"MPI error",10*sizeof(char)))
#define MPI_Error_class(errorcode,errorclass) \
     (*(errorclass) = errorcode, MPI_SUCCESS)
#define MPI_Wtick() 1.0
#define MPI_Pcontrol(level) MPI_SUCCESS

/* MPI-IO additions */

typedef int MPI_File;
#define MPI_FILE_NULL 0

typedef int MPI_Offset;

#define MPI_MODE_RDONLY  0
#define MPI_MODE_WRONLY  0
#define MPI_MODE_CREATE  0

#define MPI_File_open(comm,filename,amode,info,mpi_fh) \
  (MPIUNI_ARG(comm),\
   MPIUNI_ARG(filename),\
   MPIUNI_ARG(amode),\
   MPIUNI_ARG(info),\
   MPIUNI_ARG(mpi_fh),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_close(mpi_fh) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_set_view(mpi_fh,disp,etype,filetype,datarep,info) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(disp),\
   MPIUNI_ARG(etype),\
   MPIUNI_ARG(filetype),\
   MPIUNI_ARG(datarep),\
   MPIUNI_ARG(info),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_write_all(mpi_fh,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_File_read_all(mpi_fh,buf,count,datatype,status) \
  (MPIUNI_ARG(mpi_fh),\
   MPIUNI_ARG(buf),\
   MPIUNI_ARG(count),\
   MPIUNI_ARG(datatype),\
   MPIUNI_ARG(status),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

/* called from PetscInitialize() - so return success */
#define MPI_Register_datarep(name,read_conv_fn,write_conv_fn,extent_fn,state) \
  (MPIUNI_ARG(name),\
   MPIUNI_ARG(read_conv_fn),\
   MPIUNI_ARG(write_conv_fn),\
   MPIUNI_ARG(extent_fn),\
   MPIUNI_ARG(state),\
   MPI_SUCCESS)

#define MPI_Type_create_subarray(ndims,array_of_sizes,array_of_subsizes,array_of_starts,order,oldtype,newtype) \
  (MPIUNI_ARG(ndims),\
   MPIUNI_ARG(array_of_sizes),\
   MPIUNI_ARG(array_of_subsizes),\
   MPIUNI_ARG(array_of_starts),\
   MPIUNI_ARG(order),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_Type_create_resized(oldtype,lb,extent,newtype) \
  (MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(lb),\
   MPIUNI_ARG(extent),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#define MPI_Type_create_indexed_block(count,blocklength,array_of_displacements,oldtype,newtype) \
  (MPIUNI_ARG(count),\
   MPIUNI_ARG(blocklength),\
   MPIUNI_ARG(array_of_displacements),\
   MPIUNI_ARG(oldtype),\
   MPIUNI_ARG(newtype),\
   MPIUni_Abort(MPI_COMM_WORLD,0))

#if defined(__cplusplus)
}
#endif
#endif