Mesh Oriented datABase (MOAB), version 5.4.1
Array-based unstructured mesh datastructure
00001 ! PushParMeshIntoMoabF90: push parallel mesh into moab, F90 version
00002 !
00003 ! This program shows how to push a mesh into MOAB in parallel from Fortran90, with sufficient
00004 ! information to resolve boundary sharing and exchange a layer of ghost information.
00005 ! To successfully link this example, you need to specify FCFLAGS that include:
00006 ! a) -DMOAB_HAVE_MPI, and
00007 ! b) flags required to link Fortran90 MPI programs with the C++ compiler; these flags
00008 ! can often be found on your system by inspecting the output of 'mpif90 -show'
00009 ! For example, using gcc, the link line looks like:
00010 ! make MOAB_DIR= FCFLAGS="-DMOAB_HAVE_MPI -I/usr/lib/openmpi/include -pthread -I/usr/lib/openmpi/lib -L/usr/lib/openmpi/lib -lmpi_f90 -lmpi_f77 -lmpi -lopen-rte -lopen-pal -ldl -Wl,--export-dynamic -lnsl -lutil -lm -ldl" PushParMeshIntoMoabF90
00011 !
00012 ! Usage: PushParMeshIntoMoab
00013 #define ERROR(rval) if (0 .ne. rval) call exit(1)
00014
00015 program PushParMeshIntoMoab
00016
! Push a small hardwired quad mesh (the 6 faces of a cube, 8 vertices)
! into MOAB in parallel through the iMesh/iMeshP Fortran bindings:
! each rank builds its local share of elements, create_mesh resolves
! shared vertices and exchanges one ghost layer, and rank 0 reports the
! global vertex/face counts.
00017 use ISO_C_BINDING
00018 implicit none
00019
00020 #include "moab/MOABConfig.h"
00021 #ifdef MOAB_HAVE_MPI
00022 # include "mpif.h"
00023 # include "iMeshP_f.h"
00024 #else
00025 # include "iMesh_f.h"
00026 #endif
00027
00028 ! declarations
00029 ! imesh is the instance handle
00030 iMesh_Instance imesh
00031 ! NUMV, NUME, NVPERE are the hardwired here; these are for the whole mesh,
00032 ! local mesh determined later
00033 integer NUMV, NUME, NVPERE
00034 parameter (NUMV = 8) ! # vertices in whole mesh
00035 parameter (NUME = 6) ! # elements in whole mesh
00036 parameter (NVPERE = 4) ! # vertices per element
00037 ! ents, verts will be arrays storing vertex/entity handles
00038 iBase_EntityHandle, pointer :: ents(:), verts(:)
00039 iBase_EntitySetHandle root_set
00040 TYPE(C_PTR) :: vertsPtr, entsPtr
00041 ! storage for vertex positions, element connectivity indices, global vertex ids
00042 real*8 coords(0:3*NUMV-1)
! NOTE(review): gids is declared but never referenced below; likewise
! lgids is filled in the init loop but not read afterwards -- the global
! ids actually handed to create_mesh are accumulated in gvids.
00043 integer iconn(0:4*NUME-1), gids(0:NUMV-1)
00044 !
00045 ! local variables
00046 integer lgids(0:NUMV-1), lconn(0:4*NUME-1)
00047 real*8 lcoords(0:3*NUMV-1)
00048 integer lnumv, lvids(0:NUMV-1), gvids(0:NUMV-1)
00049 integer lvpe, ltp ! lvpe = # vertices per entity, ltp = element type
00050 integer ic, ie, iv, istart, iend, ierr, indv, lnume, rank, sz
00051
00052 #ifdef MOAB_HAVE_MPI
00053 ! local variables for parallel runs
00054 iMeshP_PartitionHandle imeshp
00055 ! integer MPI_COMM_WORLD
00056 #endif
00057
00058 ! vertex positions, cube; side 2
00059 ! (first index varying fastest)
00060 data coords / &
00061 -1., -1., -1, 1., -1., -1., 1., 1., -1., -1., 1., -1., &
00062 -1., -1., 1, 1., -1., 1., 1., 1., 1., -1., 1., 1. /
00063
00064 ! quad index numbering, each quad ccw, sides then bottom then top
00065 data iconn / &
00066 0, 1, 5, 4, &
00067 1, 2, 6, 5, &
00068 2, 3, 7, 6, &
00069 3, 0, 4, 7, &
00070 0, 3, 2, 1, &
00071 4, 5, 6, 7 /
00072
00073 data lvpe /4/ ! quads in this example
00074 data ltp / iMesh_QUADRILATERAL / ! from iBase_f.h
00075
00076 ! initialize global vertex ids
! ids are 1-based (vertex iv gets global id iv+1)
00077 do iv = 0, NUMV-1
00078 lgids(iv) = iv+1
00079 end do
00080
00081 #ifdef MOAB_HAVE_MPI
00082 ! init the parallel partition
00083 call MPI_INIT(ierr)
00084 call MPI_COMM_SIZE(MPI_COMM_WORLD, sz, ierr)
00085 call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
00086 ERROR(ierr)
00087 ! compute starting/ending element numbers
! simple block partition of the NUME elements over sz ranks; the last
! rank absorbs the remainder when NUME is not divisible by sz
00088 lnume = NUME / sz
00089 istart = rank * lnume
00090 iend = istart + lnume - 1
00091 if (rank .eq. sz-1) then
00092 iend = NUME-1
00093 lnume = iend - istart + 1
00094 endif
00095 ! for my elements, figure out which vertices I use and accumulate local indices and coords
00096 ! lvids stores the local 0-based index for each vertex; -1 means vertex i isn't used locally
00097 ! also build up connectivity indices for local elements, in lconn
00098 do iv = 0, NUMV-1
00099 lvids(iv) = -1
00100 end do
! lnumv starts at -1 so the first pre-increment below yields local
! index 0; until the correction after the loop it holds (count - 1)
00101 lnumv = -1
00102 do ie = istart, iend
00103 do iv = 0, lvpe-1
! indv = global (whole-mesh) 0-based index of this corner vertex
00104 indv = iconn(lvpe*ie + iv)
00105 if (lvids(indv) .eq. -1) then
! first time this vertex is seen locally: assign it the next
! local index and cache its coordinates and 1-based global id
00106 lnumv = lnumv + 1 ! increment local # verts
00107 do ic = 0, 2 ! cache local coords
00108 lcoords(3*lnumv+ic) = coords(3*indv+ic)
00109 end do
00110 lvids(indv) = lnumv
00111 gvids(lnumv) = 1+indv
00112 end if
00113 lconn(lvpe*(ie-istart)+iv) = lvids(indv)
00114 end do ! do iv
00115 end do ! do ie
00116
! convert the last assigned local index into a vertex count
00117 lnumv = lnumv + 1
00118
00119 ! now create the mesh; this also initializes parallel sharing and ghost exchange
! zero handles signal create_mesh to construct the instance/partition
00120 imesh = 0
00121 imeshp = 0
00122 call create_mesh(imesh, imeshp, MPI_COMM_WORLD, lnumv, lnume, gvids, lvpe, ltp, lcoords, lconn, &
00123 vertsPtr, entsPtr, ierr)
00124 ERROR(ierr)
! wrap the C-allocated handle arrays as 1-based Fortran pointer arrays
00125 call c_f_pointer(vertsPtr, verts, [lnumv])
00126 call c_f_pointer(entsPtr, ents, [lnume])
00127
00128 ! get/report number of vertices, elements
00129 call iMesh_getRootSet(%VAL(imesh), root_set, ierr)
00130 ERROR(ierr)
00131 iv = 0
00132 ie = 0
! "All" variants return counts aggregated across all parts/ranks
00133 call iMeshP_getNumOfTypeAll(%VAL(imesh), %VAL(imeshp), %VAL(root_set), %VAL(iBase_VERTEX), iv, ierr)
00134 ERROR(ierr)
00135 call iMeshP_getNumOfTypeAll(%VAL(imesh), %VAL(imeshp), %VAL(root_set), %VAL(iBase_FACE), ie, ierr)
00136 ERROR(ierr)
00137 if (rank .eq. 0) then
00138 write(0,*) "Number of vertices = ", iv
00139 write(0,*) "Number of entities = ", ie
00140 endif
00141
00142 ! from here, can use verts and ents as (1-based) arrays of entity handles for input to other iMesh functions
00143
00144 call MPI_FINALIZE(ierr)
00145 #else
! NOTE(review): Fortran does not interpret "\n" escapes in string
! literals, so the backslash and n are printed literally here
00146 write(0, *) "compile with MPI for better experience\n"
00147 #endif
00148 stop
00149 end program PushParMeshIntoMoab
00150
00151 #ifdef MOAB_HAVE_MPI
00152 subroutine create_mesh( &
00153 ! interfaces
00154 imesh, imeshp, &
00155 ! input
00156 comm, numv, nume, vgids, nvpe, tp, posn, iconn, &
00157 ! output
00158 vertsPtr, entsPtr, ierr)
00159 !
00160 ! create a mesh with numv vertices and nume elements, with elements of type tp
00161 ! vertices have positions in posn (3 coordinates each, interleaved xyzxyz...), indexed from 0
00162 ! elements have nvpe vertices per entity, with connectivity indices stored in iconn, referencing
00163 ! vertices using 0-based indices; vertex and entity handles are output in arrays passed in
00164 !
00165 ! if imesh/imeshp are 0, imesh/imeshp are initialized in this subroutine
00166 !
! In the MPI build this also: adds the new elements to the local part,
! tags vertices with their GLOBAL_ID (vgids), resolves shared entities
! across ranks (iMeshP_syncMeshAll), and creates one layer of ghost
! face entities bridged through vertices (iMeshP_createGhostEntsAll).
00167
00168 use ISO_C_BINDING
00169 implicit none
00170
00171 # include "iMeshP_f.h"
00172 # include "mpif.h"
00173
00174 ! subroutine arguments
00175 iMesh_Instance imesh
00176 TYPE(C_PTR) :: vertsPtr, entsPtr
00177 integer numv, nume, nvpe, vgids(0:*), iconn(0:*), ierr, tp
00178 real*8 posn(0:*)
00179 #ifdef MOAB_HAVE_MPI
00180 iMeshP_PartitionHandle imeshp
00181 integer comm
00182 #endif
00183
00184 ! local variables
! NOTE(review): several locals are never used below (iv, ie, root_set,
! file_set, filename, and comm_rank/comm_sz beyond their queries)
00185 integer comm_sz, comm_rank, numa, numo, iv, ie
00186 TYPE(C_PTR) :: statsPtr
00187 integer, allocatable, target :: stats(:)
00188 iBase_TagHandle tagh
00189 integer i
00190 iBase_EntityHandle, pointer :: verts(:), ents(:)
00191 iBase_EntityHandle, allocatable :: conn(:)
00192 iBase_EntitySetHandle root_set
00193 iBase_EntitySetHandle file_set
00194 #ifdef MOAB_HAVE_MPI
00195 IBASE_HANDLE_T mpi_comm_c
00196 TYPE(C_PTR) :: partsPtr
00197 iMeshP_PartHandle, pointer :: parts(:)
00198 iMeshP_PartHandle part
00199 integer partsa, partso
00200 character (len=10) filename
00201 #endif
00202
00203 ! create the Mesh instance
00204 if (imesh .eq. 0) then
00205 call iMesh_newMesh("MOAB", imesh, ierr)
00206 end if
00207
00208 #ifdef MOAB_HAVE_MPI
! create (or look up) the partition and the local part that will own
! the entities built below
00209 if (imeshp .eq. 0) then
! NOTE(review): MPI_COMM_WORLD is hardcoded here; the comm dummy
! argument is only used for the rank/size queries below -- confirm
! this is intended if called with a sub-communicator
00210 call iMeshP_getCommunicator(%VAL(imesh), MPI_COMM_WORLD, mpi_comm_c, ierr)
00211 ERROR(ierr)
00212 call iMeshP_createPartitionAll(%VAL(imesh), %VAL(mpi_comm_c), imeshp, ierr)
00213 ERROR(ierr)
00214 call iMeshP_createPart(%VAL(imesh), %VAL(imeshp), part, ierr)
00215 ERROR(ierr)
00216 else
! partition already exists: reuse the first local part
00217 partsa = 0
00218 call iMeshP_getLocalParts(%VAL(imesh), %VAL(imeshp), partsPtr, partsa, partso, ierr)
00219 ERROR(ierr)
00220 call c_f_pointer(partsPtr, parts, [partso])
00221 part = parts(1)
00222 end if
00223 call MPI_COMM_RANK(comm, comm_rank, ierr)
00224 ERROR(ierr)
00225 call MPI_COMM_SIZE(comm, comm_sz, ierr)
00226 ERROR(ierr)
00227 #endif
00228
00229 ! create the vertices, all in one call
! numa = 0 tells the binding to allocate the output handle array itself
00230 numa = 0
00231 call iMesh_createVtxArr(%VAL(imesh), %VAL(numv), %VAL(iBase_INTERLEAVED), posn, %VAL(3*numv), &
00232 vertsPtr, numa, numo, ierr)
00233 ERROR(ierr)
00234
00235 ! fill in the connectivity array, based on indexing from iconn
! translate 0-based vertex indices into the handles just created
! (verts is a 1-based Fortran view of the C array, hence 1+iconn(i))
00236 allocate (conn(0:nvpe*nume-1))
00237 call c_f_pointer(vertsPtr, verts, [numv])
00238 do i = 0, nvpe*nume-1
00239 conn(i) = verts(1+iconn(i))
00240 end do
00241 ! create the elements
00242 numa = 0
! stats receives per-entity creation status codes
00243 allocate(stats(0:nume-1))
00244 statsPtr = C_LOC(stats(0))
! NOTE(review): ierr (and the per-entity stats) are not checked after
! this call, unlike every other binding call in this routine -- confirm
00245 call iMesh_createEntArr(%VAL(imesh), %VAL(tp), conn, %VAL(nvpe*nume), &
00246 entsPtr, numa, numo, statsPtr, numa, numo, ierr)
00247 deallocate(stats)
00248 deallocate(conn)
00249
00250 #ifdef MOAB_HAVE_MPI
00251 ! take care of parallel stuff
00252
00253 ! add entities to part, using iMesh
00254 call c_f_pointer(entsPtr, ents, [numo])
00255 call iMesh_addEntArrToSet(%VAL(imesh), ents, %VAL(numo), %VAL(part), ierr)
00256 ERROR(ierr)
00257 ! set global ids on vertices, needed for sharing between procs
! trailing %VAL(9) is the length of the tag name "GLOBAL_ID"
00258 call iMesh_getTagHandle(%VAL(imesh), "GLOBAL_ID", tagh, ierr, %VAL(9))
00259 if (iBase_SUCCESS .ne. ierr) then
00260 ! didn't get handle, need to create the tag
00261 call iMesh_createTag(%VAL(imesh), "GLOBAL_ID", %VAL(iBase_INTEGER), tagh, ierr)
00262 ERROR(ierr)
00263 end if
00264 call iMesh_setIntArrData(%VAL(imesh), verts, %VAL(numv), %VAL(tagh), vgids, %VAL(numv), ierr)
00265 ERROR(ierr)
00266 ! now resolve shared verts and exchange ghost cells
00267 call iMeshP_syncMeshAll(%VAL(imesh), %VAL(imeshp), ierr)
00268 ERROR(ierr)
00269
! ghost parameters: entity dim 2 (faces), bridge dim 1? / num layers --
! NOTE(review): argument meanings (2,1,1,0) not verifiable from this
! file; check against the iMeshP_createGhostEntsAll signature
00270 call iMeshP_createGhostEntsAll(%VAL(imesh), %VAL(imeshp), %VAL(2), %VAL(1), %VAL(1), %VAL(0), ierr)
00271 ERROR(ierr)
00272
00273 #endif
00274
00275 return
00276 end subroutine create_mesh
00277 #endif