Mesh Oriented datABase (version 5.4.1)
Array-based unstructured mesh data structure
MigrateMesh.F90
program MigrateMesh
  implicit none

#include "moab/MOABConfig.h"
#ifdef MOAB_HAVE_MPI
#  include "mpif.h"
#else
#  error "enable parallel build"
#endif

!#define NONOVERLAP
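! when NONOVERLAP is defined, the second group is truncated (see the #ifdef NONOVERLAP
! block below) so that the sender and receiver groups become disjoint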

    ! init the parallel partition
    integer ierr, sz, rank, i
    integer newComm
    integer gcomm, comm1, comm2
    integer pid1, pid2        ! application (physics) ids returned by iMOAB registration
    integer compid1, compid2  ! component ids are unique over all PEs and established in advance
    integer nghlay            ! number of ghost layers for loading
    integer groupTasks(9)     ! run on at most 9 processes
    integer startG1, startG2, endG1, endG2    ! start and end ranks used to create the groups
    integer sizeG1, sizeG2                    ! sizes of the two groups that get created
    character*10 appname
    character*132 readopts
    character*132 filename
    character*132 outfile
    character*132 wopts
    integer allgroup, group1, group2 ! corresponding to MPI_Group in C
    integer tagcomm1, tagcomm2
    integer iMOAB_InitializeFortran, iMOAB_RegisterFortranApplication
    integer iMOAB_LoadMesh, iMOAB_SendMesh, iMOAB_ReceiveMesh, iMOAB_WriteMesh
    integer iMOAB_FreeSenderBuffers
    integer iMOAB_DeregisterApplication, iMOAB_Finalize
    integer repart_scheme, context_id

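    ! overall flow: initialize MPI, build two (possibly overlapping) groups and their
    ! communicators, register an iMOAB application on each, load the mesh on the sender
    ! side, migrate it to the receiver side, and write it out from there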
    call MPI_INIT(ierr)
    call MPI_Comm_dup(MPI_COMM_WORLD, gcomm, ierr)
    call MPI_COMM_SIZE(gcomm, sz, ierr)
    call MPI_COMM_RANK(gcomm, rank, ierr)
    call errorout(ierr, 'cannot get rank' )
    if (rank .eq. 0) print *, "size:", sz
    if ( (0 .eq. rank) .and. (sz > 9) ) then
      print *, "size is ", sz, "; run on at most 9 tasks"
      call exit(1)
    endif
    ! create 2 overlapping groups, for generality
    ! create communicators for each group;
    ! one group will represent the sender, the other group the receiver
    ! with the ranges below, roughly half of the tasks are only in group 2,
    ! roughly a quarter are only in group 1, and the remaining quarter
    ! (ranks sz/2 to 3*sz/4-1) belong to both groups
    ! the mesh will be read on the sender comm and sent to the receiver comm

    ! create new MPI groups for ranks sz/2, sz/2+1, ..., sz-1 (group 1)
    ! and ranks 0, 1, ..., 3*sz/4-1 (group 2)

    call MPI_COMM_GROUP (gcomm, allgroup, ierr)
    call errorout(ierr, 'cannot get world group' )
    ! first group: ranks sz/2 to sz-1
    startG1 = sz/2
    endG1 = sz-1
    sizeG1 = endG1 - startG1 + 1
    ! used for the newer iMOAB API (tag migration, releasing buffers)
    context_id = -1

    do i=1, sizeG1
      groupTasks(i) = startG1+i-1
    end do

    call MPI_Group_incl(allgroup, sizeG1, groupTasks, group1, ierr)
    call errorout(ierr, 'cannot create group 1' )
    ! second group: ranks 0, 1, ..., 3*sz/4-1
    startG2 = 0
    endG2 = 3*sz/4 - 1
    if (endG2 < 0) endG2 = 0  ! so it will work even for 1 task
#ifdef NONOVERLAP
    endG2 = startG1-1         ! make the two groups disjoint
#endif
    sizeG2 = endG2 - startG2 + 1
    do i=1, sizeG2
      groupTasks(i) = startG2+i-1
    enddo

    call MPI_Group_incl(allgroup, sizeG2, groupTasks, group2, ierr)
    call errorout(ierr, 'cannot create group 2' )
    if (0 .eq. rank) then
      print *, "group 1 tasks: ", (i, i=startG1, endG1)
      print *, "group 2 tasks: ", (i, i=startG2, endG2)
    endif
    ! now create both communicators
    ! on tasks that are not in a group, the corresponding communicator will be MPI_COMM_NULL
    tagcomm1 = 1
    call MPI_Comm_create_group(gcomm, group1, tagcomm1, comm1, ierr)
    call errorout(ierr, 'cannot create communicator 1' )

    tagcomm2 = 2
    call MPI_Comm_create_group(gcomm, group2, tagcomm2, comm2, ierr)
    call errorout(ierr, 'cannot create communicator 2' )

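    ! iMOAB must be initialized (once) before any application can be registered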
    ierr = iMOAB_InitializeFortran()
    call errorout(ierr, 'did not initialize fortran' )

    repart_scheme = 0 ! trivial partitioning by default
#ifdef MOAB_HAVE_ZOLTAN
    repart_scheme = 1 ! use the Zoltan graph partitioner when available
#endif
    ! give some dummy values to the component ids, just to differentiate between them;
    ! the par comm graph is unique between components
    compid1 = 4
    compid2 = 7
    if (rank == 0) print *, "initialize iMOAB fortran applications"
    if (comm1 /= MPI_COMM_NULL) then
       appname = 'phis1'//CHAR(0)
       ierr = iMOAB_RegisterFortranApplication(trim(appname), comm1, compid1, pid1)
       print *, ' register ', appname, " on rank ", rank, " pid1 ", pid1
    endif
    if (comm2 /= MPI_COMM_NULL) then
       appname = 'phis2'//CHAR(0)
       ierr = iMOAB_RegisterFortranApplication(trim(appname), comm2, compid2, pid2)
       print *, ' register ', appname, " on rank ", rank, " pid2 ", pid2
    endif

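    ! the sender group loads the mesh in parallel; READ_PART reads each task's part of
    ! the file based on the PARALLEL_PARTITION tag, and shared entities along the part
    ! boundaries are resolved afterwards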
    if (comm1 /= MPI_COMM_NULL) then
       filename = 'spherecube.h5m'//CHAR(0)
       readopts = 'PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS'//CHAR(0)
       if (rank .eq. sz-2) print *, "loading ", trim(filename), " with options ", trim(readopts)
       nghlay = 0

       ierr = iMOAB_LoadMesh(pid1, trim(filename), trim(readopts), nghlay)
       if (rank .eq. sz-1) print *, "loaded in parallel ", trim(filename), " error: ", ierr
       ierr = iMOAB_SendMesh(pid1, gcomm, group2, compid2, repart_scheme) ! send to component 2
       call errorout(ierr, 'cannot send elements' )
    endif

    if (comm2 /= MPI_COMM_NULL) then
       ierr = iMOAB_ReceiveMesh(pid2, gcomm, group1, compid1) ! receive from component 1
       call errorout(ierr, 'cannot receive elements' )
    endif

    ! we can now free the sender buffers
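    ! note: context_id was set to -1 above; here it presumably selects the default
    ! communication context created by the send/receive pair above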
    if (comm1 /= MPI_COMM_NULL) then
       ierr = iMOAB_FreeSenderBuffers(pid1, context_id)
    endif
    call MPI_Barrier(gcomm, ierr)
    call errorout(ierr, 'cannot stop at barrier' )

    if (comm2 /= MPI_COMM_NULL) then
       outfile = 'receivedMesh.h5m'//CHAR(0)
       wopts   = 'PARALLEL=WRITE_PART;'//CHAR(0)
       ! write out the mesh file to disk
       ierr = iMOAB_WriteMesh(pid2, trim(outfile), trim(wopts))
       call errorout(ierr, 'cannot write received mesh' )
    endif

    if (comm2 /= MPI_COMM_NULL) then
       ierr = iMOAB_DeregisterApplication(pid2)
       call errorout(ierr, 'cannot deregister app 2 receiver' )
    endif
    if (comm1 /= MPI_COMM_NULL) then
       ierr = iMOAB_DeregisterApplication(pid1)
       call errorout(ierr, 'cannot deregister app 1 sender' )
    endif

    ierr = iMOAB_Finalize()
    call errorout(ierr, 'did not finalize iMOAB' )

    if (MPI_COMM_NULL /= comm1) call MPI_Comm_free(comm1, ierr)
    call errorout(ierr, 'did not free comm1' )

    if (MPI_COMM_NULL /= comm2) call MPI_Comm_free(comm2, ierr)
    call errorout(ierr, 'did not free comm2' )

    call MPI_Group_free(allgroup, ierr)
    call MPI_Group_free(group1, ierr)
    call MPI_Group_free(group2, ierr)
    call MPI_Comm_free(gcomm, ierr)

    call MPI_Finalize(ierr)
    call errorout(ierr, 'did not finalize MPI' )

contains

  subroutine errorout(ierr, message)
    integer ierr
    character*(*) message
    if (ierr .ne. 0) then
      print *, message
      call exit(1)
    end if
  end subroutine errorout

end program MigrateMesh