Mesh Oriented datABase (MOAB), version 5.4.1: array-based unstructured mesh data structure
/** @example ReduceExchangeTags.cpp
 * \brief Example program that shows the use case for performing tag data exchange
 * between parallel processors in order to sync data on shared entities. The reduction
 * operation on tag data is also shown, where the user can perform any of the actions supported
 * by MPI_Op on data residing on shared entities. \n
 *
 * This example:
 * -# Initialize MPI and instantiate MOAB
 * -# Get user options: input mesh file name, tag name (default: USERTAG), tag value
 *    (default: 1.0)
 * -# Create the root and partition sets
 * -# Instantiate ParallelComm and read the mesh file in parallel using appropriate options
 * -# Create two tags: USERTAG_EXC (exchange) and USERTAG_RED (reduction)
 * -# Set tag data and exchange shared entity information between processors
 * -# Get entities in all dimensions and set local (current rank, dimension) dependent data for
 *    the exchange tag (USERTAG_EXC)
 * -# Perform exchange of tag data so that data on shared entities are synced via ParallelComm
 * -# Set tag data and reduce shared entity information between processors using MPI_SUM
 * -# Get higher-dimensional entities in the current partition and set local (current rank)
 *    dependent data for the reduce tag (USERTAG_RED)
 * -# Perform the reduction operation (MPI_SUM) on shared entities via ParallelComm
 * -# Destroy the MOAB instance and finalize MPI
 *
 * To run: \n mpiexec -n 2 ./ReduceExchangeTags \n
 * Example: \n mpiexec -n 2 ./ReduceExchangeTags ../MeshFiles/unittest/64bricks_1khex.h5m USERTAG 100 \n
 *
 */

#include "moab/Core.hpp"
#ifdef MOAB_HAVE_MPI
#include "moab/ParallelComm.hpp"
#endif
#include "MBParallelConventions.h"
#include <iostream>
#include <string>
#include <sstream>
#include <vector>
#include <cstdlib>

using namespace moab;
using namespace std;

// Error routines for use with MPI API
#define MPICHKERR( CODE, MSG )           \
    do                                   \
    {                                    \
        if( 0 != ( CODE ) )              \
        {                                \
            cerr << ( MSG ) << endl;     \
            MPI_Finalize();              \
        }                                \
    } while( false )

#define dbgprint( MSG )                          \
    do                                           \
    {                                            \
        if( !rank ) cerr << ( MSG ) << endl;     \
    } while( false )

#define dbgprintall( MSG )                                  \
    do                                                      \
    {                                                       \
        cerr << "[" << rank << "]: " << ( MSG ) << endl;    \
    } while( false )

// Function to parse input parameters
ErrorCode get_file_options( int argc, char** argv, string& filename, string& tagName, double& tagValues )
{
    // Get mesh filename
    if( argc > 1 )
        filename = string( argv[1] );
    else
        filename = string( MESH_DIR ) + string( "/64bricks_1khex.h5m" );

    // Get tag selection options
    if( argc > 2 )
        tagName = string( argv[2] );
    else
        tagName = "USERTAG";

    if( argc > 3 )
        tagValues = atof( argv[3] );
    else
        tagValues = 1.0;

    return MB_SUCCESS;
}

//
// Start of main test program
//
int main( int argc, char** argv )
{
#ifdef MOAB_HAVE_MPI
    ErrorCode err;
    int ierr, rank;
    string filename, tagName;
    double tagValue;
    MPI_Comm comm = MPI_COMM_WORLD;
    /// Parallel Read options:
    ///   PARALLEL = type {READ_PART}
    ///   PARTITION = PARALLEL_PARTITION : Partition as you read
    ///   PARALLEL_RESOLVE_SHARED_ENTS : Communicate between processors to resolve shared
    ///                                  entities consistently in parallel
    ///   PARALLEL_GHOSTS = a.b.c
    ///       : a = 3 - dimension of entities to ghost (here the highest dimension)
    ///       : b = 0 - dimension of bridge entities used to determine ghost adjacency
    ///       : c = 1 - number of ghost layers
    ///   PARALLEL_COMM = index of the ParallelComm instance to use
    string read_options = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_"
                          "ENTS;PARTITION_DISTRIBUTE;PARALLEL_GHOSTS=3.0.1;PARALLEL_COMM=0";

    // Print usage if not enough arguments
    if( argc < 1 )
    {
        cerr << "Usage: ";
        cerr << argv[0] << " <file_name> <tag_name> <tag_value>" << endl;
        cerr << "file_name : mesh file name" << endl;
        cerr << "tag_name  : name of tag to add to mesh" << endl;
        cerr << "tag_value : a double value to set on the highest-dimensional entities in "
                "the mesh for the named tag"
             << endl;

        ierr = MPI_Finalize();
        MPICHKERR( ierr, "MPI_Finalize failed; Aborting" );

        return 1;
    }

    // Initialize MPI first
    ierr = MPI_Init( &argc, &argv );
    MPICHKERR( ierr, "MPI_Init failed" );

    ierr = MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPICHKERR( ierr, "MPI_Comm_rank failed" );

    dbgprint( "********** reduce_exchange_tags **********\n" );

    // Create the MOAB instance
    Interface* mbi = new( std::nothrow ) Core;
    if( NULL == mbi ) return 1;

    // Get the input options
    err = get_file_options( argc, argv, filename, tagName, tagValue );MB_CHK_SET_ERR( err, "get_file_options failed" );

    // Print out the input parameters
    dbgprint( " Input Parameters - " );
    dbgprint( "   Filename: " << filename );
    dbgprint( "   Tag: Name=" << tagName << " Value=" << tagValue << endl );

    // Create the root and partition sets, then pass them
    // to the load_file call to be populated.
    EntityHandle rootset, partnset;
    err = mbi->create_meshset( MESHSET_SET, rootset );MB_CHK_SET_ERR( err, "Creating root set failed" );
    err = mbi->create_meshset( MESHSET_SET, partnset );MB_CHK_SET_ERR( err, "Creating partition set failed" );

    // Create the parallel communicator object with the partition handle associated with MOAB
    ParallelComm* parallel_communicator = ParallelComm::get_pcomm( mbi, partnset, &comm );

    // Load the file from disk with given options
    err = mbi->load_file( filename.c_str(), &rootset, read_options.c_str() );MB_CHK_SET_ERR( err, "MOAB::load_file failed" );

    // Create two tag handles: one for the exchange and one for the reduction operation
    dbgprint( "-Creating tag handles " << tagName << "_EXC and " << tagName << "_RED..." );
    Tag tagReduce, tagExchange;
    {
        stringstream sstr;
        // Create the exchange tag: default name = USERTAG_EXC
        sstr << tagName << "_EXC";
        // Use an integer default value to match the MB_TYPE_INTEGER tag type
        int defaultExchValue = static_cast< int >( tagValue );
        err = mbi->tag_get_handle( sstr.str().c_str(), 1, MB_TYPE_INTEGER, tagExchange, MB_TAG_CREAT | MB_TAG_DENSE,
                                   &defaultExchValue );MB_CHK_SET_ERR( err, "Retrieving tag handles failed" );

        // Create the reduction tag: default name = USERTAG_RED
        sstr.str( "" );
        sstr << tagName << "_RED";
        err = mbi->tag_get_handle( sstr.str().c_str(), 1, MB_TYPE_DOUBLE, tagReduce, MB_TAG_CREAT | MB_TAG_DENSE,
                                   &tagValue );MB_CHK_SET_ERR( err, "Retrieving tag handles failed" );
    }

    // Perform exchange of tag data
    dbgprint( "-Exchanging tags between processors " );
    {
        Range partEnts, dimEnts;
        for( int dim = 0; dim <= 3; dim++ )
        {
            // Get all entities of dimension = dim; clear first, since
            // get_entities_by_dimension appends to the output range
            dimEnts.clear();
            err = mbi->get_entities_by_dimension( rootset, dim, dimEnts, false );MB_CHK_ERR( err );

            vector< int > tagValues( dimEnts.size(), static_cast< int >( tagValue ) * ( rank + 1 ) * ( dim + 1 ) );
            // Set local (rank- and dimension-dependent) tag data for exchange
            err = mbi->tag_set_data( tagExchange, dimEnts, &tagValues[0] );MB_CHK_SET_ERR( err, "Setting local tag data failed during exchange phase" );
            // Merge entities into parent set
            partEnts.merge( dimEnts );
        }

        // Exchange tags between processors
        err = parallel_communicator->exchange_tags( tagExchange, partEnts );MB_CHK_SET_ERR( err, "Exchanging tags between processors failed" );
    }
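
    // Optional check (a minimal sketch added for illustration, not part of the original example):
    // read back the exchanged integer tag on this rank's part entities to confirm that shared
    // entities now carry synchronized data. Only Interface::tag_get_data and
    // ParallelComm::get_part_entities, already used in this example, are needed.
    {
        Range checkEnts;
        err = parallel_communicator->get_part_entities( checkEnts );MB_CHK_ERR( err );
        if( !checkEnts.empty() )
        {
            vector< int > exchanged( checkEnts.size() );
            err = mbi->tag_get_data( tagExchange, checkEnts, &exchanged[0] );MB_CHK_ERR( err );
            dbgprintall( "Exchanged tag on first part entity: " << exchanged[0] );
        }
    }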

    // Perform reduction of tag data
    dbgprint( "-Reducing tags between processors " );
    {
        Range partEnts;
        // Get all higher-dimensional entities belonging to the current partition
        err = parallel_communicator->get_part_entities( partEnts );MB_CHK_SET_ERR( err, "ParallelComm::get_part_entities failed" );

        // Output what is in the current partition set
        dbgprintall( "Number of Partitioned entities: " << partEnts.size() );
        MPI_Barrier( comm );

        // Set local (rank-dependent) tag data for reduction
        vector< double > tagValues( partEnts.size(), tagValue * ( rank + 1 ) );
        err = mbi->tag_set_data( tagReduce, partEnts, &tagValues[0] );MB_CHK_SET_ERR( err, "Setting local tag data failed during reduce phase" );

        Range dummy;
        // Reduce tag data using MPI_SUM on the interface between partitions
        err = parallel_communicator->reduce_tags( tagReduce, MPI_SUM, dummy /*partEnts*/ );MB_CHK_SET_ERR( err, "Reducing tags between processors failed" );
    }
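
    // Optional check (a minimal sketch added for illustration, not part of the original example):
    // read back the reduced double tag on this rank's part entities after the MPI_SUM reduction;
    // entities shared across ranks accumulate contributions from each sharing processor.
    {
        Range checkEnts;
        err = parallel_communicator->get_part_entities( checkEnts );MB_CHK_ERR( err );
        if( !checkEnts.empty() )
        {
            vector< double > reduced( checkEnts.size() );
            err = mbi->tag_get_data( tagReduce, checkEnts, &reduced[0] );MB_CHK_ERR( err );
            dbgprintall( "Reduced tag on first part entity: " << reduced[0] );
        }
    }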
    // Write out to an output file to visualize the reduction/exchange of tag data
    err = mbi->write_file( "test.h5m", "H5M", "PARALLEL=WRITE_PART" );MB_CHK_ERR( err );

    // Done, cleanup
    delete mbi;

    dbgprint( "\n********** reduce_exchange_tags DONE! **********" );

    MPI_Finalize();
#else
    std::cout << " Compile MOAB with MPI and HDF5 support for this example to work\n";
#endif
    return 0;
}