MOAB: Mesh Oriented datABase  (version 5.4.1)
migrate_nontrivial.cpp
/*
 * migrate_nontrivial.cpp
 *
 *  migrate_nontrivial contains tests for migrating meshes in parallel environments with the
 *  iMOAB API, using nontrivial partitions. For starters, we use Zoltan to compute the partition
 *  in parallel, and then use the result to migrate the mesh. The trivial partition gave terrible
 *  results for MPAS-type meshes, which were numbered like a fractal: the resulting migrations
 *  were like Swiss cheese, full of holes. A mesh is read on the sender tasks; we use graph-based
 *  or geometry-based methods, and will modify the ZoltanPartitioner to add the needed methods.
 *
 *  The mesh is sent to the receiver tasks with nonblocking MPI_Isend calls, and then received
 *  with blocking MPI_Recv calls.
 *
 *  We do not modify the GLOBAL_ID tag; we assume it was set correctly before we started.
 */
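
/*
 *  Example invocation (illustrative only: the executable name, task ranges and input
 *  file are placeholders chosen to match the options parsed in main() below):
 *
 *      mpiexec -np 4 ./migrate_nontrivial -f field1.h5m -a 0 -b 1 -c 2 -d 3 -t 1
 *
 *  This would read the mesh on tasks 0-1, compute a graph (Zoltan) partition for the
 *  receiver layout, migrate the mesh to tasks 2-3, and write migrate_graph.h5m.
 */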

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/ProgOptions.hpp"

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;

//#define GRAPH_INFO

#define CHECKRC( rc, message )            \
    if( 0 != ( rc ) )                     \
    {                                     \
        printf( "Error: %s\n", message ); \
        return MB_FAILURE;                \
    }

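// return nonzero on every rank if any rank reported an error (max-reduction over MPI_COMM_WORLD)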
int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

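// run one test function on all ranks; rank 0 prints whether it passed or failed on any rank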
int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

ErrorCode migrate_graph( const char* filename );
ErrorCode migrate_geom( const char* filename );
ErrorCode migrate_trivial( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;           // component ids are unique over all PEs, and established in advance
int nghlay;                     // number of ghost layers for loading the file
std::vector< int > groupTasks;  // ranks used to build each MPI group
int startG1, startG2, endG1, endG2;

MPI_Comm jcomm;  // will be a copy of MPI_COMM_WORLD
MPI_Group jgroup;

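// read the mesh on the sender group (comm1), migrate it to the receiver group (comm2) using the
// given partition method (0 = trivial, 1 = graph, 2 = geometry), and write the received mesh to outfile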
ErrorCode migrate_smart( const char* filename, const char* outfile, int partMethod )
{
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    groupTasks.resize( endG1 - startG1 + 1 );
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, &groupTasks[0], &group1 );
    CHECKRC( ierr, "can't create group1" )

    groupTasks.resize( endG2 - startG2 + 1 );
    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, &groupTasks[0], &group2 );
    CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    int context_id = -1;  // plain migrate, default context
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
    CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
    CHECKRC( ierr, "can't create comm2" )

    ierr = iMOAB_Initialize( 0, 0 );  // nothing needed from argc/argv yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to the component ids, just to differentiate between them;
    // the par comm graph is unique between components
    compid1 = 4;
    compid2 = 7;

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
        CHECKRC( ierr, "can't register app1 " )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
        CHECKRC( ierr, "can't register app2 " )
    }

    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );
        CHECKRC( ierr, "can't load mesh " )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &partMethod );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
#ifdef GRAPH_INFO
        int is_sender = 1;
        int context   = compid2;
        iMOAB_DumpCommGraph( pid1, &context, &is_sender, "MigrateS" );
#endif
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
#ifdef GRAPH_INFO
        int is_sender = 0;
        int context   = compid1;
        iMOAB_DumpCommGraph( pid2, &context, &is_sender, "MigrateR" );
#endif
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    context_id = compid2;  // even for default migrate, be more explicit
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );
        CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );
        CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();
    CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}

// thin wrappers selecting the partition method passed to migrate_smart:
// 1 = graph-based (Zoltan), 2 = geometry-based, 0 = trivial
ErrorCode migrate_graph( const char* filename )
{
    return migrate_smart( filename, "migrate_graph.h5m", 1 );
}

ErrorCode migrate_geom( const char* filename )
{
    return migrate_smart( filename, "migrate_geom.h5m", 2 );
}

ErrorCode migrate_trivial( const char* filename )
{
    return migrate_smart( filename, "migrate_trivial.h5m", 0 );
}

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    ProgOptions opts;
    int typeTest = 3;
    // std::string inputfile, outfile("out.h5m"), netcdfFile, variable_name, sefile_name;
    std::string filename;
    filename = TestDir + "unittest/field1.h5m";
    startG1  = 0;
    startG2  = 0;
    endG1    = 0;
    endG2    = 1;

    opts.addOpt< std::string >( "file,f", "source file", &filename );

    opts.addOpt< int >( "startSender,a", "start task for source layout", &startG1 );
    opts.addOpt< int >( "endSender,b", "end task for source layout", &endG1 );
    opts.addOpt< int >( "startRecv,c", "start task for receiver layout", &startG2 );
    opts.addOpt< int >( "endRecv,d", "end task for receiver layout", &endG2 );

    opts.addOpt< int >( "typeTest,t", "test type (0 - trivial, 1 - graph, 2 - geom, 3 - both graph and geom)",
                        &typeTest );

    opts.parseCommandLine( argc, argv );

    if( rank == 0 )
    {
        std::cout << " input file : " << filename << "\n";
        std::cout << " sender   on tasks: " << startG1 << ":" << endG1 << "\n";
        std::cout << " receiver on tasks: " << startG2 << ":" << endG2 << "\n";
        std::cout << " type migrate: " << typeTest << " (0 - trivial, 1 - graph, 2 - geom, 3 - both graph and geom)\n";
    }

    int num_errors = 0;

    if( 0 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_trivial, filename.c_str() );
    if( 3 == typeTest || 1 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_graph, filename.c_str() );
    if( 3 == typeTest || 2 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_geom, filename.c_str() );

    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

#undef VERBOSE