MOAB: Mesh Oriented datABase  (version 5.3.1)
migrate_nontrivial.cpp
/*
 * migrate_nontrivial.cpp
 *
 *  migrate_nontrivial contains tests for migrating meshes in parallel environments with the iMOAB
 *  API, using nontrivial partitions. For starters, we use Zoltan to compute the partition in
 *  parallel and then use the result to migrate the mesh. The trivial partition gave terrible
 *  results for MPAS-type meshes, which are numbered like a fractal; the resulting migrations were
 *  like Swiss cheese, full of holes. A mesh is read on the sender tasks; we use graph-based or
 *  geometry-based methods, and modify the ZoltanPartitioner to add the needed methods.
 *
 *  The mesh is sent to the receiver tasks with nonblocking MPI_Isend calls, and then received
 *  with blocking MPI_Recv calls.
 *
 *  We do not modify the GLOBAL_ID tag; we assume it was set correctly before we started.
 */
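
/*
 *  A typical invocation (illustrative only: the executable name, mesh path, and task layout are
 *  examples, and the options are the ones parsed in main() below) might look like
 *
 *      mpiexec -n 3 ./migrate_nontrivial -f unittest/field1.h5m -a 0 -b 1 -c 0 -d 2 -t 1
 *
 *  which would read the mesh on tasks 0-1, compute a graph partition, and migrate the mesh to
 *  tasks 0-2.
 */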

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/ProgOptions.hpp"

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;

//#define GRAPH_INFO

#define CHECKRC( rc, message )            \
    if( 0 != ( rc ) )                     \
    {                                     \
        printf( "Error: %s\n", message ); \
        return MB_FAILURE;                \
    }
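
/*
 *  CHECKRC is placed immediately after an iMOAB or MPI call whose integer return code it checks;
 *  on a nonzero code it prints the message and returns MB_FAILURE from the enclosing function,
 *  e.g. (as used throughout this file):
 *
 *      ierr = iMOAB_Initialize( 0, 0 );
 *      CHECKRC( ierr, "can't initialize iMOAB" )
 */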

int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

ErrorCode migrate_graph( const char* filename );
ErrorCode migrate_geom( const char* filename );
ErrorCode migrate_trivial( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;           // component ids are unique over all pes, and established in advance;
int nghlay;                     // number of ghost layers for loading the file
std::vector< int > groupTasks;  // at most 4 tasks
int startG1, startG2, endG1, endG2;

MPI_Comm jcomm;  // will be a copy of the global
MPI_Group jgroup;

ErrorCode migrate_smart( const char* filename, const char* outfile, int partMethod )
{
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    groupTasks.resize( endG1 - startG1 + 1 );
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, &groupTasks[0], &group1 );CHECKRC( ierr, "can't create group1" )

    groupTasks.resize( endG2 - startG2 + 1 );
    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, &groupTasks[0], &group2 );CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    int context_id = -1;  // plain migrate, default context
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );CHECKRC( ierr, "can't create comm2" )
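    // note: MPI_Comm_create_group returns MPI_COMM_NULL on ranks that are not members of the
    // given group, so comm1/comm2 are valid only on the sender/receiver tasks; every use of
    // them below is guarded accordingly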

    ierr = iMOAB_Initialize( 0, 0 );  // nothing is needed from argc/argv yet, so pass zeros
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to component ids, just to differentiate between them
    // the par comm graph is unique between components
    compid1 = 4;
    compid2 = 7;

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );CHECKRC( ierr, "can't register app1 " )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );CHECKRC( ierr, "can't register app2 " )
    }

    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );CHECKRC( ierr, "can't load mesh " )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &partMethod );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
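        // iMOAB_SendMesh computes the partition with the requested method and posts nonblocking
        // sends; the send buffers stay alive until iMOAB_FreeSenderBuffers is called below,
        // after the receivers are done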
#ifdef GRAPH_INFO
        int is_sender = 1;
        int context   = compid2;
        iMOAB_DumpCommGraph( pid1, &context, &is_sender, "MigrateS" );
#endif
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );CHECKRC( ierr, "cannot write received mesh" )
#ifdef GRAPH_INFO
        int is_sender = 0;
        int context   = compid1;
        iMOAB_DumpCommGraph( pid2, &context, &is_sender, "MigrateR" );
#endif
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    context_id = compid2;  // even for default migrate, be more explicit
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
// wrappers selecting the partition method passed to migrate_smart: 0 - trivial, 1 - graph, 2 - geometric
ErrorCode migrate_graph( const char* filename )
{
    return migrate_smart( filename, "migrate_graph.h5m", 1 );
}

ErrorCode migrate_geom( const char* filename )
{
    return migrate_smart( filename, "migrate_geom.h5m", 2 );
}

ErrorCode migrate_trivial( const char* filename )
{
    return migrate_smart( filename, "migrate_trivial.h5m", 0 );
}

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    ProgOptions opts;
    int typeTest = 3;
    // std::string inputfile, outfile("out.h5m"), netcdfFile, variable_name, sefile_name;
    std::string filename;
    filename = TestDir + "unittest/field1.h5m";
    startG1  = 0;
    startG2  = 0;
    endG1    = 0;
    endG2    = 1;

    opts.addOpt< std::string >( "file,f", "source file", &filename );

    opts.addOpt< int >( "startSender,a", "start task for source layout", &startG1 );
    opts.addOpt< int >( "endSender,b", "end task for source layout", &endG1 );
    opts.addOpt< int >( "startRecv,c", "start task for receiver layout", &startG2 );
    opts.addOpt< int >( "endRecv,d", "end task for receiver layout", &endG2 );
    opts.addOpt< int >( "typeTest,t", "test types (0 - trivial, 1 - graph, 2 - geom, 3 - both graph and geometry)",
                        &typeTest );

    opts.parseCommandLine( argc, argv );

    if( rank == 0 )
    {
        std::cout << " input file : " << filename << "\n";
        std::cout << " sender   on tasks: " << startG1 << ":" << endG1 << "\n";
        std::cout << " receiver on tasks: " << startG2 << ":" << endG2 << "\n";
00232         std::cout << " type migrate: " << typeTest << " (0 - trivial, 1 graph , 2 geom, 3 both graph and geom  ) \n";
    }

    int num_errors = 0;

    if( 0 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_trivial, filename.c_str() );
    if( 3 == typeTest || 1 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_graph, filename.c_str() );
    if( 3 == typeTest || 2 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_geom, filename.c_str() );

    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

#undef VERBOSE