MOAB: Mesh Oriented datABase (version 5.3.0)
migrate_nontrivial.cpp
/*
 * migrate_nontrivial.cpp
 *
 *  Tests for migrating meshes in parallel environments with the iMOAB API, using nontrivial
 *  partitions. For starters, we use Zoltan to compute a partition in parallel and then use the
 *  result to migrate the mesh. The trivial partition gave terrible results for MPAS-type meshes,
 *  which are numbered like a fractal: the resulting migrations looked like Swiss cheese, full of
 *  holes. A mesh is read on the sender tasks; we use graph-based or geometry-based methods, and
 *  modify the ZoltanPartitioner to add the needed methods.
 *
 *  The mesh is sent to the receiver tasks with nonblocking MPI_Isend calls, and then received
 *  with blocking MPI_Recv calls.
 *
 *  We do not modify the GLOBAL_ID tag; we assume it was set correctly before we started.
 */
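/*
 *  Example invocation (a sketch, not part of the original source: the executable name, the
 *  "mpiexec" launcher, and the input file name are assumptions; the options are the ones
 *  declared via ProgOptions in main()). Read the mesh on tasks 0-1 and migrate it to
 *  tasks 2-4 with the graph method:
 *
 *    mpiexec -np 5 ./migrate_nontrivial --file input.h5m --startSender 0 --endSender 1 \
 *            --startRecv 2 --endRecv 4 --typeTest 1
 *
 *  --typeTest selects the test: 0 trivial, 1 graph, 2 geometry, 3 both graph and geometry.
 */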

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/ProgOptions.hpp"

#include <cstdio>    // printf
#include <cstring>   // strlen
#include <iostream>  // std::cout
#include <string>    // std::string
#include <vector>    // std::vector

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;
//#define GRAPH_INFO
#define CHECKRC( rc, message )     \
    if( 0 != ( rc ) )              \
    {                              \
        printf( "%s\n", message ); \
        return MB_FAILURE;         \
    }
// reduce the local error flag over all tasks; nonzero if any task reported an error
int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

// run a single test function on the given file; report pass/fail on the root task
int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

ErrorCode migrate_graph( const char* filename );
ErrorCode migrate_geom( const char* filename );
ErrorCode migrate_trivial( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;           // component ids are unique over all PEs, and established in advance
int nghlay;                     // number of ghost layers for loading the file
std::vector< int > groupTasks;  // ranks of the tasks in the current group
int startG1, startG2, endG1, endG2;

MPI_Comm jcomm;  // will be a copy of the global communicator
MPI_Group jgroup;

ErrorCode migrate_smart( const char* filename, const char* outfile, int partMethod )
{
    // first create the two MPI groups, for senders and receivers

    std::string filen( filename );
    MPI_Group group1, group2;
    groupTasks.resize( endG1 - startG1 + 1 );
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, &groupTasks[0], &group1 );
    CHECKRC( ierr, "can't create group1" )

    groupTasks.resize( endG2 - startG2 + 1 );
    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, &groupTasks[0], &group2 );
    CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    int context_id = -1;  // plain migrate, default context
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
    CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
    CHECKRC( ierr, "can't create comm2" )

    ierr = iMOAB_Initialize( 0, 0 );  // nothing is needed from argc, argv yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to the component ids, just to differentiate between them;
    // the par comm graph is unique between components
    compid1 = 4;
    compid2 = 7;

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
        CHECKRC( ierr, "can't register app1" )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
        CHECKRC( ierr, "can't register app2" )
    }

    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay, filen.length(),
                               strlen( readopts.c_str() ) );
        CHECKRC( ierr, "can't load mesh" )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &partMethod );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
#ifdef GRAPH_INFO
        int is_sender = 1;
        int context   = -1;
        iMOAB_DumpCommGraph( pid1, &context, &is_sender, "MigrateS", strlen( "MigrateS" ) );
#endif
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr =
            iMOAB_WriteMesh( pid2, (char*)outfile, (char*)wopts.c_str(), strlen( outfile ), strlen( wopts.c_str() ) );
        CHECKRC( ierr, "cannot write received mesh" )
#ifdef GRAPH_INFO
        int is_sender = 0;
        int context   = -1;
        iMOAB_DumpCommGraph( pid2, &context, &is_sender, "MigrateR", strlen( "MigrateR" ) );
#endif
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );
        CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );
        CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();
    CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
// wrappers for the partition methods passed to migrate_smart: 1 = graph, 2 = geometry, 0 = trivial
ErrorCode migrate_graph( const char* filename )
{
    return migrate_smart( filename, "migrate_graph.h5m", 1 );
}

ErrorCode migrate_geom( const char* filename )
{
    return migrate_smart( filename, "migrate_geom.h5m", 2 );
}

ErrorCode migrate_trivial( const char* filename )
{
    return migrate_smart( filename, "migrate_trivial.h5m", 0 );
}

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    ProgOptions opts;
    int typeTest = 3;
    // std::string inputfile, outfile("out.h5m"), netcdfFile, variable_name, sefile_name;
    std::string filename;
    filename = TestDir + "/field1.h5m";
    startG1  = 0;
    startG2  = 0;
    endG1    = 0;
    endG2    = 1;

    opts.addOpt< std::string >( "file,f", "source file", &filename );

    opts.addOpt< int >( "startSender,a", "start task for source layout", &startG1 );
    opts.addOpt< int >( "endSender,b", "end task for source layout", &endG1 );
    opts.addOpt< int >( "startRecv,c", "start task for receiver layout", &startG2 );
    opts.addOpt< int >( "endRecv,d", "end task for receiver layout", &endG2 );

    opts.addOpt< int >( "typeTest,t", "test type (0 - trivial, 1 - graph, 2 - geom, 3 - both graph and geometry)",
                        &typeTest );

    opts.parseCommandLine( argc, argv );

    if( rank == 0 )
    {
        std::cout << " input file : " << filename << "\n";
        std::cout << " sender   on tasks: " << startG1 << ":" << endG1 << "\n";
        std::cout << " receiver on tasks: " << startG2 << ":" << endG2 << "\n";
        std::cout << " type migrate: " << typeTest << " (0 - trivial, 1 - graph, 2 - geom, 3 - both graph and geom)\n";
    }

    int num_errors = 0;

    if( 0 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_trivial, filename.c_str() );
    if( 3 == typeTest || 1 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_graph, filename.c_str() );
    if( 3 == typeTest || 2 == typeTest ) num_errors += RUN_TEST_ARG2( migrate_geom, filename.c_str() );

    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

#undef VERBOSE