MOAB: Mesh Oriented datABase  (version 5.2.1)
migrate_test.cpp
/*
 * migrate_test contains tests for migrating meshes in parallel environments with the iMOAB API.
 * These methods are also exercised by the example MigrateMesh.F90, with variable
 * numbers of processes; migrate_test is usually launched on 2 processes, and it tests
 * various cases.
 * A mesh is read on the sender tasks, sent to the receiver tasks, and then written out
 * for verification. It depends on parallel HDF5 for reading and writing in parallel.
 */
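// Typical invocations, assuming a standard MPI launcher and the default build layout
// (the launcher name, process counts, and mesh path below are illustrative only):
//   mpiexec -np 2 ./migrate_test               runs the 1- and 2-task cases
//   mpiexec -np 4 ./migrate_test mymesh.h5m    also runs the 4-task cases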

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"

#include <cstdio>    // printf
#include <cstring>   // strlen
#include <iostream>  // std::cout

#define RUN_TEST_ARG2( A, B ) run_test( &A, #A, B )

using namespace moab;

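// print the given message and fail the current test when an iMOAB/MPI call returns a nonzero code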
#define CHECKRC( rc, message )     \
    if( 0 != rc )                  \
    {                              \
        printf( "%s\n", message ); \
        return MB_FAILURE;         \
    }

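// reduce each rank's error flag with MPI_MAX, so all ranks agree whether any task failed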
int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

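// each migrate_<M>_<N> test below migrates the mesh from M sender tasks to N receiver tasks;
// the task ranges are communicated through the startG1/endG1 and startG2/endG2 globals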
ErrorCode migrate_1_1( const char* filename );
ErrorCode migrate_1_2( const char* filename );
ErrorCode migrate_2_1( const char* filename );
ErrorCode migrate_2_2( const char* filename );
ErrorCode migrate_4_2( const char* filename );
ErrorCode migrate_2_4( const char* filename );
ErrorCode migrate_4_3( const char* filename );
ErrorCode migrate_overlap( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;  // component ids are unique across all PEs, and established in advance
int nghlay;            // number of ghost layers for loading the file
int groupTasks[4];     // at most 4 tasks per group
int startG1, startG2, endG1, endG2;

MPI_Comm jcomm;  // will be a duplicate of the global communicator
MPI_Group jgroup;

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    std::string filename;
    filename = TestDir + "/field1.h5m";
    if( argc > 1 ) { filename = argv[1]; }
    int num_errors = 0;
    num_errors += RUN_TEST_ARG2( migrate_1_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_1_2, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_2, filename.c_str() );
    if( size >= 4 )
    {
        num_errors += RUN_TEST_ARG2( migrate_4_2, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_2_4, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_4_3, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_overlap, filename.c_str() );
    }
    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

ErrorCode migrate( const char* filename, const char* outfile )
{
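    // overall flow: build the sender and receiver groups/communicators, load the mesh on
    // the sender component and migrate it to the receiver component, write it out, then
    // send a dense element tag back in the opposite direction and write the result as well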
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, groupTasks, &group1 );
    CHECKRC( ierr, "can't create group1" )

    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, groupTasks, &group2 );
    CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
    CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
    CHECKRC( ierr, "can't create comm2" )

    ierr = iMOAB_Initialize( 0, 0 );  // nothing is needed from argc/argv yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to component ids, just to differentiate between them;
    // the par comm graph is unique between components
    compid1        = 4;
    compid2        = 7;
    int context_id = -1;  // default context

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
        CHECKRC( ierr, "can't register app1" )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
        CHECKRC( ierr, "can't register app2" )
    }

    int method = 0;  // trivial partition for sending
    if( comm1 != MPI_COMM_NULL )
    {
        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay, filen.length(), readopts.length() );
        CHECKRC( ierr, "can't load mesh" )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &method );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts = "PARALLEL=WRITE_PART;";
        ierr = iMOAB_WriteMesh( pid2, (char*)outfile, (char*)wopts.c_str(), strlen( outfile ), wopts.length() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    // exchange a tag, from component to component; one side receives and the other
    // sends the tag, and each side acts only when its communicator is not null
    int size_tag  = 1;  // a double dense tag, on elements
    int tagType   = DENSE_DOUBLE;
    int tagIndex2 = 0, tagIndex1 = 0;  // these will be tag indices on each app pid

    std::string fileAfterTagMigr( outfile );  // ends in .h5m
    int sizen = fileAfterTagMigr.length();
    fileAfterTagMigr.erase( sizen - 4, 4 );  // erase the .h5m extension
    fileAfterTagMigr = fileAfterTagMigr + "_tag.h5m";

    // now send a tag from component 2, towards component 1
    if( comm2 != MPI_COMM_NULL )
    {
        ierr =
            iMOAB_DefineTagStorage( pid2, "element_field", &tagType, &size_tag, &tagIndex2, strlen( "element_field" ) );
        CHECKRC( ierr, "failed to get tag element_field" )
        // this tag already exists in the file

        // first, send from compid2 to compid1, from comm2, using the common joint comm;
        // as always, use nonblocking sends
        ierr = iMOAB_SendElementTag( pid2, "element_field", &jcomm, &context_id, strlen( "element_field" ) );
        CHECKRC( ierr, "cannot send tag values" )
    }
    // receive on component 1
    if( comm1 != MPI_COMM_NULL )
    {
        ierr =
            iMOAB_DefineTagStorage( pid1, "element_field", &tagType, &size_tag, &tagIndex1, strlen( "element_field" ) );
        CHECKRC( ierr, "failed to get tag element_field" )

        ierr = iMOAB_ReceiveElementTag( pid1, "element_field", &jcomm, &context_id, strlen( "element_field" ) );
        CHECKRC( ierr, "cannot receive tag values" )
        std::string wopts = "PARALLEL=WRITE_PART;";
        ierr = iMOAB_WriteMesh( pid1, (char*)fileAfterTagMigr.c_str(), (char*)wopts.c_str(), fileAfterTagMigr.length(),
                                wopts.length() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm2 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid2, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );
        CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );
        CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();
    CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
// migrate from task 0 to task 1, non-overlapping
ErrorCode migrate_1_1( const char* filename )
{
    startG1 = endG1 = 0;
    startG2 = endG2 = 1;
    return migrate( filename, "migrate11.h5m" );
}
// migrate from task 0 to 2 tasks (0 and 1)
ErrorCode migrate_1_2( const char* filename )
{
    startG1 = endG1 = startG2 = 0;
    endG2                     = 1;
    return migrate( filename, "migrate12.h5m" );
}

// migrate from 2 tasks (0, 1) to 1 task (0)
ErrorCode migrate_2_1( const char* filename )
{
    startG1 = endG2 = startG2 = 0;
    endG1                     = 1;
    return migrate( filename, "migrate21.h5m" );
}

// migrate from 2 tasks to 2 tasks (overkill)
ErrorCode migrate_2_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG1 = endG2 = 1;
    return migrate( filename, "migrate22.h5m" );
}
// migrate from 4 tasks to 2 tasks
ErrorCode migrate_4_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 1;
    endG1             = 3;
    return migrate( filename, "migrate42.h5m" );
}

// migrate from 2 tasks to 4 tasks
ErrorCode migrate_2_4( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 3;
    endG1             = 1;
    return migrate( filename, "migrate24.h5m" );
}

// migrate from 4 tasks to 3 tasks
ErrorCode migrate_4_3( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 2;
    endG1             = 3;
    return migrate( filename, "migrate43.h5m" );
}

// migrate from 2 tasks (0, 1) to 2 tasks (1, 2), with task 1 in both groups
ErrorCode migrate_overlap( const char* filename )
{
    startG1 = 0;
    startG2 = 1;
    endG1   = 1;
    endG2   = 2;
    return migrate( filename, "migrate_over.h5m" );
}