MOAB: Mesh Oriented datABase (version 5.4.1)
migrate_test.cpp
/*
 * migrate_test contains tests for migrating meshes in parallel environments, using the iMOAB API.
 * These methods are also tested in the example MigrateMesh.F90, with variable
 * numbers of processes; migrate_test is usually launched on 2 processes, and it tests
 * various cases.
 * A mesh is read on the sender tasks, sent to the receiver tasks, and then written out for verification.
 * It depends on parallel HDF5 for reading and writing in parallel.
 */
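/*
 * Typical launch (a sketch; the executable name is assumed to match this file and may differ
 * in an actual build tree):
 *   mpiexec -np 2 ./migrate_test                 # runs the 1->1, 1->2, 2->1 and 2->2 cases
 *   mpiexec -np 4 ./migrate_test other_mesh.h5m  # 4 ranks also enable the 4->2, 2->4, 4->3 and overlap cases
 * An optional first argument overrides the default input file (unittest/field1.h5m under TestDir).
 */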

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;

#define CHECKRC( rc, message )            \
    if( 0 != ( rc ) )                     \
    {                                     \
        printf( "Error: %s\n", message ); \
        return MB_FAILURE;                \
    }

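// Reduce a per-rank error flag across MPI_COMM_WORLD with MPI_MAX, so that every rank
// learns whether any rank reported an error (a non-zero result means at least one failure).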
int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

ErrorCode migrate_1_1( const char* filename );
ErrorCode migrate_1_2( const char* filename );
ErrorCode migrate_2_1( const char* filename );
ErrorCode migrate_2_2( const char* filename );
ErrorCode migrate_4_2( const char* filename );
ErrorCode migrate_2_4( const char* filename );
ErrorCode migrate_4_3( const char* filename );
ErrorCode migrate_overlap( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;  // component ids are unique over all PEs, and established in advance;
int nghlay;            // number of ghost layers for loading the file
int groupTasks[4];     // at most 4 tasks
int startG1, startG2, endG1, endG2;
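// The sender group spans ranks [startG1, endG1] and the receiver group spans ranks
// [startG2, endG2] of the duplicated world communicator; each test case below only sets
// these bounds before calling migrate().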

MPI_Comm jcomm;  // will be a copy of the global communicator
MPI_Group jgroup;

int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    std::string filename;
    filename = TestDir + "unittest/field1.h5m";
    if( argc > 1 )
    {
        filename = argv[1];
    }
    int num_errors = 0;
    num_errors += RUN_TEST_ARG2( migrate_1_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_1_2, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_2, filename.c_str() );
    if( size >= 4 )
    {
        num_errors += RUN_TEST_ARG2( migrate_4_2, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_2_4, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_4_3, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_overlap, filename.c_str() );
    }
    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

ErrorCode migrate( const char* filename, const char* outfile )
{
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, groupTasks, &group1 );
    CHECKRC( ierr, "can't create group1" )

    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, groupTasks, &group2 );
    CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );
    CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );
    CHECKRC( ierr, "can't create comm2" )
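    // MPI_Comm_create_group returns MPI_COMM_NULL on ranks that are not members of the
    // corresponding group, which is why every sender/receiver block below is guarded by a
    // comm1/comm2 != MPI_COMM_NULL check.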

    ierr = iMOAB_Initialize( 0, 0 );  // nothing is needed from argc/argv yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to component ids, just to differentiate between them
    // the par comm graph is unique between components
    compid1        = 4;
    compid2        = 7;
    int context_id = -1;  // default context; will be set later to compid1 or compid2

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

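    // A rank that belongs to both groups (as in migrate_2_2 or migrate_overlap) registers both
    // applications; a rank that belongs to only one group registers only that application.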
    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );
        CHECKRC( ierr, "can't register app1 " )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );
        CHECKRC( ierr, "can't register app2 " )
    }

    int method = 0;  // trivial partition for sending
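    // Component 1 loads the mesh in parallel and sends it towards the tasks of group 2;
    // component 2 receives it and writes the migrated mesh out for verification. The send
    // buffers are released only later, through iMOAB_FreeSenderBuffers.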
    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );
        CHECKRC( ierr, "can't load mesh " )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &method );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers (the barrier above ensures the receives completed)
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    // exchange a tag, from component to component
    // one is receiving, one is sending the tag; the one that is sending needs to have a
    // non-null communicator
    int size_tag  = 1;  // a double dense tag, on elements
    int tagType   = DENSE_DOUBLE;
    int tagIndex2 = 0, tagIndex1 = 0;  // these will be tag indices on each app pid

    std::string fileAfterTagMigr( outfile );  // has h5m
    int sizen = fileAfterTagMigr.length();
    fileAfterTagMigr.erase( sizen - 4, 4 );  // erase extension .h5m
    fileAfterTagMigr = fileAfterTagMigr + "_tag.h5m";

    // now send a tag from component 2, towards component 1
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid2, "element_field", &tagType, &size_tag, &tagIndex2 );
        CHECKRC( ierr, "failed to get tag element_field " );
        // this tag already exists in the file

        // first, send from compid2 to compid1, from comm2, using the common joint comm
        // as always, use nonblocking sends
        // context_id should now be compid1
        context_id = compid1;
        ierr       = iMOAB_SendElementTag( pid2, "element_field", &jcomm, &context_id );
        CHECKRC( ierr, "cannot send tag values" )
    }
    // receive on component 1
    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid1, "element_field", &tagType, &size_tag, &tagIndex1 );
        CHECKRC( ierr, "failed to get tag element_field " );
        context_id = compid2;
        ierr       = iMOAB_ReceiveElementTag( pid1, "element_field", &jcomm, &context_id );
        CHECKRC( ierr, "cannot receive tag values" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid1, fileAfterTagMigr.c_str(), wopts.c_str() );
        CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm2 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid2, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );
        CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );
        CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();
    CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
// migrate from task 0 to task 1, non-overlapping
ErrorCode migrate_1_1( const char* filename )
{
    startG1 = endG1 = 0;
    startG2 = endG2 = 1;
    return migrate( filename, "migrate11.h5m" );
}
// migrate from task 0 to 2 tasks (0 and 1)
ErrorCode migrate_1_2( const char* filename )
{
    startG1 = endG1 = startG2 = 0;
    endG2                     = 1;
    return migrate( filename, "migrate12.h5m" );
}

// migrate from 2 tasks (0, 1) to 1 task (0)
ErrorCode migrate_2_1( const char* filename )
{
    startG1 = endG2 = startG2 = 0;
    endG1                     = 1;
    return migrate( filename, "migrate21.h5m" );
}

// migrate from 2 tasks to 2 tasks (overkill)
ErrorCode migrate_2_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG1 = endG2 = 1;
    return migrate( filename, "migrate22.h5m" );
}
// migrate from 4 tasks (0-3) to 2 tasks (0, 1)
ErrorCode migrate_4_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 1;
    endG1             = 3;
    return migrate( filename, "migrate42.h5m" );
}

// migrate from 2 tasks (0, 1) to 4 tasks (0-3)
ErrorCode migrate_2_4( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 3;
    endG1             = 1;
    return migrate( filename, "migrate24.h5m" );
}

// migrate from 4 tasks (0-3) to 3 tasks (0-2)
ErrorCode migrate_4_3( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 2;
    endG1             = 3;
    return migrate( filename, "migrate43.h5m" );
}

// migrate from 2 tasks (0, 1) to 2 tasks (1, 2); the sender and receiver groups overlap on task 1
ErrorCode migrate_overlap( const char* filename )
{
    startG1 = 0;
    startG2 = 1;
    endG1   = 1;
    endG2   = 2;
    return migrate( filename, "migrate_over.h5m" );
}