MOAB: Mesh Oriented datABase  (version 5.3.1)
migrate_test.cpp
/*
 * migrate_test contains tests for migrating meshes in parallel environments with the iMOAB API.
 * These methods are also exercised in the example MigrateMesh.F90, with variable
 * numbers of processes; migrate_test is usually launched on 2 processes, and it tests
 * various cases.
 * A mesh is read on the sender tasks, sent to the receiver tasks, and then written out for verification.
 * It depends on parallel HDF5 for reading and writing in parallel.
 */

#include "moab/ParallelComm.hpp"
#include "moab/Core.hpp"
#include "moab_mpi.h"
#include "moab/iMOAB.h"
#include "TestUtil.hpp"

#define RUN_TEST_ARG2( A, B ) run_test( &( A ), #A, B )

using namespace moab;

#define CHECKRC( rc, message )            \
    if( 0 != ( rc ) )                     \
    {                                     \
        printf( "Error: %s\n", message ); \
        return MB_FAILURE;                \
    }

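// Reduce the per-rank error flag with MPI_MAX so every rank agrees whether any rank failed.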
int is_any_proc_error( int is_my_error )
{
    int result = 0;
    int err    = MPI_Allreduce( &is_my_error, &result, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD );
    return err || result;
}

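// Run one test function on all ranks; rank 0 reports success or failure for the whole set.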
int run_test( ErrorCode ( *func )( const char* ), const char* func_name, const char* file_name )
{
    ErrorCode result = ( *func )( file_name );
    int is_err       = is_any_proc_error( ( MB_SUCCESS != result ) );
    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    if( rank == 0 )
    {
        if( is_err )
            std::cout << func_name << " : FAILED!!" << std::endl;
        else
            std::cout << func_name << " : success" << std::endl;
    }

    return is_err;
}

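// Test cases: migrate_<S>_<R> migrates the mesh from S sender tasks to R receiver tasks;
// migrate_overlap uses sender and receiver groups that share a task.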
ErrorCode migrate_1_1( const char* filename );
ErrorCode migrate_1_2( const char* filename );
ErrorCode migrate_2_1( const char* filename );
ErrorCode migrate_2_2( const char* filename );
ErrorCode migrate_4_2( const char* filename );
ErrorCode migrate_2_4( const char* filename );
ErrorCode migrate_4_3( const char* filename );
ErrorCode migrate_overlap( const char* filename );

// some global variables, used by all tests
int rank, size, ierr;

int compid1, compid2;  // component ids are unique over all PEs and are established in advance
int nghlay;            // number of ghost layers for loading the file
int groupTasks[4];     // at most 4 tasks
int startG1, startG2, endG1, endG2;

MPI_Comm jcomm;  // will be a copy of the global communicator
MPI_Group jgroup;

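// Typical launch (the executable name below is an assumption; it depends on the build system):
//   mpiexec -np 2 ./migrate_test            # runs the 1x1, 1x2, 2x1 and 2x2 cases
//   mpiexec -np 4 ./migrate_test mesh.h5m   # also runs the 4x2, 2x4, 4x3 and overlap cases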
int main( int argc, char* argv[] )
{
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &size );

    MPI_Comm_dup( MPI_COMM_WORLD, &jcomm );
    MPI_Comm_group( jcomm, &jgroup );

    std::string filename;
    filename = TestDir + "unittest/field1.h5m";
    if( argc > 1 )
    {
        filename = argv[1];
    }
    int num_errors = 0;
    num_errors += RUN_TEST_ARG2( migrate_1_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_1_2, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_1, filename.c_str() );
    num_errors += RUN_TEST_ARG2( migrate_2_2, filename.c_str() );
    if( size >= 4 )
    {
        num_errors += RUN_TEST_ARG2( migrate_4_2, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_2_4, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_4_3, filename.c_str() );
        num_errors += RUN_TEST_ARG2( migrate_overlap, filename.c_str() );
    }
    if( rank == 0 )
    {
        if( !num_errors )
            std::cout << "All tests passed" << std::endl;
        else
            std::cout << num_errors << " TESTS FAILED!" << std::endl;
    }

    MPI_Group_free( &jgroup );
    MPI_Comm_free( &jcomm );
    MPI_Finalize();
    return num_errors;
}

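// Common driver for all test cases: the sender group (comm1, component compid1) reads the mesh
// and sends it to the receiver group (comm2, component compid2), which writes it out; then a
// dense element tag ("element_field") is migrated back from component 2 to component 1 and the
// result is written again for verification.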
ErrorCode migrate( const char* filename, const char* outfile )
{
    // first create MPI groups

    std::string filen( filename );
    MPI_Group group1, group2;
    for( int i = startG1; i <= endG1; i++ )
        groupTasks[i - startG1] = i;

    ierr = MPI_Group_incl( jgroup, endG1 - startG1 + 1, groupTasks, &group1 );CHECKRC( ierr, "can't create group1" )

    for( int i = startG2; i <= endG2; i++ )
        groupTasks[i - startG2] = i;

    ierr = MPI_Group_incl( jgroup, endG2 - startG2 + 1, groupTasks, &group2 );CHECKRC( ierr, "can't create group2" )

    // create 2 communicators, one for each group
    int tagcomm1 = 1, tagcomm2 = 2;
    MPI_Comm comm1, comm2;
    ierr = MPI_Comm_create_group( jcomm, group1, tagcomm1, &comm1 );CHECKRC( ierr, "can't create comm1" )

    ierr = MPI_Comm_create_group( jcomm, group2, tagcomm2, &comm2 );CHECKRC( ierr, "can't create comm2" )

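    // ranks that are not members of a group get MPI_COMM_NULL from MPI_Comm_create_group,
    // so they simply skip the corresponding sender/receiver branches below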
    ierr = iMOAB_Initialize( 0, 0 );  // nothing from argc, argv is actually needed here yet
    CHECKRC( ierr, "can't initialize iMOAB" )

    // give some dummy values to component ids, just to differentiate between them
    // the par comm graph is unique between components
    compid1        = 4;
    compid2        = 7;
    int context_id = -1;  // default context; will be set later to compid1 or compid2

    int appID1;
    iMOAB_AppID pid1 = &appID1;
    int appID2;
    iMOAB_AppID pid2 = &appID2;

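    // pid1 identifies the sender application (registered on comm1), pid2 the receiver (on comm2)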
    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP1", &comm1, &compid1, pid1 );CHECKRC( ierr, "can't register app1 " )
    }
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "APP2", &comm2, &compid2, pid2 );CHECKRC( ierr, "can't register app2 " )
    }

    int method = 0;  // trivial partition for sending
    if( comm1 != MPI_COMM_NULL )
    {

        std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

        nghlay = 0;

        ierr = iMOAB_LoadMesh( pid1, filen.c_str(), readopts.c_str(), &nghlay );CHECKRC( ierr, "can't load mesh " )
        ierr = iMOAB_SendMesh( pid1, &jcomm, &group2, &compid2, &method );  // send to component 2
        CHECKRC( ierr, "cannot send elements" )
    }

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( pid2, &jcomm, &group1, &compid1 );  // receive from component 1
        CHECKRC( ierr, "cannot receive elements" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid2, outfile, wopts.c_str() );CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm1 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid1, &context_id );

    // exchange tag, from component to component
    // one is receiving, one is sending the tag; the one that is sending needs to have a
    // non-null communicator
    int size_tag  = 1;  // a double dense tag, on elements
    int tagType   = DENSE_DOUBLE;
    int tagIndex2 = 0, tagIndex1 = 0;  // these will be tag indices on each app pid

    std::string fileAfterTagMigr( outfile );  // has the .h5m extension
    int sizen = fileAfterTagMigr.length();
    fileAfterTagMigr.erase( sizen - 4, 4 );  // erase extension .h5m
    fileAfterTagMigr = fileAfterTagMigr + "_tag.h5m";

    // now send a tag from component 2, towards component 1
    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid2, "element_field", &tagType, &size_tag, &tagIndex2 );CHECKRC( ierr, "failed to get tag element_field " );
        // this tag already exists in the file

        // first, send from compid2 to compid1, from comm2, using the common joint comm
        // as always, use nonblocking sends
        // context_id should now be compid1
        context_id = compid1;
        ierr       = iMOAB_SendElementTag( pid2, "element_field", &jcomm, &context_id );CHECKRC( ierr, "cannot send tag values" )
    }
    // receive on component 1
    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( pid1, "element_field", &tagType, &size_tag, &tagIndex1 );CHECKRC( ierr, "failed to get tag element_field " );
        context_id = compid2;
        ierr       = iMOAB_ReceiveElementTag( pid1, "element_field", &jcomm, &context_id );CHECKRC( ierr, "cannot receive tag values" )
        std::string wopts;
        wopts = "PARALLEL=WRITE_PART;";
        ierr  = iMOAB_WriteMesh( pid1, fileAfterTagMigr.c_str(), wopts.c_str() );CHECKRC( ierr, "cannot write received mesh" )
    }

    MPI_Barrier( jcomm );

    // we can now free the sender buffers
    if( comm2 != MPI_COMM_NULL ) ierr = iMOAB_FreeSenderBuffers( pid2, &context_id );

    if( comm2 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid2 );CHECKRC( ierr, "cannot deregister app 2 receiver" )
    }

    if( comm1 != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( pid1 );CHECKRC( ierr, "cannot deregister app 1 sender" )
    }

    ierr = iMOAB_Finalize();CHECKRC( ierr, "did not finalize iMOAB" )

    if( MPI_COMM_NULL != comm1 ) MPI_Comm_free( &comm1 );
    if( MPI_COMM_NULL != comm2 ) MPI_Comm_free( &comm2 );

    MPI_Group_free( &group1 );
    MPI_Group_free( &group2 );
    return MB_SUCCESS;
}
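// Each test below selects the sender task range [startG1, endG1] and the receiver task range
// [startG2, endG2] in the joint communicator before calling migrate(). For example, migrate_2_4
// uses senders {0,1} and receivers {0,1,2,3}, and migrate_overlap uses senders {0,1} and
// receivers {1,2}, so task 1 belongs to both groups.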
// migrate from task 0 to task 1, non-overlapping
ErrorCode migrate_1_1( const char* filename )
{
    startG1 = endG1 = 0;
    startG2 = endG2 = 1;
    return migrate( filename, "migrate11.h5m" );
}
// migrate from task 0 to 2 tasks (0 and 1)
ErrorCode migrate_1_2( const char* filename )
{
    startG1 = endG1 = startG2 = 0;
    endG2                     = 1;
    return migrate( filename, "migrate12.h5m" );
}

// migrate from 2 tasks (0, 1) to 1 task (0)
ErrorCode migrate_2_1( const char* filename )
{
    startG1 = endG2 = startG2 = 0;
    endG1                     = 1;
    return migrate( filename, "migrate21.h5m" );
}

// migrate from 2 tasks to 2 tasks (overkill)
ErrorCode migrate_2_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG1 = endG2 = 1;
    return migrate( filename, "migrate22.h5m" );
}
// migrate from 4 tasks to 2 tasks
ErrorCode migrate_4_2( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 1;
    endG1             = 3;
    return migrate( filename, "migrate42.h5m" );
}

// migrate from 2 tasks to 4 tasks
ErrorCode migrate_2_4( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 3;
    endG1             = 1;
    return migrate( filename, "migrate24.h5m" );
}

// migrate from 4 tasks to 3 tasks
ErrorCode migrate_4_3( const char* filename )
{
    startG1 = startG2 = 0;
    endG2             = 2;
    endG1             = 3;
    return migrate( filename, "migrate43.h5m" );
}

// migrate between overlapping groups: senders (0, 1) and receivers (1, 2)
ErrorCode migrate_overlap( const char* filename )
{
    startG1 = 0;
    startG2 = 1;
    endG1   = 1;
    endG2   = 2;
    return migrate( filename, "migrate_over.h5m" );
}