MOAB: Mesh Oriented datABase
(version 5.4.1)
/*
 * imoab_coupler_utils.hpp
 *
 *  Created on: Aug. 22, 2020
 *  \brief Utility methods shared by the imoab coupler tests, to avoid repetitive tasks;
 *         the migrate tests can use some of these utilities as well:
 *  1) create_group_and_comm( int start, int end, MPI_Group worldGroup, MPI_Group* group, MPI_Comm* comm )
 *  2) create_joint_comm_group( MPI_Group agroup, MPI_Group bgroup, MPI_Group* abgroup, MPI_Comm* abcomm )
 *  3) setup_component_coupler_meshes( ... )
 */

#ifndef TEST_PARALLEL_IMOAB_COUPLER_UTILS_HPP_
#define TEST_PARALLEL_IMOAB_COUPLER_UTILS_HPP_

// headers used by these utilities (the including test may already provide them)
#include <cstdio>
#include <iostream>
#include <string>
#include <vector>

#include "mpi.h"
#include "moab/iMOAB.h"

// print a message and return a nonzero code from the enclosing function on error
#define CHECKIERR( rc, message )                       \
    if( 0 != ( rc ) )                                  \
    {                                                  \
        printf( "%s. ErrorCode = %d\n", message, rc ); \
        return 1;                                      \
    }

// PUSH_TIMER / POP_TIMER expect the including test to define a timer object that provides
// time_since_birth() (e.g. moab::CpuTimer timer), a double timer_ops, and a std::string opName
#define PUSH_TIMER( operation )               \
    {                                         \
        timer_ops = timer.time_since_birth(); \
        opName    = operation;                \
    }

// POP_TIMER reports the max elapsed time over localcomm; the "avg" it prints is the
// midpoint of the min and max elapsed times
#define POP_TIMER( localcomm, localrank )                                                         \
    {                                                                                             \
        double locElapsed = timer.time_since_birth() - timer_ops, minElapsed = 0, maxElapsed = 0; \
        MPI_Reduce( &locElapsed, &maxElapsed, 1, MPI_DOUBLE, MPI_MAX, 0, localcomm );             \
        MPI_Reduce( &locElapsed, &minElapsed, 1, MPI_DOUBLE, MPI_MIN, 0, localcomm );             \
        if( !( localrank ) )                                                                      \
            std::cout << "[LOG] Time taken to " << opName.c_str() << ": max = " << maxElapsed     \
                      << ", avg = " << ( maxElapsed + minElapsed ) / 2 << "\n";                   \
        opName.clear();                                                                           \
    }

/*
 * \brief create an MPI group and an MPI communicator for that group, within the global communicator
 */
int create_group_and_comm( int start, int end, MPI_Group worldGroup, MPI_Group* group, MPI_Comm* comm )
{
    // ranks [start, end] of MPI_COMM_WORLD form the new group
    std::vector< int > groupTasks;
    groupTasks.resize( end - start + 1, 0 );
    for( int i = start; i <= end; i++ )
        groupTasks[i - start] = i;

    int ierr = MPI_Group_incl( worldGroup, end - start + 1, &groupTasks[0], group );
    CHECKIERR( ierr, "Cannot create group" )

    ierr = MPI_Comm_create( MPI_COMM_WORLD, *group, comm );
    CHECKIERR( ierr, "Cannot create comm" )

    return 0;
}

/*
 * \brief create the union of two MPI groups and a communicator for the joint group
 */
int create_joint_comm_group( MPI_Group agroup, MPI_Group bgroup, MPI_Group* abgroup, MPI_Comm* abcomm )
{
    int ierr = MPI_Group_union( agroup, bgroup, abgroup );
    CHECKIERR( ierr, "Cannot create joint union group" )

    ierr = MPI_Comm_create( MPI_COMM_WORLD, *abgroup, abcomm );
    CHECKIERR( ierr, "Cannot create joint communicator from union group" )

    return 0;
}

/*
 * \brief load a component mesh on the component PEs and migrate it to the coupler PEs
 */
int setup_component_coupler_meshes( iMOAB_AppID cmpId,
                                    int cmpTag,
                                    iMOAB_AppID cplCmpId,
                                    int cmpcouTag,
                                    MPI_Comm* cmpcomm,
                                    MPI_Group* cmpPEGroup,
                                    MPI_Comm* coucomm,
                                    MPI_Group* cplPEGroup,
                                    MPI_Comm* cmpcoucomm,
                                    std::string& filename,
                                    std::string& readopts,
                                    int nghlay,
                                    int repartitioner_scheme )
{
    int ierr = 0;
    if( *cmpcomm != MPI_COMM_NULL )
    {
        // load the component mesh on the component PEs
        ierr = iMOAB_LoadMesh( cmpId, filename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load component mesh" )

        // then send the mesh to the coupler PEs
        ierr = iMOAB_SendMesh( cmpId, cmpcoucomm, cplPEGroup, &cmpcouTag, &repartitioner_scheme );
        CHECKIERR( ierr, "cannot send elements" )
    }

    // now receive the mesh on the coupler communicator (e.g., the atm mesh is migrated first in the coupler tests)
    if( *coucomm != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( cplCmpId, cmpcoucomm, cmpPEGroup, &cmpTag );  // receive from component
        CHECKIERR( ierr, "cannot receive elements on coupler app" )
    }

    // we can now free the sender buffers
    if( *cmpcomm != MPI_COMM_NULL )
    {
        int context_id = cmpcouTag;
        ierr           = iMOAB_FreeSenderBuffers( cmpId, &context_id );
        CHECKIERR( ierr, "cannot free buffers used to send the component mesh" )
    }
    return 0;
}

#endif /* TEST_PARALLEL_IMOAB_COUPLER_UTILS_HPP_ */