MOAB: Mesh Oriented datABase
(version 5.4.1)
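This example exercises the iMOAB coupling workflow for a river-runoff (MOSART) component: it loads a point-cloud runoff mesh with field data on the component tasks, loads the matching SCRIP grid file in parallel on the coupler tasks, computes the communication graph between the two representations, sends the runoff field tags from component to coupler, and writes the coupler mesh back out to verify the transfer. An atmosphere mesh is also loaded and migrated to the coupler through setup_component_coupler_meshes.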
/*
 * This test will load a scrip file in parallel and write it back
 */

#include "moab/Core.hpp"

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"

#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

using namespace moab;

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts2( "PARALLEL=READ_PART;PARTITION_METHOD=RCBZOLTAN" );
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
    std::string readoptsLnd( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );
    std::string filename    = TestDir + "unittest/SCRIPgrid_2x2_nomask_c210211.nc";
    std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
    std::string rofInp      = TestDir + "unittest/wholeRof_06.h5m";
    std::string seq_flds_r2x_fields( "Forr_rofl:Forr_rofi:Firr_rofi:Flrr_flood:Flrr_volr:Flrr_volrmch:Flrr_supply:Flrr_deficit" );
    int cmpAtm = 5, cmpRof = 21, cplRof = 22;
    int cplatm = 6;  // component ids are unique over all pes, and established in advance
    int nghlay = 0;  // no ghost layers

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup

    int startG1 = 0, startG2 = 0, startG4 = 0;
    int endG1, endG2, endG4;
    endG1 = endG2 = endG4 = numProcesses - 1;

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename", &atmFilename );
    opts.addOpt< std::string >( "mosart,m", "mosart mesh with data", &rofInp );
    opts.addOpt< std::string >( "scrip,s", "scrip mesh file", &filename );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );

    opts.addOpt< int >( "startOcn,c", "start task for mosart layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for mosart layout", &endG2 );

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    opts.parseCommandLine( argc, argv );

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1
                  << "\n mosart input file: " << rofInp << "\n on tasks : " << startG2 << ":" << endG2
                  << "\n scrip file on coupler: " << filename
                  << "\n coupler on tasks : " << startG4 << ":" << endG4 << "\n";
    }

    // load files on 2 different communicators, groups
    // coupler will be on group 4
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )

    MPI_Group rofPEGroup;
    MPI_Comm rofComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &rofPEGroup, &rofComm );
    CHECKIERR( ierr, "Cannot create rof MPI group and communicator " )

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

    // rof_coupler
    MPI_Group joinRofCouGroup;
    MPI_Comm rofCouComm;
    ierr = create_joint_comm_group( rofPEGroup, couPEGroup, &joinRofCouGroup, &rofCouComm );
    CHECKIERR( ierr, "Cannot create joint rof cou communicator" )

    ierr = iMOAB_Initialize( argc, argv );  // nothing is actually needed from argc, argv yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpRofID       = -1;
    iMOAB_AppID rofPID = &cmpRofID;
    if( rofComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "ROF", &rofComm, &cmpRof, rofPID );
        CHECKIERR( ierr, "Cannot register Rof App" )
    }

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "ATM", &atmComm, &cmpAtm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register Atm App" )
    }
    int cplAtmAppID       = -1;
    iMOAB_AppID cplAtmPID = &cplAtmAppID;

    int cplRofAppID       = -1;
    iMOAB_AppID cplRofPID = &cplRofAppID;

    int rankInCouComm = -1;
    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm, cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )

        ierr = iMOAB_RegisterApplication( "ROFX", &couComm, &cplRof, cplRofPID );  // rof on coupler pes
        CHECKIERR( ierr, "Cannot register ROFX over coupler PEs" )
    }

    int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
    repartitioner_scheme = 2;  // zoltan is used
#endif
    if( atmCouComm != MPI_COMM_NULL )
    {
        ierr = setup_component_coupler_meshes( cmpAtmPID, cmpAtm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
                                               &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay,
                                               repartitioner_scheme );
        CHECKIERR( ierr, "Cannot load and migrate atm mesh " )
    }
    int tagtype = 1, numco = 1, tagIndex = 0;
    if( cmpRofID >= 0 )
    {
        // load rof mesh with data on it
        ierr = iMOAB_LoadMesh( rofPID, rofInp.c_str(), readoptsLnd.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load mosart data mesh" )

        ierr = iMOAB_DefineTagStorage( rofPID, seq_flds_r2x_fields.c_str(), &tagtype, &numco, &tagIndex );
        CHECKIERR( ierr, "failed to define the fields on mosart point cloud" )
    }
    // load rof scrip file on coupler only
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_LoadMesh( cplRofPID, filename.c_str(), readopts2.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load scrip mesh on coupler" )
        // define tags on receiving end
        ierr = iMOAB_DefineTagStorage( cplRofPID, seq_flds_r2x_fields.c_str(), &tagtype, &numco, &tagIndex );
        CHECKIERR( ierr, "failed to define the fields on mosart coupler mesh " )
        // test what we read from scrip file
        char outputFileTgt[]    = "readCplRof.h5m";
        char fileWriteOptions[] = "PARALLEL=WRITE_PART";
        ierr = iMOAB_WriteMesh( cplRofPID, outputFileTgt, fileWriteOptions );
        CHECKIERR( ierr, "cannot write Rof mesh on coupler" )
    }
    // compute comm graph between coupler and wholeRof
    if( MPI_COMM_NULL != rofCouComm )
    {
        // compute the comm graph between point cloud rof and coupler version of rof (full mesh)
        // we are now on joint pes, compute comm graph between rof and coupler model
        int typeA = 2;  // point cloud on component PEs
        int typeB = 3;  // full mesh on coupler pes, we just read it
        ierr = iMOAB_ComputeCommGraph( rofPID, cplRofPID, &rofCouComm, &rofPEGroup, &couPEGroup, &typeA, &typeB,
                                       &cmpRof, &cplRof );
        CHECKIERR( ierr, "cannot compute comm graph for mosart " )
    }

    // now send / receive some tags
    if( cmpRofID >= 0 )
    {  // send
        // basically, use the initial partitioning
        ierr = iMOAB_SendElementTag( rofPID, seq_flds_r2x_fields.c_str(), &rofCouComm, &cplRof );
        CHECKIERR( ierr, "cannot send tags " )
    }

    if( cplRofAppID >= 0 )
    {  // we are on receiving end
        ierr = iMOAB_ReceiveElementTag( cplRofPID, seq_flds_r2x_fields.c_str(), &rofCouComm, &cmpRof );
        CHECKIERR( ierr, "cannot receive tags " )
        char outputFileTgt[]    = "afterSend.h5m";
        char fileWriteOptions[] = "PARALLEL=WRITE_PART";
        ierr = iMOAB_WriteMesh( cplRofPID, outputFileTgt, fileWriteOptions );
        CHECKIERR( ierr, "cannot write Rof mesh with data on coupler" )
    }

    if( cmpRofID >= 0 )
    {  // sender side: release the buffers used by the nonblocking sends
        ierr = iMOAB_FreeSenderBuffers( rofPID, &cplRof );
        CHECKIERR( ierr, "cannot free buffers " )
    }
    MPI_Finalize();

    return 0;
}
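For reference, CHECKIERR is an error-checking helper brought in through imoab_coupler_utils.hpp; a minimal sketch of its behavior (the real definition may differ in detail) is:

    #define CHECKIERR( rc, message )                         \
        if( 0 != ( rc ) )                                    \
        {                                                    \
            printf( "%s. ErrorCode = %d.\n", message, rc );  \
            return 1;                                        \
        }

Because the macro expands to a complete if-statement, the calls in the listing carry no trailing semicolon.

A hypothetical invocation on four tasks, with the executable name assumed (the actual name depends on the build target) and the task-layout options left at their defaults, might look like:

    mpiexec -n 4 ./imoab_read_rof_scrip -t wholeATM_T.h5m -m wholeRof_06.h5m -s SCRIPgrid_2x2_nomask_c210211.nc

On success the test writes readCplRof.h5m (the scrip mesh as read on the coupler tasks) and afterSend.h5m (the same mesh after the runoff field tags have been received).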