MOAB: Mesh Oriented datABase
(version 5.4.1)
/*
 * This imoab_coupler test will simulate coupling between 3 components.
 * 3 meshes will be loaded from 3 files (atm, ocean, lnd), and they will be migrated to
 * all processors (coupler pes); then, intx will be performed between migrated meshes,
 * and weights will be generated, such that a field from one component will be transferred to
 * the other component.
 * Currently, the atm will send some data to be projected to the ocean and land components.
 *
 * First, intersect atm and ocn, and recompute comm graph 1 between atm and atm_cx, for ocn intx;
 * second, intersect atm and lnd, and recompute comm graph 2 between atm and atm_cx, for lnd intx.
 */

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error mbtempest tool requires MPI configuration
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

using namespace moab;

//#define GRAPH_INFO

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif

#define ENABLE_ATMOCN_COUPLING
#define ENABLE_ATMLND_COUPLING

#if( !defined( ENABLE_ATMOCN_COUPLING ) && !defined( ENABLE_ATMLND_COUPLING ) )
#error Enable either OCN (ENABLE_ATMOCN_COUPLING) and/or LND (ENABLE_ATMLND_COUPLING) for coupling
#endif

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
    std::string readoptsLnd( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
    repartitioner_scheme = 2;  // use the geometry partitioner (Zoltan-based) in that case
#endif

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup

    std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
    // on a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // intx atm/ocn is not in e3sm yet, so give it a number:
    // 6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND
    // 6 * 100 + 10 = 610 : atmlndid
    // cmpatm is for atm on atm pes
    // cmpocn is for ocean, on ocean pes
    // cplatm is for atm on coupler pes
    // cplocn is for ocean on coupler pes
    // atmocnid is for intx atm / ocn on coupler pes
    int rankInAtmComm = -1;
    int cmpatm = 5,
        cplatm = 6;  // component ids are unique over all pes, and established in advance;
#ifdef ENABLE_ATMOCN_COUPLING
    std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
    std::string baseline    = TestDir + "unittest/baseline1.txt";
    int rankInOcnComm       = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;
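    // Note (a reading aid, not part of the original test): the intersection app ids above
    // follow the convention intx_id = 100 * cpl_src_id + cpl_tgt_id, e.g. 100 * 6 + 18 = 618
    // for atm-ocn and 100 * 6 + 10 = 610 for atm-lnd; any values would work, as long as each
    // id is unique across all components and agreed upon in advance on every task.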
#endif

#ifdef ENABLE_ATMLND_COUPLING
    std::string lndFilename = TestDir + "unittest/wholeLnd.h5m";
    int cpllnd = 10, cmplnd = 9,
        atmlndid = 610;  // component ids are unique over all pes, and established in advance;
#endif

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1, startG3 = startG1,
        endG3 = endG1;  // support launch of the imoab_coupler test on any combo of 2*x processes
    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm on 2 procs, ocean on 2, land on 2; migrate to 2 procs, then compute intx;
    // later, we need to compute the weight matrix with tempestremap

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
#endif
#ifdef ENABLE_ATMLND_COUPLING
    opts.addOpt< std::string >( "land,l", "land mesh filename (target)", &lndFilename );
#endif
    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
#endif
#ifdef ENABLE_ATMLND_COUPLING
    opts.addOpt< int >( "startLnd,e", "start task for land layout", &startG3 );
    opts.addOpt< int >( "endLnd,f", "end task for land layout", &endG3 );
#endif

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );

    int n = 1;  // number of send/receive / project / send back cycles
    opts.addOpt< int >( "iterations,n", "number of iterations for coupler", &n );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
    opts.parseCommandLine( argc, argv );

    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n on tasks : " << startG1 << ":" << endG1 <<
#ifdef ENABLE_ATMOCN_COUPLING
            "\n ocn file: " << ocnFilename << "\n on tasks : " << startG2 << ":" << endG2 <<
#endif
#ifdef ENABLE_ATMLND_COUPLING
            "\n lnd file: " << lndFilename << "\n on tasks : " << startG3 << ":" << endG3 <<
#endif
            "\n partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n ";
    }

    // load files on 3 different communicators, groups;
    // the first group has task 0, the second group tasks 0 and 1;
    // the coupler will be on joint tasks, on a third group (0 and 1, again)
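    // A minimal sketch (an assumption, not the actual helper from imoab_coupler_utils.hpp) of
    // how a group/communicator pair for the task range [start, end] can be built from jgroup:
    //
    //     int range[1][3] = { { start, end, 1 } };            // first task, last task, stride
    //     MPI_Group_range_incl( jgroup, 1, range, peGroup );  // subset of the world group
    //     MPI_Comm_create_group( MPI_COMM_WORLD, *peGroup, 0, peComm );  // collective only over
    //                                                                    // members of peGroup
    //
    // Tasks outside the range are left with peComm == MPI_COMM_NULL, which is why every
    // component block below is guarded by a comparison against MPI_COMM_NULL.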
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )

#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
#endif

#ifdef ENABLE_ATMLND_COUPLING
    MPI_Group lndPEGroup;
    MPI_Comm lndComm;
    ierr = create_group_and_comm( startG3, endG3, jgroup, &lndPEGroup, &lndComm );
    CHECKIERR( ierr, "Cannot create lnd MPI group and communicator " )
#endif

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm - coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

#ifdef ENABLE_ATMOCN_COUPLING
    // ocn - coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
#endif

#ifdef ENABLE_ATMLND_COUPLING
    // lnd - coupler
    MPI_Group joinLndCouGroup;
    MPI_Comm lndCouComm;
    ierr = create_joint_comm_group( lndPEGroup, couPEGroup, &joinLndCouGroup, &lndCouComm );
    CHECKIERR( ierr, "Cannot create joint lnd cou communicator" )
#endif

    ierr = iMOAB_Initialize( argc, argv );  // nothing from argc, argv is really needed yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs
#ifdef ENABLE_ATMOCN_COUPLING
    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm - ocn on coupler PEs
#endif

#ifdef ENABLE_ATMLND_COUPLING
    int cmpLndAppID       = -1;
    iMOAB_AppID cmpLndPID = &cmpLndAppID;        // lnd
    int cplLndAppID = -1, cplAtmLndAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplLndPID    = &cplLndAppID;     // land on coupler PEs
    iMOAB_AppID cplAtmLndPID = &cplAtmLndAppID;  // intx atm - lnd on coupler PEs
#endif
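    // Note on the handle pattern (a reading aid, not from the original source): iMOAB_AppID is
    // just a pointer to an int slot; iMOAB_RegisterApplication fills the slot with the internal
    // application index, and every later call identifies the application by that pointer. A
    // slot that is still -1 therefore means no mesh was registered there on this task.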
    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
#ifdef ENABLE_ATMOCN_COUPLING
        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
#endif
#ifdef ENABLE_ATMLND_COUPLING
        ierr = iMOAB_RegisterApplication( "LNDX", &couComm, &cpllnd,
                                          cplLndPID );  // lnd on coupler pes
        CHECKIERR( ierr, "Cannot register LND over coupler PEs" )
#endif
    }

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
    }

#ifdef ENABLE_ATMOCN_COUPLING
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
    }
#endif

    // atm
    ierr =
        setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
                                        &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate atm mesh" )
#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL && 1 == n )
    {  // write only for the n==1 case
        char outputFileTgt3[] = "recvAtm.h5m";
        ierr = iMOAB_WriteMesh( cplAtmPID, outputFileTgt3, fileWriteOptions );
        CHECKIERR( ierr, "cannot write atm mesh after receiving" )
    }
#endif
#ifdef GRAPH_INFO
    if( atmComm != MPI_COMM_NULL )
    {
        int is_sender = 1;
        int context   = -1;
        iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmMigS" );
    }
    if( couComm != MPI_COMM_NULL )
    {
        int is_sender = 0;
        int context   = -1;
        iMOAB_DumpCommGraph( cplAtmPID, &context, &is_sender, "AtmMigR" );
    }
#endif
    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMOCN_COUPLING
    // ocean
    ierr =
        setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
                                        &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL && 1 == n )
    {  // write only for the n==1 case
        char outputFileTgt3[] = "recvOcn.h5m";
        ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
        CHECKIERR( ierr, "cannot write ocn mesh after receiving" )
    }
#endif
#endif  // #ifdef ENABLE_ATMOCN_COUPLING

#ifdef ENABLE_ATMLND_COUPLING
    // land
    if( lndComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "LND1", &lndComm, &cmplnd, cmpLndPID );
        CHECKIERR( ierr, "Cannot register LND App " )
    }
    ierr = setup_component_coupler_meshes( cmpLndPID, cmplnd, cplLndPID, cpllnd, &lndComm, &lndPEGroup, &couComm,
                                           &couPEGroup, &lndCouComm, lndFilename, readoptsLnd, nghlay,
                                           repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate lnd mesh" )

    if( couComm != MPI_COMM_NULL && 1 == n )
    {  // write only for the n==1 case
        char outputFileLnd[] = "recvLnd.h5m";
        ierr = iMOAB_WriteMesh( cplLndPID, outputFileLnd, fileWriteOptions );
        CHECKIERR( ierr, "cannot write lnd mesh after receiving" )
    }

#endif  // #ifdef ENABLE_ATMLND_COUPLING

    MPI_Barrier( MPI_COMM_WORLD );
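    // A rough sketch of what setup_component_coupler_meshes does (an assumption about the
    // helper in imoab_coupler_utils.hpp; the argument lists here are abbreviated, not exact):
    //
    //     iMOAB_LoadMesh( cmpPID, filename, readopts, &nghlay );  // parallel read on comp PEs
    //     iMOAB_SendMesh( cmpPID, &jointComm, &couPEGroup, &cplId,
    //                     &repartitioner_scheme );                // migrate toward coupler PEs
    //     iMOAB_ReceiveMesh( cplPID, &jointComm, &cmpPEGroup, &cmpId );  // instantiate there
    //
    // i.e., each component mesh is loaded on its own PEs and then re-sent to the coupler PEs,
    // repartitioned according to repartitioner_scheme (trivial / graph / geometry).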
#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // now compute the intersection between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register atm_ocn intx over coupler pes " )
    }
#endif
#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // now compute the intersection between LNDx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMLND", &couComm, &atmlndid, cplAtmLndPID );
        CHECKIERR( ierr, "Cannot register atm_lnd intx over coupler pes " )
    }
#endif

    int disc_orders[3]                       = { 4, 1, 1 };
    const std::string weights_identifiers[2] = { "scalar", "scalar-pc" };
    const std::string disc_methods[3]        = { "cgll", "fv", "pcloud" };
    const std::string dof_tag_names[3]       = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute ATM-OCN mesh intersection" )
        ierr = iMOAB_ComputeMeshIntersectionOnSphere(
            cplAtmPID, cplOcnPID,
            cplAtmOcnPID );  // the coverage mesh was computed here, for cplAtmPID, atm on coupler pes;
        // basically, atm was redistributed according to the target (ocean) partition, to "cover"
        // the ocean partitions; check if the intx is valid, write some h5m intx file
        CHECKIERR( ierr, "cannot compute intersection" )
        POP_TIMER( couComm, rankInCouComm )
#ifdef VERBOSE
        char prefix[] = "intx_atmocn";
        ierr          = iMOAB_WriteLocalMesh( cplAtmOcnPID, prefix );
        CHECKIERR( ierr, "failed to write local intx mesh" );
#endif
    }

    if( atmCouComm != MPI_COMM_NULL )
    {
        // the new graph will be for sending data from the atm comp to the coverage mesh;
        // it involves the initial atm app (cmpAtmPID) and the atm mesh migrated to the coupler
        // pes (cplAtmPID); the results are in cplAtmOcnPID, the intx mesh; the remapper also
        // has some info about the coverage mesh.
        // After this, the sending of tags from atm pes to coupler pes will use the new par comm
        // graph, which has more precise info about what to send for the ocean cover; every
        // time, we will use the element global id, which should uniquely identify the element
        PUSH_TIMER( "Compute OCN coverage graph for ATM mesh" )
        ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID, &cmpatm, &cplatm,
                                    &cplocn );  // it happens over the joint communicator
        CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
        POP_TIMER( atmCouComm, rankInAtmComm )  // hijack this rank
    }
#endif
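    // For the land target, which is a point cloud in this test (disc method "pcloud"), there
    // is no polygon intersection on the sphere; a point-DoF matching is computed instead, and
    // the coverage graph is then rebuilt exactly as for the ocean target.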
#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute ATM-LND mesh intersection" )
        ierr = iMOAB_ComputePointDoFIntersection( cplAtmPID, cplLndPID, cplAtmLndPID );
        CHECKIERR( ierr, "failed to compute point-cloud mapping" );
        POP_TIMER( couComm, rankInCouComm )
    }
    if( atmCouComm != MPI_COMM_NULL )
    {
        // the new graph will be for sending data from the atm comp to the coverage mesh for
        // the land mesh; it involves the initial atm app (cmpAtmPID) and the atm mesh migrated
        // to the coupler pes (cplAtmPID); the results are in cplAtmLndPID, the intx mesh; the
        // remapper also has some info about the coverage mesh.
        // After this, the sending of tags from atm pes to coupler pes will use the new par comm
        // graph, which has more precise info about what to send (specifically for the land
        // cover); every time, we will use the element global id, which should uniquely
        // identify the element
        PUSH_TIMER( "Compute LND coverage graph for ATM mesh" )
        ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmLndPID, &cmpatm, &cplatm,
                                    &cpllnd );  // it happens over the joint communicator
        CHECKIERR( ierr, "cannot recompute direct coverage graph for land" )
        POP_TIMER( atmCouComm, rankInAtmComm )  // hijack this rank
    }
#endif

    MPI_Barrier( MPI_COMM_WORLD );

    int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 1, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;

#ifdef ENABLE_ATMOCN_COUPLING
#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL && 1 == n )
    {  // write only for the n==1 case
        char serialWriteOptions[] = "";  // for writing in serial
        std::stringstream outf;
        outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
        std::string intxfile = outf.str();  // write the intx file in serial, for debugging
        ierr = iMOAB_WriteMesh( cplAtmOcnPID, intxfile.c_str(), serialWriteOptions );
        CHECKIERR( ierr, "cannot write intx file result" )
    }
#endif

    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute the projection weights with TempestRemap" )
        ierr =
            iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), disc_methods[0].c_str(),
                                                  &disc_orders[0], disc_methods[1].c_str(), &disc_orders[1], &fNoBubble,
                                                  &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap, &fNoConserve,
                                                  &fValidate, dof_tag_names[0].c_str(), dof_tag_names[1].c_str() );
        CHECKIERR( ierr, "cannot compute scalar projection weights" )
        POP_TIMER( couComm, rankInCouComm )

        // Let us now write the map file to disk and then read it back, to test the I/O API in iMOAB
#ifdef MOAB_HAVE_NETCDF
        {
            const std::string atmocn_map_file_name = "atm_ocn_map.nc";
            ierr = iMOAB_WriteMappingWeightsToFile( cplAtmOcnPID, weights_identifiers[0].c_str(),
                                                    atmocn_map_file_name.c_str() );
            CHECKIERR( ierr, "failed to write map file to disk" );

            const std::string intx_from_file_identifier = "map-from-file";
            int dummyCpl     = -1;
            int dummy_rowcol = -1;
            int dummyType    = 0;
            ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
                                                     intx_from_file_identifier.c_str(), atmocn_map_file_name.c_str() );
            CHECKIERR( ierr, "failed to load map file from disk" );
        }
#endif
    }

#endif
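    // The write/read round trip above only exercises the mapping-weights I/O API; a production
    // coupler would typically either compute the weights online, as done here, or load a
    // pre-computed map file once at startup, rather than doing both.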
    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        fValidate = 0;
        /* Compute the weights to project the solution from the ATM component to the LND component */
        PUSH_TIMER( "Compute ATM-LND remapping weights" )
        ierr =
            iMOAB_ComputeScalarProjectionWeights( cplAtmLndPID, weights_identifiers[1].c_str(), disc_methods[0].c_str(),
                                                  &disc_orders[0], disc_methods[2].c_str(), &disc_orders[2], &fNoBubble,
                                                  &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap, &fNoConserve,
                                                  &fValidate, dof_tag_names[0].c_str(), dof_tag_names[2].c_str() );
        CHECKIERR( ierr, "failed to compute remapping projection weights for ATM-LND scalar "
                         "non-conservative field" );
        POP_TIMER( couComm, rankInCouComm )

        // Let us now write the map file to disk and then read it back, to test the I/O API in iMOAB.
        // VSM: TODO: This does not work since the LND model is a point cloud and we do not
        // initialize data correctly in the TempestOnlineMap::WriteParallelWeightsToFile routine.
        // {
        //     const char* atmlnd_map_file_name = "atm_lnd_map.nc";
        //     ierr = iMOAB_WriteMappingWeightsToFile( cplAtmLndPID, weights_identifiers[1], atmlnd_map_file_name );
        //     CHECKIERR( ierr, "failed to write map file to disk" );

        //     const char* intx_from_file_identifier = "map-from-file";
        //     ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmLndPID, intx_from_file_identifier, atmlnd_map_file_name,
        //                                              NULL, NULL, NULL );
        //     CHECKIERR( ierr, "failed to load map file from disk" );
        // }
    }
#endif

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;

    const char* bottomFields          = "a2oTbot:a2oUbot:a2oVbot";
    const char* bottomProjectedFields = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot:a2oUbot:a2oVbot" );
#ifdef ENABLE_ATMOCN_COUPLING
        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot_proj:a2oUbot_proj:a2oVbot_proj" );
#endif
    }
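    // Note (a reading aid, not in the original source): tag names are passed as one
    // colon-separated list ("a2oTbot:a2oUbot:a2oVbot"), so a single iMOAB_DefineTagStorage or
    // iMOAB_SetDoubleTagStorage call handles all three fields; the storage is dense, with
    // atmCompNDoFs = 4 * 4 = 16 doubles per element and per field on the cGLL atm source, and
    // one double per cell (FV) on the ocean target.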
    // need to make sure that the coverage mesh (created during the intx method) received the
    // tags that need to be projected to the target; so far, the coverage mesh has only the ids
    // and global dofs; we would need to change the migrate method to accommodate any GLL tag.
    // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage
    // mesh (cplAtmPID), using the new coverage graph communicator

    // make the tag 0, to check that we are actually sending needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance, which
             * will contain the original cells in the local partition and ghost entities. The
             * numbers of vertices, primary cells, visible blocks, and sideset and nodeset
             * boundary conditions will be returned in arrays of size 3, for local, ghost and
             * total numbers.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem * 3;  // 3 tags
            int eetype   = 1;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot make the tags null" )
            // set the tags to 0
        }
    }

    const char* concat_fieldname  = "a2oTbot:a2oUbot:a2oVbot";
    const char* concat_fieldnameT = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";

    // start a virtual loop for the number of iterations
    for( int iters = 0; iters < n; iters++ )
    {
#ifdef ENABLE_ATMOCN_COUPLING
        PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to ocean:
            ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot send tag values" )
#ifdef GRAPH_INFO
            int is_sender = 1;
            int context   = cplocn;
            iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnS" );
#endif
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on atm on coupler pes, which was redistributed according to coverage
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot receive tag values" )
#ifdef GRAPH_INFO
            int is_sender = 0;
            int context   = cplocn;  // the same context, cplocn
            iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnR" );
#endif
        }
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // the context is for ocean
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
#ifdef VERBOSE
        if( couComm != MPI_COMM_NULL && 1 == n )
        {
            // write only for the n==1 case
            char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
            ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
            CHECKIERR( ierr, "could not write recvAtmCoupOcn.h5m to disk" )
        }
#endif
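        // Note on the transfer semantics above (a reading aid, not in the original source):
        // iMOAB_SendElementTag posts nonblocking sends on the component PEs, while the matching
        // iMOAB_ReceiveElementTag completes on the coupler PEs; only after the receives
        // complete may the sender release its staging buffers with iMOAB_FreeSenderBuffers.
        // The last argument (here cplocn) selects which communication graph / context is used.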
        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we
               defined on the source mesh and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), concat_fieldname,
                                                       concat_fieldnameT );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )
            if( 1 == n )  // write only for the n==1 case
            {
                char outputFileTgt[] = "fOcnOnCpl.h5m";
                ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write fOcnOnCpl.h5m to disk" )
            }
        }

        // send the projected tag back to ocean pes, with send/receive tag
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr =
                iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tags for receiving back "
                             "a2oTbot_proj, a2oUbot_proj, a2oVbot_proj on ocn pes" );
        }
        // send the tag to ocean pes, from the ocean mesh on coupler pes;
        // from couComm, using the common joint comm ocn_coupler;
        // as always, use nonblocking sends
        // original graph (context is -1)
        if( couComm != MPI_COMM_NULL )
        {
            // need to use the ocean comp id for context
            context_id = cmpocn;  // id for ocean on comp
            ierr =
                iMOAB_SendElementTag( cplOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on coupler
            ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm,
                                            &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }

        MPI_Barrier( MPI_COMM_WORLD );

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
            CHECKIERR( ierr, "cannot free send/receive buffers for OCN context" )
        }
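        // Note the mirrored context ids on the way back (a reading aid, not from the original
        // source): the coupler-side send uses the ocean component id (cmpocn) as context,
        // while the component-side receive uses the coupler-side id (cplocn); each side names
        // its peer in the original migration graph, rather than a coverage graph as before.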
        if( ocnComm != MPI_COMM_NULL && 1 == n )  // write only for the n==1 case
        {
            char outputFileOcn[] = "OcnWithProj.h5m";
            ierr = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
            // test results only for n == 1, for the bottom temperature projected field
            if( !no_regression_test )
            {
                // the same as the remap test:
                // get the temp field on the ocean, and the global ids, and compare against the
                // baseline file. first get GlobalIds from ocn, and the fields:
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard coded too
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oTbot_proj", &nelem[2], &ent_type, &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
            }
        }
#endif

#ifdef ENABLE_ATMLND_COUPLING
        // start land projection:
        PUSH_TIMER( "Send/receive data from component atm to coupler, in land context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to land:
            ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cpllnd );
            CHECKIERR( ierr, "cannot send tag values" )
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on atm on coupler pes, which was redistributed according to coverage,
            // for the land context
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cpllnd );
            CHECKIERR( ierr, "cannot receive tag values" )
        }
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cpllnd );
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh "
                             "for land context" )
        }
#ifdef VERBOSE
        if( couComm != MPI_COMM_NULL && 1 == n )
        {  // write only for the n==1 case
            char outputFileRecvd[] = "recvAtmCoupLnd.h5m";
            ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
            CHECKIERR( ierr, "could not write recvAtmCoupLnd.h5m to disk" )
        }
#endif
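        // The land branch below mirrors the ocean path exactly, with cpllnd as the coverage
        // context and the point-cloud ("scalar-pc") weights applied instead of the cGLL-to-FV
        // ones.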
        /* We have the remapping weights now. Let us apply the weights onto the tag we defined
           on the source mesh and get the projection on the target mesh */
        if( couComm != MPI_COMM_NULL )
        {
            PUSH_TIMER( "Apply Scalar projection weights for land" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmLndPID, weights_identifiers[1].c_str(), concat_fieldname,
                                                       concat_fieldnameT );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )
        }

#ifdef VERBOSE
        if( couComm != MPI_COMM_NULL && 1 == n )
        {  // write only for the n==1 case
            char outputFileTgtLnd[] = "fLndOnCpl.h5m";
            ierr = iMOAB_WriteMesh( cplLndPID, outputFileTgtLnd, fileWriteOptions );
            CHECKIERR( ierr, "could not write fLndOnCpl.h5m to disk" )
        }
#endif

        // end land projection;
        // send the tags back to land pes, from the land mesh on coupler pes;
        // send from cplLndPID to cmpLndPID, using the common joint comm;
        // as always, use nonblocking sends
        // original graph
        // int context_id = -1;
        // the land might not have these tags yet; it should be a different name for land;
        // in e3sm we do have different names
        if( lndComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr =
                iMOAB_DefineTagStorage( cmpLndPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tags for receiving back "
                             "a2oTbot_proj, a2oUbot_proj, a2oVbot_proj on lnd pes" );
        }
        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmplnd;  // land comp id
            ierr =
                iMOAB_SendElementTag( cplLndPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &lndCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to land pes" )
        }
        // receive on component 3, land
        if( lndComm != MPI_COMM_NULL )
        {
            context_id = cpllnd;  // land on coupler id
            ierr = iMOAB_ReceiveElementTag( cmpLndPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &lndCouComm,
                                            &context_id );
            CHECKIERR( ierr, "cannot receive tag values from land mesh on coupler pes" )
        }

        MPI_Barrier( MPI_COMM_WORLD );
        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmplnd;
            ierr       = iMOAB_FreeSenderBuffers( cplLndPID, &context_id );
            CHECKIERR( ierr, "cannot free buffers used to resend land tag towards the original "
                             "land pes" )
        }
        if( lndComm != MPI_COMM_NULL && 1 == n )  // write only for the n==1 case
        {
            char outputFileLnd[] = "LndWithProj.h5m";
            ierr = iMOAB_WriteMesh( cmpLndPID, outputFileLnd, fileWriteOptions );
            CHECKIERR( ierr, "could not write LndWithProj.h5m to disk" )
        }
#endif  // ENABLE_ATMLND_COUPLING

    }  // end loop iterations n
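    // Cleanup happens in reverse dependency order below: the component and intersection
    // applications are deregistered first, then iMOAB is finalized, and finally the MPI
    // communicators and groups created at startup are freed.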
#ifdef ENABLE_ATMLND_COUPLING
    if( lndComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpLndPID );
        CHECKIERR( ierr, "cannot deregister app LND1" )
    }
#endif  // ENABLE_ATMLND_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplLndPID );
        CHECKIERR( ierr, "cannot deregister app LNDX" )
    }
#endif  // ENABLE_ATMLND_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free the atm - coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

#ifdef ENABLE_ATMOCN_COUPLING
    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free the ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );
#endif

#ifdef ENABLE_ATMLND_COUPLING
    if( MPI_COMM_NULL != lndComm ) MPI_Comm_free( &lndComm );
    // free the land - coupler group and comm
    if( MPI_COMM_NULL != lndCouComm ) MPI_Comm_free( &lndCouComm );
    MPI_Group_free( &joinLndCouGroup );
#endif

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );
#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group_free( &ocnPEGroup );
#endif
#ifdef ENABLE_ATMLND_COUPLING
    MPI_Group_free( &lndPEGroup );
#endif
    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}
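// Example invocation (illustrative; the task counts and option values are assumptions, while
// the option letters come from the ProgOptions setup above):
//
//     mpiexec -n 4 ./imoab_coupler -a 0 -b 1 -c 2 -d 3 -g 0 -j 3 -n 1
//
// loads the atm mesh on tasks 0-1 and the ocn mesh on tasks 2-3, runs the coupler on tasks
// 0-3, and performs one send / project / send-back cycle; without arguments, every layout
// defaults to all available tasks.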