/**
 * imoab_phAtm_ocn_coupler.cpp
 *
 *  This test simulates coupling between 3 components. Meshes are loaded from
 *  3 files (atm, ocean, and phys atm); the atm and ocn meshes are migrated to
 *  the coupler PEs, where their intersection is computed and projection
 *  weights are generated, so that a field from the phys atm can be
 *  transferred to the ocean. Currently, the phys atm sends some data to be
 *  projected onto the ocean mesh.
 *
 *  First, intersect atm and ocn, and recompute comm graph 1 between atm phys
 *  and atm_cx for the ocn intx; the same may be repeated for land.
 */
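
/*
 * Example invocation (illustrative only; the flags map to the ProgOptions
 * defined in main, and the mesh files are the defaults from the test data):
 *
 *   mpiexec -n 2 ./imoab_phatm_ocn_coupler -a 0 -b 1 -c 0 -d 1 -g 0 -j 1 \
 *       -t wholeATM_T.h5m -m recMeshOcn.h5m -q AtmPhys_01.h5m
 */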

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error This coupler test requires a MOAB configuration with MPI
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

using namespace moab;

//#define VERBOSE

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif

#define ENABLE_ATMOCN_COUPLING
// for land coupling with phys atm, we do not have to "compute" an intersection;
// it is enough to know that the ids are the same, so we can project from atm to
// land using a computed comm graph
#define ENABLE_ATMLND_COUPLING

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
    std::string readoptsPhysAtm( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
    repartitioner_scheme = 2;  // use a Zoltan partitioner when available (2 = geometry, per the option help below)
#endif

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup

    std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";  // we use only the mesh from here
    std::string atmPhysMesh =
        TestDir + "unittest/AtmPhys_01.h5m";  // it has some data associated to vertices, T_ph, u_ph, v_ph
    // we will eventually project that data to the ocean mesh, after intx atm/ocn

    // in a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // intx atm/ocn is not in e3sm yet, so assign it a number:
    //   6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND
    //   6 * 100 + 10 = 610 : atmlndid
    // cmpatm is for atm on atm pes
    // cmpocn is for ocean, on ocean pes
    // cplatm is for atm on coupler pes
    // cplocn is for ocean on coupler pes
    // atmocnid is for intx atm / ocn on coupler pes

    int cmpatm     = 5,
        cplatm     = 6;    // component ids are unique over all pes, and established in advance;
    int cmpPhysAtm = 105;  // different from atm spectral ?
#ifdef ENABLE_ATMOCN_COUPLING
    std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
    int rankInOcnComm       = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;
#endif
#ifdef ENABLE_ATMLND_COUPLING
    std::string lndFilename = TestDir + "unittest/wholeLnd.h5m";
    int cpllnd              = 10,
        cmplnd              = 9;  // component ids are unique over all pes, and established in advance;
#endif

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file

    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1, startG3 = startG1, endG3 = endG1;
    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm, ocean, and land on all tasks, and run the coupler on all tasks as well;
    // migrate the meshes to the coupler tasks, then compute the intx;
    // later, we need to compute the weight matrix with tempestremap

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
#endif

#ifdef ENABLE_ATMLND_COUPLING
    opts.addOpt< std::string >( "land,l", "land mesh filename (target)", &lndFilename );
#endif

    opts.addOpt< std::string >( "physgrid,q", "physics grid file", &atmPhysMesh );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
#endif

#ifdef ENABLE_ATMLND_COUPLING
    opts.addOpt< int >( "startLnd,e", "start task for land layout", &startG3 );
    opts.addOpt< int >( "endLnd,f", "end task for land layout", &endG3 );
#endif

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );

    opts.parseCommandLine( argc, argv );

    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n   on tasks : " << startG1 << ":" << endG1 <<
#ifdef ENABLE_ATMOCN_COUPLING
            "\n ocn file: " << ocnFilename << "\n     on tasks : " << startG2 << ":" << endG2 <<
#endif
#ifdef ENABLE_ATMLND_COUPLING
            "\n land file: " << lndFilename << "\n     on tasks : " << startG3 << ":" << endG3 <<
#endif
            "\n atm phys file: " << atmPhysMesh << "\n     on tasks : " << startG1 << ":" << endG1 <<
            "\n  partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n  ";
    }

    // load files on 3 different communicators/groups: one for atm tasks, one for ocean
    // tasks, and one for land tasks; the coupler runs on its own group of tasks, which
    // by default spans all of them
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )

#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
#endif

#ifdef ENABLE_ATMLND_COUPLING
    MPI_Group lndPEGroup;
    MPI_Comm lndComm;
    ierr = create_group_and_comm( startG3, endG3, jgroup, &lndPEGroup, &lndComm );
    CHECKIERR( ierr, "Cannot create lnd MPI group and communicator " )
#endif

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // now, create the joint communicators atm_coupler, ocn_coupler, lnd_coupler;
    // for each, we create the group first, then the communicator

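    // a joint group/comm helper along these lines typically takes the union of the
    // two groups and builds a communicator over it; the actual implementation lives
    // in imoab_coupler_utils.hpp, so this is only a sketch:
    //   MPI_Group_union( groupA, groupB, &jointGroup );
    //   MPI_Comm_create_group( MPI_COMM_WORLD, jointGroup, tag, &jointComm );
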
    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

#ifdef ENABLE_ATMOCN_COUPLING
    // ocn_coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
#endif

#ifdef ENABLE_ATMLND_COUPLING
    // lnd_coupler
    MPI_Group joinLndCouGroup;
    MPI_Comm lndCouComm;
    ierr = create_joint_comm_group( lndPEGroup, couPEGroup, &joinLndCouGroup, &lndCouComm );
    CHECKIERR( ierr, "Cannot create joint lnd cou communicator" )
#endif

    ierr = iMOAB_Initialize( argc, argv );  // argc and argv are not really used yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs

#ifdef ENABLE_ATMOCN_COUPLING
    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm-ocn on coupler PEs
#endif

#ifdef ENABLE_ATMLND_COUPLING
    int cmpLndAppID       = -1;
    iMOAB_AppID cmpLndPID = &cmpLndAppID;  // lnd
    int cplLndAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplLndPID = &cplLndAppID;  // land on coupler PEs
    // an intx atm-lnd app on the coupler PEs is no longer needed
#endif

    int cmpPhysAtmID        = -1;
    iMOAB_AppID cmpPhAtmPID = &cmpPhysAtmID;  // phys atm; we do not need to move it to the coupler!

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
#ifdef ENABLE_ATMOCN_COUPLING
        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
#endif

#ifdef ENABLE_ATMLND_COUPLING
        ierr = iMOAB_RegisterApplication( "LNDX", &couComm, &cpllnd,
                                          cplLndPID );  // lnd on coupler pes
        CHECKIERR( ierr, "Cannot register LND over coupler PEs" )
#endif
    }

    if( atmCouComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
    }

#ifdef ENABLE_ATMOCN_COUPLING
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
    }
#endif

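    // setup_component_coupler_meshes (a helper from imoab_coupler_utils.hpp) loads the
    // component mesh on its own PEs and migrates it to the coupler PEs over the joint
    // communicator, repartitioning according to repartitioner_scheme; see that header
    // for the exact sequence of iMOAB calls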
    // atm
    ierr =
        setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
                                        &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate atm mesh" )

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMOCN_COUPLING
    // ocean
    ierr =
        setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
                                        &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )
#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL )
    {
        char outputFileTgt3[] = "recvOcn2.h5m";
        ierr                  = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
        CHECKIERR( ierr, "cannot write ocn mesh after receiving" )
    }
#endif

#endif

    MPI_Barrier( MPI_COMM_WORLD );

    // load the phys atm mesh, with some data already on it
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "PhysAtm", &atmComm, &cmpPhysAtm, cmpPhAtmPID );
        CHECKIERR( ierr, "Cannot register Phys Atm App " )

        // load the next component mesh
        ierr = iMOAB_LoadMesh( cmpPhAtmPID, atmPhysMesh.c_str(), readoptsPhysAtm.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load Atm Phys mesh on atm pes" )

        int nverts[3], nelem[3];
        ierr = iMOAB_GetMeshInfo( cmpPhAtmPID, nverts, nelem, 0, 0, 0 );
        CHECKIERR( ierr, "failed to get mesh info" );
        printf( "Phys Atm Component Mesh: %d vertices and %d elements\n", nverts[0], nelem[0] );
    }

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMLND_COUPLING
    // land
    if( lndComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_RegisterApplication( "LND1", &lndComm, &cmplnd, cmpLndPID );
        CHECKIERR( ierr, "Cannot register LND App " )
    }
    ierr = setup_component_coupler_meshes( cmpLndPID, cmplnd, cplLndPID, cpllnd, &lndComm, &lndPEGroup, &couComm,
                                           &couPEGroup, &lndCouComm, lndFilename, readoptsPhysAtm, nghlay,
                                           repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate lnd mesh" )
    if( couComm != MPI_COMM_NULL )
    {
        char outputFileLnd[] = "recvLnd2.h5m";

        ierr = iMOAB_WriteMesh( cplLndPID, outputFileLnd, fileWriteOptions );
        CHECKIERR( ierr, "cannot write lnd mesh after receiving" )
    }

#endif  // ENABLE_ATMLND_COUPLING

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // now compute the intersection between OCNx and ATMx on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register ocn_atm intx over coupler pes " )
    }
#endif

    const char* weights_identifiers[2] = { "scalar", "scalar-pc" };
    int disc_orders[3]                 = { 4, 1, 1 };
    const char* disc_methods[3]        = { "cgll", "fv", "pcloud" };
    const char* dof_tag_names[3]       = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
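    // index 0 describes the spectral atm source (continuous GLL of order 4, with DoFs
    // in the GLOBAL_DOFS tag), index 1 the ocn target (finite volume, order 1, keyed
    // on GLOBAL_ID), and index 2 the phys atm point cloud (also keyed on GLOBAL_ID)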
#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_ComputeMeshIntersectionOnSphere(
            cplAtmPID, cplOcnPID,
            cplAtmOcnPID );  // the coverage mesh was computed here, for cplAtmPID, atm on coupler pes
        // basically, atm was redistributed according to the target (ocean) partition, to "cover"
        // the ocean partitions; check that the intx is valid, write some h5m intx file
        CHECKIERR( ierr, "cannot compute intersection" )
    }

    if( atmCouComm != MPI_COMM_NULL )
    {
        // the new graph will be for sending data from the atm comp to the coverage mesh;
        // it involves the initial atm app (cmpAtmPID) and the atm mesh migrated to the coupler
        // pes (cplAtmPID); the results are in cplAtmOcnPID, the intx mesh; the remapper also
        // has some info about the coverage mesh
        // after this, sending tags from the atm pes to the coupler pes will use the new par
        // comm graph, which has more precise info about what to send for the ocean cover;
        // every time, we will use the element global id, which should uniquely identify the
        // element
        ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID, &cmpatm, &cplatm,
                                    &cplocn );  // it happens over the joint communicator
        CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
    }

    // need to compute the graph between phys atm and the atm/ocn intx coverage
    if( atmCouComm != MPI_COMM_NULL )
    {
        int typeA = 2;  // point cloud
        int typeB = 1;  // quads in coverage set
        ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplAtmOcnPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA, &typeB,
                                       &cmpatm, &atmocnid );
        CHECKIERR( ierr, "cannot compute comm graph between phys atm and atm/ocn intersection" )
    }
#endif

#ifdef ENABLE_ATMLND_COUPLING
    // we will compute the comm graph between atm phys and land directly; we do not need any
    // intersection; later, we will send data from atm phys to land on the coupler, then back
    // to the land comp
    if( atmCouComm != MPI_COMM_NULL )
    {
        int typeA = 2;  // point cloud
        int typeB = 2;  // point cloud for land on coupler, too
        ierr = iMOAB_ComputeCommGraph( cmpPhAtmPID, cplLndPID, &atmCouComm, &atmPEGroup, &couPEGroup, &typeA, &typeB,
                                       &cmpatm, &cpllnd );
        CHECKIERR( ierr, "cannot compute comm graph between phys atm and land on coupler" )
    }
#endif
    MPI_Barrier( MPI_COMM_WORLD );

    int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 1, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;
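    // these flags are forwarded to TempestRemap when computing the weights; as we
    // understand them (see the iMOAB docs for the authoritative meanings), they select
    // the monotonicity type, volumetric projection, validation of the computed weights,
    // skipping conservation enforcement, disabling the bubble correction for CGLL
    // sources, and inverse-distance mapping, respectively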

#ifdef ENABLE_ATMOCN_COUPLING
#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL )
    {
        char serialWriteOptions[] = "";  // for writing in serial
        std::stringstream outf;
        outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
        std::string intxfile = outf.str();  // write the intx file in serial, for debugging
        ierr                 = iMOAB_WriteMesh( cplAtmOcnPID, intxfile.c_str(), serialWriteOptions );
        CHECKIERR( ierr, "cannot write intx file result" )
    }
#endif

    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute the projection weights with TempestRemap" )
        ierr = iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0], disc_methods[0],
                                                     &disc_orders[0], disc_methods[1], &disc_orders[1], &fNoBubble,
                                                     &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap, &fNoConserve,
                                                     &fValidate, dof_tag_names[0], dof_tag_names[1] );
        CHECKIERR( ierr, "cannot compute scalar projection weights" )
        POP_TIMER( couComm, rankInCouComm )
    }

#endif

    MPI_Barrier( MPI_COMM_WORLD );

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;
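    // a cGLL element of order p carries p * p degrees of freedom (here 4 * 4 = 16),
    // while an FV cell carries a single value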

    const char* bottomFields =
        "Sa_z:Sa_topo:Sa_u:Sa_v:Sa_tbot:Sa_ptem:Sa_shum:Sa_pbot:Sa_dens:Sa_uovern:Sa_pslv:Sa_co2prog:Sa_co2diag:Faxa_"
        "rainc:Faxa_rainl:Faxa_snowc:Faxa_snowl:Faxa_lwdn:Faxa_swndr:Faxa_swvdr:Faxa_swndf:Faxa_swvdf:Faxa_swnet:Faxa_"
        "bcphidry:Faxa_bcphodry:Faxa_bcphiwet:Faxa_ocphidry:Faxa_ocphodry:Faxa_ocphiwet:Faxa_dstwet1:Faxa_dstwet2:Faxa_"
        "dstwet3:Faxa_dstwet4:Faxa_dstdry1:Faxa_dstdry2:Faxa_dstdry3:Faxa_dstdry4";
    const char* bottomFieldsExt =
        "Sa_z_ext:Sa_topo_ext:Sa_u_ext:Sa_v_ext:Sa_tbot_ext:Sa_ptem_ext:Sa_shum_ext:Sa_pbot_ext:Sa_dens_ext:Sa_uovern_"
        "ext:Sa_pslv_ext:Sa_co2prog_ext:Sa_co2diag_ext:Faxa_rainc_ext:Faxa_rainl_ext:Faxa_snowc_ext:Faxa_snowl_ext:"
        "Faxa_lwdn_ext:Faxa_swndr_ext:Faxa_swvdr_ext:Faxa_swndf_ext:Faxa_swvdf_ext:Faxa_swnet_ext:Faxa_bcphidry_ext:"
        "Faxa_bcphodry_ext:Faxa_bcphiwet_ext:Faxa_ocphidry_ext:Faxa_ocphodry_ext:Faxa_ocphiwet_ext:Faxa_dstwet1_ext:"
        "Faxa_dstwet2_ext:Faxa_dstwet3_ext:Faxa_dstwet4_ext:Faxa_dstdry1_ext:Faxa_dstdry2_ext:Faxa_dstdry3_ext:Faxa_"
        "dstdry4_ext";
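    // both lists name 37 colon-separated fields; the "_ext" variants are the copies that
    // live on the spectral atm mesh on the coupler, and the count of 37 must match the
    // multiplier used when sizing the tag storage below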

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFieldsExt, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the extended field tags on the atm coupler mesh" );
#ifdef ENABLE_ATMOCN_COUPLING
        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tags on the ocn coupler mesh" );
#endif

#ifdef ENABLE_ATMLND_COUPLING
        // need to define tag storage for land; use the same field names, because they will be
        // sent between point clouds! use the same ndofs and the same size as ocnCompNDoFs (1)!
        ierr = iMOAB_DefineTagStorage( cplLndPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the bottomFields tag on coupler land" );
#endif
    }
    // need to make sure that the coverage mesh (created during the intx method) received the
    // tags that need to be projected to the target; so far, the coverage mesh has only the
    // ids and global dofs; the migrate method would need changes to accommodate any GLL tag
    // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage
    // mesh (cplAtmPID), using the new coverage graph communicator

    // set the tags to 0, to check that we are actually sending the needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance, which
             * will contain the original cells in the local partition and ghost entities. The
             * numbers of vertices, primary cells, visible blocks, sideset and nodeset boundary
             * conditions are returned in arrays of size 3, for local, ghost and total counts.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem * 37;  // 37 fields in bottomFieldsExt
            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;
            int eetype = 1;
            ierr       = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFieldsExt, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot make tags null" )
        }
    }

#ifdef ENABLE_ATMOCN_COUPLING
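    // tag migration protocol used below: the sender posts nonblocking sends with
    // iMOAB_SendElementTag (the context argument selects the par comm graph), the
    // receiver posts matching iMOAB_ReceiveElementTag calls, and only then does the
    // sender release its buffers with iMOAB_FreeSenderBuffers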
    PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
    if( atmComm != MPI_COMM_NULL )
    {
        // as always, use nonblocking sends
        // this is for projection to the ocean:
        ierr = iMOAB_SendElementTag( cmpPhAtmPID, bottomFields, &atmCouComm, &atmocnid );
        CHECKIERR( ierr, "cannot send tag values" )
#ifdef VERBOSE
        int is_sender = 1;
        int context   = atmocnid;  // used to identify the parcommgraph in charge
        iMOAB_DumpCommGraph( cmpPhAtmPID, &context, &is_sender, "PhysAtmA2OS" );
#endif
    }
    if( couComm != MPI_COMM_NULL )
    {
        // receive on atm on coupler pes, which was redistributed according to the coverage
        ierr = iMOAB_ReceiveElementTag( cplAtmOcnPID, bottomFieldsExt, &atmCouComm, &cmpatm );
        CHECKIERR( ierr, "cannot receive tag values" )
#ifdef VERBOSE
        int is_sender = 0;
        int context   = cmpatm;  // used to identify the parcommgraph in charge
        iMOAB_DumpCommGraph( cplAtmOcnPID, &context, &is_sender, "PhysAtmA2OR" );
#endif
    }
    POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )

    // we can now free the sender buffers
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpPhAtmPID, &atmocnid );  // context is for ocean
        CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
    }
    /*
    #ifdef VERBOSE
      if (couComm != MPI_COMM_NULL) {
        char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
        ierr = iMOAB_WriteMesh(cplAtmPID, outputFileRecvd, fileWriteOptions);
      }
    #endif
    */

    if( couComm != MPI_COMM_NULL )
    {
        /* We have the remapping weights now. Let us apply the weights onto the tag we defined
           on the source mesh and get the projection on the target mesh */
        PUSH_TIMER( "Apply Scalar projection weights" )
        ierr =
            iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0], bottomFieldsExt, bottomFields );
        CHECKIERR( ierr, "failed to apply the projection weights" );
        POP_TIMER( couComm, rankInCouComm )

        char outputFileTgt[] = "fOcnOnCpl7.h5m";
        ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
        CHECKIERR( ierr, "cannot write the ocn mesh on coupler with the projected fields" )
    }
    // send the projected tag back to the ocean pes, with send/receive tag
    if( ocnComm != MPI_COMM_NULL )
    {
        int tagIndexIn2;
        ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
        CHECKIERR( ierr, "failed to define the field tags for receiving back the atm tags on ocn pes" );
    }
    // send the tag to the ocean pes, from the ocean mesh on the coupler pes,
    // using the common joint comm ocn_coupler; as always, use nonblocking sends
    // (the context here is the ocean component id, cmpocn)
    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpocn;
        ierr       = iMOAB_SendElementTag( cplOcnPID, bottomFields, &ocnCouComm, &context_id );
        CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
    }

    // receive on component 2, ocean
    if( ocnComm != MPI_COMM_NULL )
    {
        context_id = cplocn;
        ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, bottomFields, &ocnCouComm, &context_id );
        CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
    }
    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmpocn;
        ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers used to send the ocn tag" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        char outputFileOcn[] = "OcnWithProj2.h5m";
        ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
        CHECKIERR( ierr, "cannot write the ocn mesh with the projected fields" )
    }

#endif

#ifdef ENABLE_ATMLND_COUPLING
    // start land projection:
    // this reuses the comm graph computed earlier with
    //   iMOAB_ComputeCommGraph( cmpPhAtmPID, cplLndPID, &atmCouComm, &atmPEGroup, &couPEGroup,
    //                           &typeA, &typeB, &cmpatm, &cpllnd );

    PUSH_TIMER( "Send/receive data from phys comp atm to coupler land, using computed graph" )
    if( atmComm != MPI_COMM_NULL )
    {
        // as always, use nonblocking sends
        // this is for projection to land:
        ierr = iMOAB_SendElementTag( cmpPhAtmPID, bottomFields, &atmCouComm, &cpllnd );
        CHECKIERR( ierr, "cannot send tag values towards cpl on land" )
    }
    if( couComm != MPI_COMM_NULL )
    {
        // receive on lnd on coupler pes
        ierr = iMOAB_ReceiveElementTag( cplLndPID, bottomFields, &atmCouComm, &cmpatm );
        CHECKIERR( ierr, "cannot receive tag values on land on coupler" )
    }
    POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )

    // we can now free the sender buffers
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_FreeSenderBuffers( cmpPhAtmPID, &cpllnd );
        CHECKIERR( ierr, "cannot free buffers used to send atm tag towards the land on coupler" )
    }

#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL )
    {
        char outputFileTgtLnd[] = "fLndOnCpl.h5m";
        ierr                    = iMOAB_WriteMesh( cplLndPID, outputFileTgtLnd, fileWriteOptions );
        CHECKIERR( ierr, "cannot write land on coupler with the received fields" )
    }
#endif

    // end land projection
    // now send the tags back to the land pes, from the land mesh on the coupler pes
    // (from cplLndPID to cmpLndPID), using the common joint comm; as always, use
    // nonblocking sends
    // the land component might not have these tags yet; in e3sm the land fields have
    // different names, but here we reuse the same ones
    if( lndComm != MPI_COMM_NULL )
    {
        int tagIndexIn2;
        ierr = iMOAB_DefineTagStorage( cmpLndPID, bottomFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
        CHECKIERR( ierr, "failed to define the field tags for receiving back the tags on lnd pes" );
    }
    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmplnd;
        ierr       = iMOAB_SendElementTag( cplLndPID, bottomFields, &lndCouComm, &context_id );
        CHECKIERR( ierr, "cannot send tag values back to land pes" )
    }
    // receive on component 3, land
    if( lndComm != MPI_COMM_NULL )
    {
        context_id = cpllnd;  // receive from coupler on land
        ierr       = iMOAB_ReceiveElementTag( cmpLndPID, bottomFields, &lndCouComm, &context_id );
        CHECKIERR( ierr, "cannot receive tag values from land mesh on coupler pes" )
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( couComm != MPI_COMM_NULL )
    {
        context_id = cmplnd;
        ierr       = iMOAB_FreeSenderBuffers( cplLndPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers used to send the land tag" )
    }
    if( lndComm != MPI_COMM_NULL )
    {
        char outputFileLnd[] = "LndWithProj2.h5m";
        ierr                 = iMOAB_WriteMesh( cmpLndPID, outputFileLnd, fileWriteOptions );
        CHECKIERR( ierr, "cannot write the land mesh with the projected fields" )
    }

#endif  // ENABLE_ATMLND_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
#endif

#ifdef ENABLE_ATMLND_COUPLING
    if( lndComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpLndPID );
        CHECKIERR( ierr, "cannot deregister app LND1" )
    }
#endif  // ENABLE_ATMLND_COUPLING
    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpPhAtmPID );
        CHECKIERR( ierr, "cannot deregister app PhysAtm " )
    }
#ifdef ENABLE_ATMOCN_COUPLING
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

#ifdef ENABLE_ATMLND_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplLndPID );
        CHECKIERR( ierr, "cannot deregister app LNDX" )
    }
#endif  // ENABLE_ATMLND_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free atm coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

#ifdef ENABLE_ATMOCN_COUPLING
    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );
#endif

#ifdef ENABLE_ATMLND_COUPLING
    if( MPI_COMM_NULL != lndComm ) MPI_Comm_free( &lndComm );
    // free land - coupler group and comm
    if( MPI_COMM_NULL != lndCouComm ) MPI_Comm_free( &lndCouComm );
    MPI_Group_free( &joinLndCouGroup );
#endif

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );
#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group_free( &ocnPEGroup );
#endif
#ifdef ENABLE_ATMLND_COUPLING
    MPI_Group_free( &lndPEGroup );
#endif
    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}