MOAB: Mesh Oriented datABase  (version 5.2.1)
imoab_coupler.cpp
00001 /*
00002  * This imoab_coupler test simulates coupling between 3 components.
00003  * 3 meshes are loaded from 3 files (atm, ocean, lnd) and migrated to
00004  * the coupler processors (coupler pes); then, intersection (intx) is performed between the
00005  * migrated meshes and projection weights are generated, so that a field from one component
00006  * can be transferred to the other component.
00007  * Currently, the atm sends some data to be projected to the ocean and land components.
00008  *
00009  * First, intersect atm and ocn, and recompute comm graph 1 between atm and atm_cx, for the ocn intx;
00010  * second, intersect atm and lnd, and recompute comm graph 2 between atm and atm_cx, for the lnd intx.
00011  *
00012  */
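/*
 * The main stages exercised below, all through the iMOAB C interface, are:
 *   1) register each component (ATM, OCN, LND) and its copy on the coupler PEs (ATMX, OCNX, LNDX)
 *   2) load each mesh on its component PEs and migrate it to the coupler PEs
 *   3) on the coupler PEs, compute the atm-ocn intersection and the atm-lnd point-cloud mapping
 *   4) recompute the coverage communication graphs, so the atm sends only what each target needs
 *   5) compute projection weights with TempestRemap, send the atm tags to the coupler, apply the
 *      weights, and send the projected tags back to the ocean and land component PEs
 */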
00013 
00014 #include "moab/Core.hpp"
00015 #ifndef MOAB_HAVE_MPI
00016 #error mbtempest tool requires MPI configuration
00017 #endif
00018 
00019 // MPI includes
00020 #include "moab_mpi.h"
00021 #include "moab/ParallelComm.hpp"
00022 #include "MBParallelConventions.h"
00023 
00024 #include "moab/iMOAB.h"
00025 #include "TestUtil.hpp"
00026 #include "moab/CpuTimer.hpp"
00027 #include "moab/ProgOptions.hpp"
00028 #include <iostream>
00029 #include <sstream>
00030 
00031 #include "imoab_coupler_utils.hpp"
00032 
00033 using namespace moab;
00034 
00035 //#define GRAPH_INFO
00036 
00037 #ifndef MOAB_HAVE_TEMPESTREMAP
00038 #error The climate coupler test example requires MOAB configuration with TempestRemap
00039 #endif
00040 
00041 #define ENABLE_ATMOCN_COUPLING
00042 #define ENABLE_ATMLND_COUPLING
00043 
00044 #if( !defined( ENABLE_ATMOCN_COUPLING ) && !defined( ENABLE_ATMLND_COUPLING ) )
00045 #error Enable either OCN (ENABLE_ATMOCN_COUPLING) and/or LND (ENABLE_ATMLND_COUPLING) for coupling
00046 #endif
00047 
00048 int main( int argc, char* argv[] )
00049 {
00050     int ierr;
00051     int rankInGlobalComm, numProcesses;
00052     MPI_Group jgroup;
00053     std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
00054     std::string readoptsLnd( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );
00055 
00056     // Timer data
00057     moab::CpuTimer timer;
00058     double timer_ops;
00059     std::string opName;
00060 
00061     int repartitioner_scheme = 0;
00062 #ifdef MOAB_HAVE_ZOLTAN
00063     repartitioner_scheme = 2;  // use a Zoltan-based partitioner (2 = geometry) in that case
00064 #endif
00065 
00066     MPI_Init( &argc, &argv );
00067     MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
00068     MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );
00069 
00070     MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup
00071 
00072     std::string atmFilename = TestDir + "/wholeATM_T.h5m";
00073     // in a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
00074     // the intx atm/ocn id is not in e3sm yet, so give it a number:
00075     //   6 * 100 + 18 = 618 : atmocnid
00076     // 9 LND, 10 CPLLND
00077     //   6 * 100 + 10 = 610 : atmlndid
00078     // cmpatm is for atm on atm pes
00079     // cmpocn is for ocean, on ocean pes
00080     // cplatm is for atm on coupler pes
00081     // cplocn is for ocean on coupler pes
00082     // atmocnid is for intx atm / ocn on coupler pes
00083     //
00084     int rankInAtmComm = -1;
00085     int cmpatm        = 5,
00086         cplatm        = 6;  // component ids are unique over all pes, and established in advance;
00087 #ifdef ENABLE_ATMOCN_COUPLING
00088     std::string ocnFilename = TestDir + "/recMeshOcn.h5m";
00089     int rankInOcnComm       = -1;
00090     int cmpocn = 17, cplocn = 18,
00091         atmocnid = 618;  // component ids are unique over all pes, and established in advance;
00092 #endif
00093 
00094 #ifdef ENABLE_ATMLND_COUPLING
00095     std::string lndFilename = TestDir + "/wholeLnd.h5m";
00096     int cpllnd = 10, cmplnd = 9,
00097         atmlndid = 610;  // component ids are unique over all pes, and established in advance;
00098 #endif
00099 
00100     int rankInCouComm = -1;
00101 
00102     int nghlay = 0;  // number of ghost layers for loading the file
00103     std::vector< int > groupTasks;
00104     int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1, startG3 = startG1,
00105         endG3   = endG1;                   // Support launch of imoab_coupler test on any combo of 2*x processes
00106     int startG4 = startG1, endG4 = endG1;  // these are for coupler layout
00107     int context_id = -1;                   // used now for freeing buffers
00108 
00109     // default: load atm on 2 proc, ocean on 2, land on 2; migrate to 2 procs, then compute intx
00110     // later, we need to compute weight matrix with tempestremap
00111 
00112     ProgOptions opts;
00113     opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
00114 #ifdef ENABLE_ATMOCN_COUPLING
00115     opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
00116 #endif
00117 #ifdef ENABLE_ATMLND_COUPLING
00118     opts.addOpt< std::string >( "land,l", "land mesh filename (target)", &lndFilename );
00119 #endif
00120     opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
00121     opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
00122 #ifdef ENABLE_ATMOCN_COUPLING
00123     opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
00124     opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
00125 #endif
00126 #ifdef ENABLE_ATMLND_COUPLING
00127     opts.addOpt< int >( "startLnd,e", "start task for land layout", &startG3 );
00128     opts.addOpt< int >( "endLnd,f", "end task for land layout", &endG3 );
00129 #endif
00130 
00131     opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
00132     opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );
00133 
00134     opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );
00135 
00136     int n = 1;  // number of send/receive / project / send back cycles
00137     opts.addOpt< int >( "iterations,n", "number of iterations for coupler", &n );
00138 
00139     opts.parseCommandLine( argc, argv );
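    // A hypothetical launch (the executable name and task counts are only an example), with atm on
    // tasks 0-1, ocn on tasks 2-3, lnd on tasks 0-1 and the coupler on all 4 tasks, might look like:
    //   mpiexec -n 4 ./imoab_coupler -a 0 -b 1 -c 2 -d 3 -e 0 -f 1 -g 0 -j 3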
00140 
00141     char fileWriteOptions[] = "PARALLEL=WRITE_PART";
00142 
00143     if( !rankInGlobalComm )
00144     {
00145         std::cout << " atm file: " << atmFilename << "\n   on tasks : " << startG1 << ":" << endG1 <<
00146 #ifdef ENABLE_ATMOCN_COUPLING
00147             "\n ocn file: " << ocnFilename << "\n     on tasks : " << startG2 << ":" << endG2 <<
00148 #endif
00149 #ifdef ENABLE_ATMLND_COUPLING
00150             "\n lnd file: " << lndFilename << "\n     on tasks : " << startG3 << ":" << endG3 <<
00151 #endif
00152             "\n  partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n  ";
00153     }
00154 
00155     // load files on 3 different communicators (groups)
00156     // for example, the first group may have only task 0, and the second group tasks 0 and 1
00157     // the coupler will be on the joint tasks, on a third group (0 and 1, again)
00158     // the task ranges of the groups can be changed with the command-line options above
00159     // (by default every group spans all available tasks)
00160     MPI_Group atmPEGroup;
00161     MPI_Comm atmComm;
00162     ierr = create_group_and_comm(startG1, endG1, jgroup, &atmPEGroup, &atmComm);
00163     CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
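    // create_group_and_comm comes from imoab_coupler_utils.hpp; conceptually (a sketch, not
    // necessarily the exact implementation) it builds a group from a task range and a
    // communicator restricted to that group, e.g.:
    //   int range[1][3] = { { startG1, endG1, 1 } };            // tasks startG1..endG1, stride 1
    //   MPI_Group_range_incl( jgroup, 1, range, &atmPEGroup );  // subset of the world group
    //   MPI_Comm_create_group( MPI_COMM_WORLD, atmPEGroup, 0, &atmComm );  // valid only on those tasks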
00164 
00165 #ifdef ENABLE_ATMOCN_COUPLING
00166     MPI_Group ocnPEGroup;
00167     MPI_Comm ocnComm;
00168     ierr = create_group_and_comm(startG2, endG2, jgroup, &ocnPEGroup, &ocnComm);
00169     CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
00170 #endif
00171 
00172 #ifdef ENABLE_ATMLND_COUPLING
00173     MPI_Group lndPEGroup;
00174     MPI_Comm lndComm;
00175     ierr = create_group_and_comm(startG3, endG3, jgroup, &lndPEGroup, &lndComm);
00176     CHECKIERR( ierr, "Cannot create lnd MPI group and communicator " )
00177 #endif
00178 
00179     // we will always have a coupler
00180     MPI_Group couPEGroup;
00181     MPI_Comm couComm;
00182     ierr = create_group_and_comm(startG4, endG4, jgroup, &couPEGroup, &couComm);
00183     CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )
00184 
00185     // atm_coupler
00186     MPI_Group joinAtmCouGroup;
00187     MPI_Comm atmCouComm;
00188     ierr = create_joint_comm_group(atmPEGroup, couPEGroup,  &joinAtmCouGroup, &atmCouComm);
00189     CHECKIERR( ierr, "Cannot create joint atm cou communicator" )
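    // create_joint_comm_group (also from imoab_coupler_utils.hpp) presumably forms the union of the
    // two groups and a communicator over it, along the lines of this sketch:
    //   MPI_Group_union( atmPEGroup, couPEGroup, &joinAtmCouGroup );
    //   MPI_Comm_create_group( MPI_COMM_WORLD, joinAtmCouGroup, 0, &atmCouComm );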
00190 
00191 #ifdef ENABLE_ATMOCN_COUPLING
00192     // ocn_coupler
00193     MPI_Group joinOcnCouGroup;
00194     MPI_Comm ocnCouComm;
00195     ierr = create_joint_comm_group(ocnPEGroup, couPEGroup,  &joinOcnCouGroup, &ocnCouComm);
00196     CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
00197 #endif
00198 
00199 #ifdef ENABLE_ATMLND_COUPLING
00200     // lnd_coupler
00201     MPI_Group joinLndCouGroup;
00202     MPI_Comm lndCouComm;
00203     ierr = create_joint_comm_group(lndPEGroup, couPEGroup,  &joinLndCouGroup, &lndCouComm);
00204     CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
00205 #endif
00206 
00207     ierr = iMOAB_Initialize( argc, argv );  // nothing is really needed from argc, argv yet; maybe it will be later
00208     CHECKIERR( ierr, "Cannot initialize iMOAB" )
00209 
00210     int cmpAtmAppID       = -1;
00211     iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
00212     int cplAtmAppID       = -1;            // -1 means it is not initialized
00213     iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs
00214 #ifdef ENABLE_ATMOCN_COUPLING
00215     int cmpOcnAppID       = -1;
00216     iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
00217     int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
00218     iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
00219     iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm -ocn on coupler PEs
00220 #endif
00221 
00222 #ifdef ENABLE_ATMLND_COUPLING
00223     int cmpLndAppID       = -1;
00224     iMOAB_AppID cmpLndPID = &cmpLndAppID;        // lnd
00225     int cplLndAppID = -1, cplAtmLndAppID = -1;   // -1 means it is not initialized
00226     iMOAB_AppID cplLndPID    = &cplLndAppID;     // land on coupler PEs
00227     iMOAB_AppID cplAtmLndPID = &cplAtmLndAppID;  // intx atm - lnd on coupler PEs
00228 #endif
00229 
00230     if( couComm != MPI_COMM_NULL )
00231     {
00232         MPI_Comm_rank( couComm, &rankInCouComm );
00233         // Register all the applications on the coupler PEs
00234         ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
00235                                           cplAtmPID );  // atm on coupler pes
00236         CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
00237 #ifdef ENABLE_ATMOCN_COUPLING
00238         ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
00239                                           cplOcnPID );  // ocn on coupler pes
00240         CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
00241 #endif
00242 #ifdef ENABLE_ATMLND_COUPLING
00243         ierr = iMOAB_RegisterApplication( "LNDX", &couComm, &cpllnd,
00244                                           cplLndPID );  // lnd on coupler pes
00245         CHECKIERR( ierr, "Cannot register LND over coupler PEs" )
00246 #endif
00247     }
00248 
00249     if( atmComm != MPI_COMM_NULL )
00250     {
00251         MPI_Comm_rank( atmComm, &rankInAtmComm );
00252         ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
00253         CHECKIERR( ierr, "Cannot register ATM App" )
00254     }
00255 
00256 #ifdef ENABLE_ATMOCN_COUPLING
00257     if( ocnComm != MPI_COMM_NULL )
00258     {
00259         MPI_Comm_rank( ocnComm, &rankInOcnComm );
00260         ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
00261         CHECKIERR( ierr, "Cannot register OCN App" )
00262     }
00263 #endif
00264 
00265     //atm
00266     ierr = setup_component_coupler_meshes(cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
00267              &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme);
00268     CHECKIERR( ierr, "Cannot load and migrate atm mesh" )
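    // setup_component_coupler_meshes (imoab_coupler_utils.hpp) bundles, roughly, the per-component
    // steps used in this test: load the mesh with the given read options on the component
    // communicator, then migrate it to the coupler PEs over the joint communicator (sent from the
    // component side, received on the coupler side), repartitioning it with the requested scheme.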
00269 #ifdef GRAPH_INFO
00270     if( atmComm != MPI_COMM_NULL )
00271     {
00272 
00273         int is_sender = 1;
00274         int context   = -1;
00275         iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmMigS", strlen( "AtmMigS" ) );
00276     }
00277     if( couComm != MPI_COMM_NULL )
00278     {
00279         int is_sender = 0;
00280         int context   = -1;
00281         iMOAB_DumpCommGraph( cplAtmPID, &context, &is_sender, "AtmMigR", strlen( "AtmMigR" ) );
00282     }
00283 #endif
00284     MPI_Barrier( MPI_COMM_WORLD );
00285 
00286 #ifdef ENABLE_ATMOCN_COUPLING
00287     // ocean
00288     ierr = setup_component_coupler_meshes(cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm,  &ocnPEGroup, &couComm,
00289              &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme);
00290     CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )
00291 
00292     MPI_Barrier( MPI_COMM_WORLD );
00293 
00294 #ifdef VERBOSE
00295     if( couComm != MPI_COMM_NULL && 1 == n )
00296     {  // write only for n==1 case
00297         char outputFileTgt3[] = "recvOcn.h5m";
00298         ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions, strlen( outputFileTgt3 ),
00299                                 strlen( fileWriteOptions ) );
00300         CHECKIERR( ierr, "cannot write ocn mesh after receiving" )
00301     }
00302 #endif
00303 #endif  // #ifdef ENABLE_ATMOCN_COUPLING
00304 
00305 #ifdef ENABLE_ATMLND_COUPLING
00306     // land
00307     if( lndComm != MPI_COMM_NULL )
00308     {
00309         ierr = iMOAB_RegisterApplication( "LND1", &lndComm, &cmplnd, cmpLndPID );
00310         CHECKIERR( ierr, "Cannot register LND App " )
00311     }
00312     ierr = setup_component_coupler_meshes(cmpLndPID, cmplnd, cplLndPID, cpllnd, &lndComm,  &lndPEGroup, &couComm,
00313              &couPEGroup, &lndCouComm, lndFilename, readoptsLnd, nghlay, repartitioner_scheme);
00314     CHECKIERR( ierr, "Cannot load and migrate lnd mesh" )
00315     if( couComm != MPI_COMM_NULL && 1 == n )
00316     {  // write only for n==1 case
00317         char outputFileLnd[] = "recvLnd.h5m";
00318         ierr = iMOAB_WriteMesh( cplLndPID, outputFileLnd, fileWriteOptions, strlen( outputFileLnd ),
00319                                 strlen( fileWriteOptions ) );
00320         CHECKIERR( ierr, "cannot write lnd mesh after receiving" )
00321     }
00322 
00323 #endif  // #ifdef ENABLE_ATMLND_COUPLING
00324 
00325     MPI_Barrier( MPI_COMM_WORLD );
00326 
00327 #ifdef ENABLE_ATMOCN_COUPLING
00328     if( couComm != MPI_COMM_NULL )
00329     {
00330         // register the application that will hold the intersection between ATMx and OCNx on coupler PEs
00331         ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
00332         CHECKIERR( ierr, "Cannot register atm_ocn intx over coupler pes " )
00333     }
00334 #endif
00335 #ifdef ENABLE_ATMLND_COUPLING
00336     if( couComm != MPI_COMM_NULL )
00337     {
00338         // register the application that will hold the intersection between ATMx and LNDx on coupler PEs
00339         ierr = iMOAB_RegisterApplication( "ATMLND", &couComm, &atmlndid, cplAtmLndPID );
00340         CHECKIERR( ierr, "Cannot register atm_lnd intx over coupler pes " )
00341     }
00342 #endif
00343 
00344     int disc_orders[3]                       = { 4, 1, 1 };
00345     const std::string weights_identifiers[2] = { "scalar", "scalar-pc" };
00346     const std::string disc_methods[3]        = { "cgll", "fv", "pcloud" };
00347     const std::string dof_tag_names[3]       = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
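    // index 0 describes the atm source (method "cgll", order 4, DoFs in the GLOBAL_DOFS tag),
    // index 1 the ocn target (method "fv", order 1, DoFs in GLOBAL_ID), and index 2 the lnd
    // target (point cloud, "pcloud", DoFs in GLOBAL_ID); these triples are passed to the
    // weight-generation calls below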
00348 #ifdef ENABLE_ATMOCN_COUPLING
00349     if( couComm != MPI_COMM_NULL )
00350     {
00351         PUSH_TIMER( "Compute ATM-OCN mesh intersection" )
00352         ierr = iMOAB_ComputeMeshIntersectionOnSphere(
00353             cplAtmPID, cplOcnPID,
00354             cplAtmOcnPID );  // coverage mesh was computed here, for cplAtmPID, atm on coupler pes
00355         // basically, atm was redistributed according to the target (ocean) partition, to "cover" the
00356         // ocean partitions; check if the intx is valid, write some h5m intx file
00357         CHECKIERR( ierr, "cannot compute intersection" )
00358         POP_TIMER( couComm, rankInCouComm )
00359     }
00360 
00361     if( atmCouComm != MPI_COMM_NULL )
00362     {
00363 
00364         // the new graph will be for sending data from the atm component to the coverage mesh;
00365         // it involves the initial atm app, cmpAtmPID, and the migrated atm mesh on coupler pes, cplAtmPID;
00366         // the results are in cplAtmOcnPID, the intx mesh; the remapper also has some info about
00367         // the coverage mesh; after this, the sending of tags from atm pes to coupler pes will use
00368         // the new par comm graph, which has more precise info about what to send for the ocean
00369         // cover; every time, we will use the element global id, which should uniquely identify
00370         // the element
00371         PUSH_TIMER( "Compute OCN coverage graph for ATM mesh" )
00372         ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID,
00373                                     &cplocn );  // it happens over joint communicator
00374         CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
00375         POP_TIMER( atmCouComm, rankInAtmComm )  // hijack this rank
00376     }
00377 #endif
00378 
00379 #ifdef ENABLE_ATMLND_COUPLING
00380     if( couComm != MPI_COMM_NULL )
00381     {
00382         PUSH_TIMER( "Compute ATM-LND mesh intersection" )
00383         ierr = iMOAB_ComputePointDoFIntersection( cplAtmPID, cplLndPID, cplAtmLndPID );
00384         CHECKIERR( ierr, "failed to compute point-cloud mapping" );
00385         POP_TIMER( couComm, rankInCouComm )
00386     }
00387     if( atmCouComm != MPI_COMM_NULL )
00388     {
00389         // the new graph will be for sending data from the atm component to the coverage mesh for land;
00390         // it involves the initial atm app, cmpAtmPID, and the migrated atm mesh on coupler pes, cplAtmPID;
00391         // the results are in cplAtmLndPID, the intx mesh; the remapper also has some info about
00392         // the coverage mesh; after this, the sending of tags from atm pes to coupler pes will use
00393         // the new par comm graph, which has more precise info about what to send (specifically
00394         // for the land cover); every time, we will use the element global id, which should
00395         // uniquely identify the element
00396         PUSH_TIMER( "Compute LND coverage graph for ATM mesh" )
00397         ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmLndPID,
00398                                     &cpllnd );  // it happens over joint communicator
00399         CHECKIERR( ierr, "cannot recompute direct coverage graph for land" )
00400         POP_TIMER( atmCouComm, rankInAtmComm )  // hijack this rank
00401     }
00402 #endif
00403 
00404     MPI_Barrier( MPI_COMM_WORLD );
00405 
00406     int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 1, fNoConserve = 0;
00407 
00408 #ifdef ENABLE_ATMOCN_COUPLING
00409 #ifdef VERBOSE
00410     if( couComm != MPI_COMM_NULL && 1 == n )
00411     {                                    // write only for n==1 case
00412         char serialWriteOptions[] = "";  // for writing in serial
00413         std::stringstream outf;
00414         outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
00415         std::string intxfile = outf.str();  // write in serial the intx file, for debugging
00416         ierr = iMOAB_WriteMesh( cplAtmOcnPID, (char*)intxfile.c_str(), serialWriteOptions, (int)intxfile.length(),
00417                                 strlen( serialWriteOptions ) );
00418         CHECKIERR( ierr, "cannot write intx file result" )
00419     }
00420 #endif
00421 
00422     if( couComm != MPI_COMM_NULL )
00423     {
00424         PUSH_TIMER( "Compute the projection weights with TempestRemap" )
00425         ierr = iMOAB_ComputeScalarProjectionWeights(
00426             cplAtmOcnPID, weights_identifiers[0].c_str(), disc_methods[0].c_str(), &disc_orders[0], disc_methods[1].c_str(), &disc_orders[1],
00427             &fMonotoneTypeID, &fVolumetric, &fNoConserve, &fValidate, dof_tag_names[0].c_str(), dof_tag_names[1].c_str(),
00428             weights_identifiers[0].size(), disc_methods[0].size(), disc_methods[1].size(),
00429             dof_tag_names[0].size(), dof_tag_names[1].size() );
00430         CHECKIERR( ierr, "cannot compute scalar projection weights" )
00431         POP_TIMER( couComm, rankInCouComm )
00432 
00433         // Let us now write the map file to disk and then read it back to test the I/O API in iMOAB
00434 #ifdef MOAB_HAVE_NETCDF
00435         {
00436             const std::string atmocn_map_file_name = "atm_ocn_map.nc";
00437             ierr = iMOAB_WriteMappingWeightsToFile( cplAtmOcnPID, weights_identifiers[0].c_str(), atmocn_map_file_name.c_str(),
00438                                                     weights_identifiers[0].size(), atmocn_map_file_name.size() );
00439             CHECKIERR( ierr, "failed to write map file to disk" );
00440 
00441             const std::string intx_from_file_identifier = "map-from-file";
00442             ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, intx_from_file_identifier.c_str(), atmocn_map_file_name.c_str(),
00443                                                      NULL, NULL, NULL, intx_from_file_identifier.size(),
00444                                                      atmocn_map_file_name.size() );
00445             CHECKIERR( ierr, "failed to load map file from disk" );
00446         }
00447 #endif
00448     }
00449 
00450 #endif
00451 
00452     MPI_Barrier( MPI_COMM_WORLD );
00453 
00454 #ifdef ENABLE_ATMLND_COUPLING
00455     if( couComm != MPI_COMM_NULL )
00456     {
00457         /* Compute the weights to project the solution from the ATM component to the LND component */
00458         PUSH_TIMER( "Compute ATM-LND remapping weights" )
00459         ierr = iMOAB_ComputeScalarProjectionWeights(
00460             cplAtmLndPID, weights_identifiers[1].c_str(), disc_methods[0].c_str(), &disc_orders[0],
00461             disc_methods[2].c_str(), &disc_orders[2], &fMonotoneTypeID, &fVolumetric, &fNoConserve, &fValidate,
00462             dof_tag_names[0].c_str(), dof_tag_names[2].c_str(), weights_identifiers[1].size(), disc_methods[0].size(),
00463             disc_methods[2].size(), dof_tag_names[0].size(), dof_tag_names[2].size() );
00464         CHECKIERR( ierr, "failed to compute remapping projection weights for ATM-LND scalar "
00465                          "non-conservative field" );
00466         POP_TIMER( couComm, rankInCouComm )
00467 
00468         // Let us now write the map file to disk and then read it back to test the I/O API in iMOAB
00469         // VSM: TODO: This does not work since the LND model is a point cloud and we do not initialize
00470         // data correctly in the TempestOnlineMap::WriteParallelWeightsToFile routine.
00471         // {
00472         //     const char* atmlnd_map_file_name = "atm_lnd_map.nc";
00473         //     ierr = iMOAB_WriteMappingWeightsToFile( cplAtmLndPID, weights_identifiers[1], atmlnd_map_file_name,
00474         //                                             strlen( weights_identifiers[0] ), strlen( atmlnd_map_file_name ) );
00475         //     CHECKIERR( ierr, "failed to write map file to disk" );
00476 
00477         //     const char* intx_from_file_identifier = "map-from-file";
00478         //     ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmLndPID, intx_from_file_identifier, atmlnd_map_file_name,
00479         //                                              NULL, NULL, NULL, strlen( intx_from_file_identifier ),
00480         //                                              strlen( atmlnd_map_file_name ) );
00481         //     CHECKIERR( ierr, "failed to load map file from disk" );
00482         // }
00483     }
00484 #endif
00485 
00486     int tagIndex[2];
00487     int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
00488     int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;
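    // with disc_orders[0] = 4, each atm spectral element carries 4 * 4 = 16 GLL DoFs per tag,
    // while each ocn (and lnd) entity carries a single value, since the target is FV / point cloud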
00489 
00490     const char* bottomTempField          = "a2oTbot";
00491     const char* bottomTempProjectedField = "a2oTbot_proj";
00492     // Define more fields
00493     const char* bottomUVelField          = "a2oUbot";
00494     const char* bottomUVelProjectedField = "a2oUbot_proj";
00495     const char* bottomVVelField          = "a2oVbot";
00496     const char* bottomVVelProjectedField = "a2oVbot_proj";
00497 
00498     if( couComm != MPI_COMM_NULL )
00499     {
00500         ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0],
00501                                        strlen( bottomTempField ) );
00502         CHECKIERR( ierr, "failed to define the field tag a2oTbot" );
00503 #ifdef ENABLE_ATMOCN_COUPLING
00504 
00505         ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1],
00506                                        strlen( bottomTempProjectedField ) );
00507         CHECKIERR( ierr, "failed to define the field tag a2oTbot_proj" );
00508 #endif
00509 
00510         ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomUVelField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0],
00511                                        strlen( bottomUVelField ) );
00512         CHECKIERR( ierr, "failed to define the field tag a2oUbot" );
00513 #ifdef ENABLE_ATMOCN_COUPLING
00514 
00515         ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomUVelProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1],
00516                                        strlen( bottomUVelProjectedField ) );
00517         CHECKIERR( ierr, "failed to define the field tag a2oUbot_proj" );
00518 #endif
00519 
00520         ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomVVelField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0],
00521                                        strlen( bottomVVelField ) );
00522         CHECKIERR( ierr, "failed to define the field tag a2oVbot" );
00523 #ifdef ENABLE_ATMOCN_COUPLING
00524         ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomVVelProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1],
00525                                        strlen( bottomVVelProjectedField ) );
00526         CHECKIERR( ierr, "failed to define the field tag a2oVbot_proj" );
00527 #endif
00528     }
00529 
00530     // need to make sure that the coverage mesh (created during the intx method) receives the tags
00531     // that need to be projected to the target; so far, the coverage mesh has only the ids and global dofs;
00532     // need to change the migrate method to accommodate any GLL tag
00533     // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh
00534     // (cplAtmPID), using the new coverage graph communicator
00535 
00536     // make the tag 0, to check we are actually sending needed data
00537     {
00538         if( cplAtmAppID >= 0 )
00539         {
00540             int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
00541             /*
00542              * Each process in the communicator will have access to a local mesh instance, which
00543              * will contain the original cells in the local partition and ghost entities. Number of
00544              * vertices, primary cells, visible blocks, and sideset and nodeset boundary
00545              * conditions will be returned in arrays of size 3, holding the local, ghost and total
00546              * numbers.
00547              */
00548             ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
00549             CHECKIERR( ierr, "failed to get num primary elems" );
00550             int numAllElem = nelem[2];
00551             std::vector< double > vals;
00552             int storLeng = atmCompNDoFs * numAllElem;
00553             int eetype   = 1;
00554 
00555             vals.resize( storLeng );
00556             for( int k = 0; k < storLeng; k++ )
00557                 vals[k] = 0.;
00558 
00559             ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomTempField, &storLeng, &eetype, &vals[0],
00560                                               strlen( bottomTempField ) );
00561             CHECKIERR( ierr, "cannot make tag null" )
00562             ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomUVelField, &storLeng, &eetype, &vals[0],
00563                                               strlen( bottomUVelField ) );
00564             CHECKIERR( ierr, "cannot make tag null" )
00565             ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomVVelField, &storLeng, &eetype, &vals[0],
00566                                               strlen( bottomVVelField ) );
00567             CHECKIERR( ierr, "cannot make tag null" )
00568             // set the tag to 0
00569         }
00570     }
00571 
00572     const char* concat_fieldname  = "a2oTbot;a2oUbot;a2oVbot;";
00573     const char* concat_fieldnameT = "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;";
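    // note that the iMOAB tag send/receive and projection calls below accept a single string holding
    // multiple tag names separated by ';', so the three fields are transferred and projected together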
00574 
00575     // start a virtual loop for number of iterations
00576     for( int iters = 0; iters < n; iters++ )
00577     {
00578 #ifdef ENABLE_ATMOCN_COUPLING
00579         PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
00580         if( atmComm != MPI_COMM_NULL )
00581         {
00582             // as always, use nonblocking sends
00583             // this is for projection to ocean:
00584             ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot;a2oUbot;a2oVbot;", &atmCouComm, &cplocn,
00585                                          strlen( "a2oTbot;a2oUbot;a2oVbot;" ) );
00586             CHECKIERR( ierr, "cannot send tag values" )
00587 #ifdef GRAPH_INFO
00588             int is_sender = 1;
00589             int context   = cplocn;
00590             iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnS", strlen( "AtmCovOcnS" ) );
00591 #endif
00592         }
00593         if( couComm != MPI_COMM_NULL )
00594         {
00595             // receive on atm on coupler pes, that was redistributed according to coverage
00596             ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot;a2oUbot;a2oVbot;", &atmCouComm, &cplocn,
00597                                             strlen( "a2oTbot;a2oUbot;a2oVbot;" ) );
00598             CHECKIERR( ierr, "cannot receive tag values" )
00599 #ifdef GRAPH_INFO
00600             int is_sender = 0;
00601             int context   = cplocn;  // the same context, cplocn
00602             iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnR", strlen( "AtmCovOcnR" ) );
00603 #endif
00604         }
00605         POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )
00606 
00607         // we can now free the sender buffers
00608         if( atmComm != MPI_COMM_NULL )
00609         {
00610             ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // context is for ocean
00611             CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
00612         }
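        // the pattern used throughout this loop is: nonblocking sends are posted on the component
        // PEs (iMOAB_SendElementTag), the matching receives are posted on the coupler PEs
        // (iMOAB_ReceiveElementTag) over the joint communicator, and only then are the sender-side
        // buffers released with iMOAB_FreeSenderBuffers for that context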
00613 #ifdef VERBOSE
00614         if( couComm != MPI_COMM_NULL && 1 == n )
00615         {
00616             // write only for n==1 case
00617             char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
00618             ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions, strlen( outputFileRecvd ),
00619                                     strlen( fileWriteOptions ) );
00620             CHECKIERR( ierr, "could not write recvAtmCoupOcn.h5m to disk" )
00621         }
00622 #endif
00623 
00624         if( couComm != MPI_COMM_NULL )
00625         {
00626             /* We have the remapping weights now. Let us apply the weights onto the tag we defined
00627                on the source mesh and get the projection on the target mesh */
00628             PUSH_TIMER( "Apply Scalar projection weights" )
00629             ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), concat_fieldname,
00630                                                        concat_fieldnameT, weights_identifiers[0].size(),
00631                                                        strlen( concat_fieldname ), strlen( concat_fieldnameT ) );
00632             CHECKIERR( ierr, "failed to compute projection weight application" );
00633             POP_TIMER( couComm, rankInCouComm )
00634             if( 1 == n )  // write only for n==1 case
00635             {
00636                 char outputFileTgt[] = "fOcnOnCpl.h5m";
00637                 ierr = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions, strlen( outputFileTgt ),
00638                                         strlen( fileWriteOptions ) );
00639                 CHECKIERR( ierr, "could not write fOcnOnCpl.h5m to disk" )
00640             }
00641         }
00642 
00643         // send the projected tag back to ocean pes, with send/receive tag
00644         if( ocnComm != MPI_COMM_NULL )
00645         {
00646             int tagIndexIn2;
00647             ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs,
00648                                            &tagIndexIn2, strlen( bottomTempProjectedField ) );
00649             CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
00650                              "a2oTbot_proj on ocn pes" );
00651             ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomUVelProjectedField, &tagTypes[1], &ocnCompNDoFs,
00652                                            &tagIndexIn2, strlen( bottomUVelProjectedField ) );
00653             CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
00654                              "a2oUbot_proj on ocn pes" );
00655             ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomVVelProjectedField, &tagTypes[1], &ocnCompNDoFs,
00656                                            &tagIndexIn2, strlen( bottomVVelProjectedField ) );
00657             CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
00658                              "a2oVbot_proj on ocn pes" );
00659         }
00660         // send the tag to ocean pes, from ocean mesh on coupler pes
00661         //   from couComm, using common joint comm ocn_coupler
00662         // as always, use nonblocking sends
00663         // original graph (context is -1)
00664         if( couComm != MPI_COMM_NULL )
00665         {
00666             ierr = iMOAB_SendElementTag( cplOcnPID, "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;", &ocnCouComm, &context_id,
00667                                          strlen( "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;" ) );
00668             CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
00669         }
00670 
00671         // receive on component 2, ocean
00672         if( ocnComm != MPI_COMM_NULL )
00673         {
00674             ierr = iMOAB_ReceiveElementTag( cmpOcnPID, "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;", &ocnCouComm,
00675                                             &context_id, strlen( "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;" ) );
00676             CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
00677         }
00678 
00679         MPI_Barrier( MPI_COMM_WORLD );
00680 
00681         if( couComm != MPI_COMM_NULL ) { ierr = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id ); }
00682         if( ocnComm != MPI_COMM_NULL && 1 == n )  // write only for n==1 case
00683         {
00684             char outputFileOcn[] = "OcnWithProj.h5m";
00685             ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions, strlen( outputFileOcn ),
00686                                     strlen( fileWriteOptions ) );
00687             CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
00688         }
00689 #endif
00690 
00691 #ifdef ENABLE_ATMLND_COUPLING
00692         // start land proj:
00693         PUSH_TIMER( "Send/receive data from component atm to coupler, in land context" )
00694         if( atmComm != MPI_COMM_NULL )
00695         {
00696             // as always, use nonblocking sends
00697             // this is for projection to land:
00698             ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot;a2oUbot;a2oVbot;", &atmCouComm, &cpllnd,
00699                                          strlen( "a2oTbot;a2oUbot;a2oVbot;" ) );
00700             CHECKIERR( ierr, "cannot send tag values" )
00701         }
00702         if( couComm != MPI_COMM_NULL )
00703         {
00704             // receive on atm on coupler pes, that was redistributed according to coverage, for land
00705             // context
00706             ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot;a2oUbot;a2oVbot;", &atmCouComm, &cpllnd,
00707                                             strlen( "a2oTbot;a2oUbot;a2oVbot;" ) );
00708             CHECKIERR( ierr, "cannot receive tag values" )
00709         }
00710         POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )
00711 
00712         // we can now free the sender buffers
00713         if( atmComm != MPI_COMM_NULL )
00714         {
00715             ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cpllnd );
00716             CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh "
00717                              "for land context" )
00718         }
00719 #ifdef VERBOSE
00720         if( couComm != MPI_COMM_NULL && 1 == n )
00721         {  // write only for n==1 case
00722             char outputFileRecvd[] = "recvAtmCoupLnd.h5m";
00723             ierr = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions, strlen( outputFileRecvd ),
00724                                     strlen( fileWriteOptions ) );
00725             CHECKIERR( ierr, "could not write recvAtmCoupLnd.h5m to disk" )
00726         }
00727 #endif
00728 
00729         /* We have the remapping weights now. Let us apply the weights onto the tag we defined
00730            on the source mesh and get the projection on the target mesh */
00731         if( couComm != MPI_COMM_NULL )
00732         {
00733             PUSH_TIMER( "Apply Scalar projection weights for land" )
00734             ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmLndPID, weights_identifiers[1].c_str(), concat_fieldname,
00735                                                        concat_fieldnameT, weights_identifiers[1].size(),
00736                                                        strlen( concat_fieldname ), strlen( concat_fieldnameT ) );
00737             CHECKIERR( ierr, "failed to compute projection weight application" );
00738             POP_TIMER( couComm, rankInCouComm )
00739         }
00740 
00741 #ifdef VERBOSE
00742         if( couComm != MPI_COMM_NULL && 1 == n )
00743         {  // write only for n==1 case
00744             char outputFileTgtLnd[] = "fLndOnCpl.h5m";
00745             ierr = iMOAB_WriteMesh( cplLndPID, outputFileTgtLnd, fileWriteOptions, strlen( outputFileTgtLnd ),
00746                                     strlen( fileWriteOptions ) );
00747             CHECKIERR( ierr, "could not write fLndOnCpl.h5m to disk" )
00748         }
00749 #endif
00750 
00751         // end land proj
00752         // send the tags back to land pes, from land mesh on coupler pes
00753         // send from cplLndPID to cmpLndPID, using common joint comm
00754         // as always, use nonblocking sends
00755         // original graph
00756         // int context_id = -1;
00757         // the land might not have these tags yet; they should have different names for land
00758         // (in e3sm we do have different names)
00759         if( lndComm != MPI_COMM_NULL )
00760         {
00761             int tagIndexIn2;
00762             ierr = iMOAB_DefineTagStorage( cmpLndPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs,
00763                                            &tagIndexIn2, strlen( bottomTempProjectedField ) );
00764             CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
00765                              "a2oTbot_proj on lnd pes" );
00766             ierr = iMOAB_DefineTagStorage( cmpLndPID, bottomUVelProjectedField, &tagTypes[1], &ocnCompNDoFs,
00767                                            &tagIndexIn2, strlen( bottomUVelProjectedField ) );
00768             CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
00769                              "a2oUbot_proj on lnd pes" );
00770             ierr = iMOAB_DefineTagStorage( cmpLndPID, bottomVVelProjectedField, &tagTypes[1], &ocnCompNDoFs,
00771                                            &tagIndexIn2, strlen( bottomVVelProjectedField ) );
00772             CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
00773                              "a2oVbot_proj on lnd pes" );
00774         }
00775         if( couComm != MPI_COMM_NULL )
00776         {
00777             ierr = iMOAB_SendElementTag( cplLndPID, "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;", &lndCouComm, &context_id,
00778                                          strlen( "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;" ) );
00779             CHECKIERR( ierr, "cannot send tag values back to land pes" )
00780         }
00781         // receive on component 3, land
00782         if( lndComm != MPI_COMM_NULL )
00783         {
00784             ierr = iMOAB_ReceiveElementTag( cmpLndPID, "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;", &lndCouComm,
00785                                             &context_id, strlen( "a2oTbot_proj;a2oUbot_proj;a2oVbot_proj;" ) );
00786             CHECKIERR( ierr, "cannot receive tag values from land mesh on coupler pes" )
00787         }
00788 
00789         MPI_Barrier( MPI_COMM_WORLD );
00790         if( couComm != MPI_COMM_NULL ) { ierr = iMOAB_FreeSenderBuffers( cplLndPID, &context_id ); }
00791         if( lndComm != MPI_COMM_NULL && 1 == n )  // write only for n==1 case
00792         {
00793             char outputFileLnd[] = "LndWithProj.h5m";
00794             ierr                 = iMOAB_WriteMesh( cmpLndPID, outputFileLnd, fileWriteOptions, strlen( outputFileLnd ),
00795                                     strlen( fileWriteOptions ) );
00796             CHECKIERR( ierr, "could not write LndWithProj.h5m to disk" )
00797         }
00798 #endif  // ENABLE_ATMLND_COUPLING
00799 
00800     }  // end loop iterations n
00801 #ifdef ENABLE_ATMLND_COUPLING
00802     if( lndComm != MPI_COMM_NULL )
00803     {
00804         ierr = iMOAB_DeregisterApplication( cmpLndPID );
00805         CHECKIERR( ierr, "cannot deregister app LND1" )
00806     }
00807 #endif  // ENABLE_ATMLND_COUPLING
00808 
00809 #ifdef ENABLE_ATMOCN_COUPLING
00810     if( couComm != MPI_COMM_NULL )
00811     {
00812         ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
00813         CHECKIERR( ierr, "cannot deregister app intx AO" )
00814     }
00815     if( ocnComm != MPI_COMM_NULL )
00816     {
00817         ierr = iMOAB_DeregisterApplication( cmpOcnPID );
00818         CHECKIERR( ierr, "cannot deregister app OCN1" )
00819     }
00820 #endif  // ENABLE_ATMOCN_COUPLING
00821 
00822     if( atmComm != MPI_COMM_NULL )
00823     {
00824         ierr = iMOAB_DeregisterApplication( cmpAtmPID );
00825         CHECKIERR( ierr, "cannot deregister app ATM1" )
00826     }
00827 
00828 #ifdef ENABLE_ATMLND_COUPLING
00829     if( couComm != MPI_COMM_NULL )
00830     {
00831         ierr = iMOAB_DeregisterApplication( cplLndPID );
00832         CHECKIERR( ierr, "cannot deregister app LNDX" )
00833     }
00834 #endif  // ENABLE_ATMLND_COUPLING
00835 
00836 #ifdef ENABLE_ATMOCN_COUPLING
00837     if( couComm != MPI_COMM_NULL )
00838     {
00839         ierr = iMOAB_DeregisterApplication( cplOcnPID );
00840         CHECKIERR( ierr, "cannot deregister app OCNX" )
00841     }
00842 #endif  // ENABLE_ATMOCN_COUPLING
00843 
00844     if( couComm != MPI_COMM_NULL )
00845     {
00846         ierr = iMOAB_DeregisterApplication( cplAtmPID );
00847         CHECKIERR( ierr, "cannot deregister app ATMX" )
00848     }
00849 
00850     //#endif
00851     ierr = iMOAB_Finalize();
00852     CHECKIERR( ierr, "did not finalize iMOAB" )
00853 
00854     // free atm coupler group and comm
00855     if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
00856     MPI_Group_free( &joinAtmCouGroup );
00857     if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );
00858 
00859 #ifdef ENABLE_ATMOCN_COUPLING
00860     if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
00861     // free ocn - coupler group and comm
00862     if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
00863     MPI_Group_free( &joinOcnCouGroup );
00864 #endif
00865 
00866 #ifdef ENABLE_ATMLND_COUPLING
00867     if( MPI_COMM_NULL != lndComm ) MPI_Comm_free( &lndComm );
00868     // free land - coupler group and comm
00869     if( MPI_COMM_NULL != lndCouComm ) MPI_Comm_free( &lndCouComm );
00870     MPI_Group_free( &joinLndCouGroup );
00871 #endif
00872 
00873     if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );
00874 
00875     MPI_Group_free( &atmPEGroup );
00876 #ifdef ENABLE_ATMOCN_COUPLING
00877     MPI_Group_free( &ocnPEGroup );
00878 #endif
00879 #ifdef ENABLE_ATMLND_COUPLING
00880     MPI_Group_free( &lndPEGroup );
00881 #endif
00882     MPI_Group_free( &couPEGroup );
00883     MPI_Group_free( &jgroup );
00884 
00885     MPI_Finalize();
00886 
00887     return 0;
00888 }