MOAB: Mesh Oriented datABase  (version 5.4.1)
imoab_coupler_twohop.cpp
/*
 * This imoab_coupler_twohop test simulates coupling between two components (atm, ocn)
 * with two paths through the coupler. The two meshes are loaded from files and migrated
 * to the coupler PEs; intersections are then computed between the migrated meshes, and
 * projection weights are generated, so that a field from the atm component can be
 * transferred to the ocn component.
 *
 * First, intersect atm and ocn, and recompute comm graph 1 between atm and atm_cx, for
 * the direct (one-hop) ocn intersection.
 * Second, migrate atm to a second coupler instance (atm2_cx), intersect it with ocn, and
 * compute comm graph 2 between atm2_cx and the atm2/ocn intersection (the two-hop path).
 */
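
// Component/application id flow, as set up below (a sketch; the ids are the ones
// hard-coded in main):
//
//   one hop: cmpatm (5) -> cplatm (6)   -> intx atmocnid (618)  -> cplocn (18) -> cmpocn (17)
//   two hop: cmpatm (5) -> cplatm2 (10) -> intx atm2ocnid (610) -> cplocn (18) -> cmpocn (17)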

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error This coupler test requires a MOAB configuration with MPI
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

using namespace moab;

//#define GRAPH_INFO

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif

#define ENABLE_ATMOCN_COUPLING
#define ENABLE_ATMCPLOCN_COUPLING

#if( !defined( ENABLE_ATMOCN_COUPLING ) && !defined( ENABLE_ATMCPLOCN_COUPLING ) )
#error Enable the direct path (ENABLE_ATMOCN_COUPLING) and/or the two-hop path (ENABLE_ATMCPLOCN_COUPLING)
#endif

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
    std::string readoptsLnd( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION" );

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
    repartitioner_scheme = 2;  // use the Zoltan-based partitioner (2 = geometry) in that case
#endif

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup

    std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
    // in a regular E3SM case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // the atm/ocn intersection is not in E3SM yet, so pick a number:
    //   6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND; the LND-style ids are reused here for the second coupler instance:
    //   6 * 100 + 10 = 610 : atm2ocnid
    // cmpatm is for atm on atm pes
    // cmpocn is for ocean, on ocean pes
    // cplatm is for atm on coupler pes
    // cplocn is for ocean on coupler pes
    // atmocnid is for the intx atm/ocn app on coupler pes
    //
    int rankInAtmComm = -1;
    int cmpatm        = 5,
        cplatm        = 6;  // component ids are unique over all pes, and established in advance;
#ifdef ENABLE_ATMOCN_COUPLING
    std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
    std::string baseline    = TestDir + "unittest/baseline1.txt";
    int rankInOcnComm       = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;
#endif

#ifdef ENABLE_ATMCPLOCN_COUPLING
    int cplatm2 = 10,
        atm2ocnid = 610;  // component ids are unique over all pes, and established in advance;
#endif

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    // by default every component and the coupler span all available tasks
    int startG1 = 0, startG2 = 0,
        endG1 = numProcesses - 1, endG2 = numProcesses - 1;
    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm and ocn across all procs, migrate to all coupler procs, then compute
    // the intersection; later, we compute the weight matrix with TempestRemap

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
#endif
    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );
#ifdef ENABLE_ATMOCN_COUPLING
    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );
#endif

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    opts.addOpt< int >( "partitioning,p", "partitioning option for migration", &repartitioner_scheme );

    int n = 1;  // number of send/receive/project/send-back cycles
    opts.addOpt< int >( "iterations,n", "number of iterations for coupler", &n );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
    opts.parseCommandLine( argc, argv );

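    // Example invocation (hypothetical layout, assuming 4 MPI tasks): atm on tasks 0-1,
    // ocn on tasks 2-3, coupler overlapping all tasks, two projection cycles:
    //   mpiexec -np 4 ./imoab_coupler_twohop -a 0 -b 1 -c 2 -d 3 -g 0 -j 3 -n 2
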
    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n   on tasks : " << startG1 << ":" << endG1 <<
#ifdef ENABLE_ATMOCN_COUPLING
            "\n ocn file: " << ocnFilename << "\n     on tasks : " << startG2 << ":" << endG2 <<
#endif
            "\n  partitioning (0 trivial, 1 graph, 2 geometry) " << repartitioner_scheme << "\n  ";
    }

    // load files on separate communicators and groups: one group for atm, one for ocn;
    // the coupler runs on a third group of joint tasks (by default, all tasks)
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
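
    // create_group_and_comm comes from imoab_coupler_utils.hpp (not shown here); a minimal
    // sketch of what it does, assuming the group is the contiguous rank range [start, end]:
    //
    //   int create_group_and_comm( int start, int end, MPI_Group world, MPI_Group* grp, MPI_Comm* comm )
    //   {
    //       std::vector< int > ranks( end - start + 1 );
    //       for( int i = start; i <= end; i++ )
    //           ranks[i - start] = i;  // contiguous tasks start..end of the world group
    //       MPI_Group_incl( world, (int)ranks.size(), &ranks[0], grp );
    //       return MPI_Comm_create( MPI_COMM_WORLD, *grp, comm );  // MPI_COMM_NULL on non-members
    //   }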

#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )
#endif

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

#ifdef ENABLE_ATMOCN_COUPLING
    // ocn_coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
#endif

    ierr = iMOAB_Initialize( argc, argv );  // nothing from argc/argv is actually used yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs
#ifdef ENABLE_ATMOCN_COUPLING
    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm-ocn on coupler PEs
#endif

#ifdef ENABLE_ATMCPLOCN_COUPLING
    int cplAtm2AppID          = -1;                // -1 means it is not initialized
    iMOAB_AppID cplAtm2PID    = &cplAtm2AppID;     // atm on second coupler PEs
    int cplAtm2OcnAppID       = -1;                // -1 means it is not initialized
    iMOAB_AppID cplAtm2OcnPID = &cplAtm2OcnAppID;  // intx atm2-ocn on coupler PEs
#endif

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )
#ifdef ENABLE_ATMOCN_COUPLING
        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
#endif
#ifdef ENABLE_ATMCPLOCN_COUPLING
        ierr = iMOAB_RegisterApplication( "ATMX2", &couComm, &cplatm2,
                                          cplAtm2PID );  // second atm on coupler pes
        CHECKIERR( ierr, "Cannot register second ATM over coupler PEs" )
#endif
    }

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
    }

#ifdef ENABLE_ATMOCN_COUPLING
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
    }
#endif

    // atm
    ierr =
        setup_component_coupler_meshes( cmpAtmPID, cmpatm, cplAtmPID, cplatm, &atmComm, &atmPEGroup, &couComm,
                                        &couPEGroup, &atmCouComm, atmFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate atm mesh" )

#ifdef ENABLE_ATMOCN_COUPLING
    // ocean
    ierr =
        setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
                                        &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot load and migrate ocn mesh" )

#endif  // #ifdef ENABLE_ATMOCN_COUPLING
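
    // setup_component_coupler_meshes (imoab_coupler_utils.hpp) bundles, roughly: load the
    // mesh on the component pes, iMOAB_SendMesh over the joint communicator, then
    // iMOAB_ReceiveMesh on the coupler pes, followed by iMOAB_FreeSenderBuffers; the same
    // sequence is spelled out explicitly for the second coupler instance just below.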

#ifdef ENABLE_ATMCPLOCN_COUPLING

    if( atmComm != MPI_COMM_NULL )
    {
        // send the atm mesh to the second coupler instance
        ierr = iMOAB_SendMesh( cmpAtmPID, &atmCouComm, &couPEGroup, &cplatm2,
                               &repartitioner_scheme );  // send to coupler pes
        CHECKIERR( ierr, "cannot send elements to coupler-2" )
    }
    // now receive the mesh on the coupler communicator
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_ReceiveMesh( cplAtm2PID, &atmCouComm, &atmPEGroup,
                                  &cmpatm );  // receive from component
        CHECKIERR( ierr, "cannot receive elements on coupler app" )
    }

    // we can now free the sender buffers
    if( atmComm != MPI_COMM_NULL )
    {
        int context_id = cplatm2;  // the receiver id used in iMOAB_SendMesh above
        ierr           = iMOAB_FreeSenderBuffers( cmpAtmPID, &context_id );
        CHECKIERR( ierr, "cannot free buffers used to send atm mesh" )
    }

    if( couComm != MPI_COMM_NULL && 1 == n )
    {  // write only for n==1 case
        char outputFileAtm2[] = "recvAtm2.h5m";
        ierr                  = iMOAB_WriteMesh( cplAtm2PID, outputFileAtm2, fileWriteOptions );
        CHECKIERR( ierr, "cannot write second atm mesh after receiving" )
    }

#endif  // #ifdef ENABLE_ATMCPLOCN_COUPLING

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // now compute the intersection between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register atm/ocn intersection over coupler pes " )
    }
#endif
#ifdef ENABLE_ATMCPLOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        // now compute the intersection between OCNx and the second ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATM2OCN", &couComm, &atm2ocnid, cplAtm2OcnPID );
        CHECKIERR( ierr, "Cannot register atm2/ocn intersection over coupler pes " )
    }
#endif

    int disc_orders[3]                       = { 4, 1, 1 };
    const std::string weights_identifiers[2] = { "scalar", "scalar-pc" };
    const std::string disc_methods[3]        = { "cgll", "fv", "pcloud" };
    const std::string dof_tag_names[3]       = { "GLOBAL_DOFS", "GLOBAL_ID", "GLOBAL_ID" };
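    // As used below: the atm source is continuous GLL ("cgll") of order 4, with its DoFs
    // in the GLOBAL_DOFS tag; the ocn target is finite volume ("fv") of order 1, one DoF
    // per cell, identified by GLOBAL_ID ("pcloud" is a point-cloud option, unused here).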
#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute ATM-OCN mesh intersection" )
        ierr = iMOAB_ComputeMeshIntersectionOnSphere(
            cplAtmPID, cplOcnPID,
            cplAtmOcnPID );  // the coverage mesh is computed here, for cplAtmPID, atm on coupler pes
        // basically, atm was redistributed according to the target (ocean) partition, to
        // "cover" the ocean partitions; check that the intx is valid, and optionally write
        // an h5m intx file
        CHECKIERR( ierr, "cannot compute intersection for atm/ocn" )
        POP_TIMER( couComm, rankInCouComm )
#ifdef VERBOSE
        char prefix[] = "intx_atmocn";
        ierr          = iMOAB_WriteLocalMesh( cplAtmOcnPID, prefix, strlen( prefix ) );
        CHECKIERR( ierr, "failed to write local intx mesh" );
#endif
    }

    if( atmCouComm != MPI_COMM_NULL )
    {
        // the new graph will be used for sending data from the atm component to the coverage
        // mesh; it involves the initial atm app (cmpAtmPID) and the migrated atm mesh on the
        // coupler pes (cplAtmPID); the results are in cplAtmOcnPID, the intx mesh (the
        // remapper also keeps some info about the coverage mesh);
        // after this, sending tags from atm pes to coupler pes will use the new par comm
        // graph, which has more precise info about what to send for the ocean cover; we
        // always use the element global id, which uniquely identifies each element
        PUSH_TIMER( "Compute OCN coverage graph for ATM mesh" )
        ierr = iMOAB_CoverageGraph( &atmCouComm, cmpAtmPID, cplAtmPID, cplAtmOcnPID, &cmpatm, &cplatm,
                                    &cplocn );  // it happens over the joint communicator
        CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean" )
        POP_TIMER( atmCouComm, rankInAtmComm )  // hijack this rank
    }
#endif

#ifdef ENABLE_ATMCPLOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute ATM2-OCN mesh intersection" )
        ierr = iMOAB_ComputeMeshIntersectionOnSphere(
            cplAtm2PID, cplOcnPID,
            cplAtm2OcnPID );  // the coverage mesh is computed here, for cplAtm2PID, atm on coupler pes
        // basically, the second atm instance was redistributed according to the target
        // (ocean) partition, to "cover" the ocean partitions
        CHECKIERR( ierr, "cannot compute intersection for atm2/ocn" )
        POP_TIMER( couComm, rankInCouComm )
    // }
    // if( atmCouComm != MPI_COMM_NULL )
    // {
        // a coverage graph here would serve to send data from the atm component to the
        // coverage mesh; it would involve the initial atm app (cmpAtmPID), the migrated atm
        // mesh on the coupler pes (cplAtm2PID), and the results in cplAtm2OcnPID, the intx
        // mesh; we always use the element global id, which uniquely identifies each element
        PUSH_TIMER( "Compute OCN coverage graph for ATM2 mesh" )
        // Context: cplatm2 already holds the comm graph for communicating between the atm
        // component and coupler2. We just need a comm graph to internally transfer data
        // from coupler atm to coupler ocean:
        // ierr = iMOAB_CoverageGraph( &couComm, cplAtm2PID, cplAtm2OcnPID, cplAtm2OcnPID, &cplatm2, &atm2ocnid,
        //                             &cplocn );  // it happens over the joint communicator
        int type1 = 1;
        int type2 = 1;
        ierr      = iMOAB_ComputeCommGraph( cplAtm2PID, cplAtm2OcnPID, &couComm, &couPEGroup, &couPEGroup, &type1, &type2,
                                            &cplatm2, &atm2ocnid );
        CHECKIERR( ierr, "cannot recompute direct coverage graph for ocean from atm2" )
        POP_TIMER( couComm, rankInCouComm )  // hijack this rank
    }
#endif

    MPI_Barrier( MPI_COMM_WORLD );

    int fMonotoneTypeID = 0, fVolumetric = 0, fValidate = 0, fNoConserve = 0, fNoBubble = 1, fInverseDistanceMap = 0;

#ifdef ENABLE_ATMOCN_COUPLING
#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL && 1 == n )
    {                                    // write only for n==1 case
        char serialWriteOptions[] = "";  // for writing in serial
        std::stringstream outf;
        outf << "intxAtmOcn_" << rankInCouComm << ".h5m";
        std::string intxfile = outf.str();  // write the intx file in serial, for debugging
        ierr                 = iMOAB_WriteMesh( cplAtmOcnPID, intxfile.c_str(), serialWriteOptions );
        CHECKIERR( ierr, "cannot write intx file result" )
    }
#endif

    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute the projection weights with TempestRemap" )
        ierr =
            iMOAB_ComputeScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), disc_methods[0].c_str(),
                                                  &disc_orders[0], disc_methods[1].c_str(), &disc_orders[1], &fNoBubble,
                                                  &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap, &fNoConserve,
                                                  &fValidate, dof_tag_names[0].c_str(), dof_tag_names[1].c_str() );
        CHECKIERR( ierr, "cannot compute scalar projection weights" )
        POP_TIMER( couComm, rankInCouComm )

        // Let us now write the map file to disk and then read it back, to test the I/O API in iMOAB
#ifdef MOAB_HAVE_NETCDF
        {
            const std::string atmocn_map_file_name = "atm_ocn_map2.nc";
            ierr = iMOAB_WriteMappingWeightsToFile( cplAtmOcnPID, weights_identifiers[0].c_str(),
                                                    atmocn_map_file_name.c_str() );
            CHECKIERR( ierr, "failed to write map file to disk" );

            const std::string intx_from_file_identifier = "map-from-file";
            int dummyCpl     = -1;
            int dummy_rowcol = -1;
            int dummyType    = 0;
            ierr = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
                                                     intx_from_file_identifier.c_str(), atmocn_map_file_name.c_str() );
            CHECKIERR( ierr, "failed to load map file from disk" );
        }
#endif
    }

#endif

    MPI_Barrier( MPI_COMM_WORLD );

#ifdef ENABLE_ATMCPLOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        PUSH_TIMER( "Compute the projection weights with TempestRemap for atm2/ocn" )
        ierr =
            iMOAB_ComputeScalarProjectionWeights( cplAtm2OcnPID, weights_identifiers[0].c_str(), disc_methods[0].c_str(),
                                                  &disc_orders[0], disc_methods[1].c_str(), &disc_orders[1], &fNoBubble,
                                                  &fMonotoneTypeID, &fVolumetric, &fInverseDistanceMap, &fNoConserve,
                                                  &fValidate, dof_tag_names[0].c_str(), dof_tag_names[1].c_str() );
        CHECKIERR( ierr, "cannot compute scalar projection weights for atm2/ocn" )
        POP_TIMER( couComm, rankInCouComm )

        // Let us now write the map file to disk and then read it back, to test the I/O API in iMOAB
#ifdef MOAB_HAVE_NETCDF
        {
            const std::string atmocn_map_file_name = "atm2_ocn_map.nc";
            ierr = iMOAB_WriteMappingWeightsToFile( cplAtm2OcnPID, weights_identifiers[0].c_str(),
                                                    atmocn_map_file_name.c_str() );
            CHECKIERR( ierr, "failed to write map file to disk" );

            const std::string intx_from_file_identifier = "map2-from-file";
            int dummyCpl                                = -1;
            int dummy_rowcol                            = -1;
            int dummyType                               = 0;
            ierr = iMOAB_LoadMappingWeightsFromFile( cplAtm2OcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
                                                     intx_from_file_identifier.c_str(), atmocn_map_file_name.c_str() );
            CHECKIERR( ierr, "failed to load map file from disk" );
        }
#endif
    }
#endif

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;

    const char* bottomFields           = "a2oTbot:a2oUbot:a2oVbot";
    const char* bottomProjectedFields  = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";
    const char* bottomSourceFields2    = "a2oT2bot_src:a2oU2bot_src:a2oV2bot_src";
    const char* bottomProjectedFields3 = "a2oT2bot_proj:a2oU2bot_proj:a2oV2bot_proj";

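    // Each colon-separated string defines three tags per iMOAB_DefineTagStorage call
    // (bottom temperature, U and V wind); with disc_orders[0] = 4 on the cgll source, each
    // atm cell carries atmCompNDoFs = 4 * 4 = 16 values per tag, while each fv ocean cell
    // carries exactly one.
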
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot:a2oUbot:a2oVbot" );
        ierr = iMOAB_DefineTagStorage( cplAtm2PID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot:a2oUbot:a2oVbot on the second atm instance" );

#ifdef ENABLE_ATMOCN_COUPLING
        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tag a2oTbot_proj" );
#endif
#ifdef ENABLE_ATMCPLOCN_COUPLING
        ierr = iMOAB_DefineTagStorage( cplAtm2PID, bottomSourceFields2, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tag a2oT2bot_src" );
        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields3, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tag a2oT2bot_proj" );
#endif
    }

    // we need to make sure that the coverage mesh (created during the intx method) received
    // the tags that need to be projected to the target; so far, the coverage mesh has only
    // the ids and global dofs, and the migrate method would need changes to accommodate any
    // GLL tag;
    // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage
    // mesh (cplAtmPID), using the new coverage graph communicator

    // set the tags to 0, to check that we are actually sending the needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance,
             * which will contain the original cells in the local partition and ghost
             * entities. The numbers of vertices, primary cells, visible blocks, and sideset
             * and nodeset boundary conditions are returned in 3-entry arrays, for local,
             * ghost and total numbers.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem * 3;  // 3 tags
            int eetype   = 1;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot zero out the source tags" )
        }
        if( cplAtm2AppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Same as above, for the second atm instance on the coupler.
             */
            ierr = iMOAB_GetMeshInfo( cplAtm2PID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem * 3;  // 3 tags
            int eetype   = 1;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            ierr = iMOAB_SetDoubleTagStorage( cplAtm2PID, bottomSourceFields2, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot zero out the source tags" )
        }
    }

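    // The two blocks above repeat the same zero-fill pattern; a hypothetical helper could
    // factor it out (a sketch, using only the iMOAB calls already used above):
    //
    //   static int zero_tags( iMOAB_AppID pid, const char* tags, int ndofs, int ntags )
    //   {
    //       int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
    //       int rc = iMOAB_GetMeshInfo( pid, nverts, nelem, nblocks, nsbc, ndbc );
    //       if( rc ) return rc;
    //       int len = ndofs * nelem[2] * ntags, eetype = 1;
    //       std::vector< double > vals( len, 0. );
    //       return iMOAB_SetDoubleTagStorage( pid, tags, &len, &eetype, &vals[0] );
    //   }
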
    // loop over the number of send/receive/project cycles
    for( int iters = 0; iters < n; iters++ )
    {
#ifdef ENABLE_ATMOCN_COUPLING
        PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to ocean:
            ierr = iMOAB_SendElementTag( cmpAtmPID, bottomFields, &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot send tag values" )
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on the atm on coupler pes, which was redistributed according to coverage
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, bottomFields, &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot receive tag values" )
        }
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // context is for ocean
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
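
        // Pattern recap: iMOAB_SendElementTag / iMOAB_ReceiveElementTag are a nonblocking
        // pair keyed by a context id; here cplocn selects the coverage graph built by
        // iMOAB_CoverageGraph above, and iMOAB_FreeSenderBuffers with the same context
        // releases the isend buffers once the receives have completed.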
#ifdef VERBOSE
        if( couComm != MPI_COMM_NULL && 1 == n )
        {
            // write only for n==1 case
            char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
            ierr                   = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
            CHECKIERR( ierr, "could not write recvAtmCoupOcn.h5m to disk" )
        }
#endif

        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we defined
               on the source mesh and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, weights_identifiers[0].c_str(), bottomFields,
                                                       bottomProjectedFields );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )
            if( 1 == n )  // write only for n==1 case
            {
                char outputFileTgt[] = "fOcnOnCpl1.h5m";
                ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write fOcnOnCpl1.h5m to disk" )
            }
        }

        // send the projected tags back to the ocean pes, with a send/receive pair
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr =
                iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tag for receiving back the tags "
                             "a2oTbot_proj, a2oUbot_proj, a2oVbot_proj on ocn pes" );
        }
        // send the tags to the ocean pes, from the ocean mesh on coupler pes
        //   from couComm, using the common joint comm ocn_coupler
        // as always, use nonblocking sends
        // this uses the original migration graph (context is the component id)
        if( couComm != MPI_COMM_NULL )
        {
            // need to use the ocean comp id for the context
            context_id = cmpocn;  // id for ocean on comp
            ierr =
                iMOAB_SendElementTag( cplOcnPID, bottomProjectedFields, &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on coupler
            ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, bottomProjectedFields, &ocnCouComm,
                                                  &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }

        MPI_Barrier( MPI_COMM_WORLD );

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
            CHECKIERR( ierr, "cannot free send/receive buffers for OCN context" )
        }
        if( ocnComm != MPI_COMM_NULL && 1 == n )  // write only for n==1 case
        {
            char outputFileOcn[] = "OcnWithProj.h5m";
            ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
            // test the projected a2oTbot_proj values, only for the n == 1 case
            if( !no_regression_test )
            {
                // the same as the remap test:
                // get the projected temperature field on the ocean and the global ids, and
                // compare against the baseline file
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard coded too
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr         = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oTbot_proj", &nelem[2], &ent_type,
                                                  &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
            }
        }
#endif

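        // check_baseline_file, a small utility from the test headers, compares the
        // (global id, value) pairs against the entries stored in the baseline file, within
        // the given absolute tolerance (1.e-9 here), and sets err_code to 0 on success.
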
#ifdef ENABLE_ATMCPLOCN_COUPLING
        PUSH_TIMER( "Send/receive data from atm2 component to coupler of all data" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for the two-hop path, through the second atm instance on the coupler:
            ierr = iMOAB_SendElementTag( cmpAtmPID, bottomFields, &atmCouComm, &cplatm2 );
            CHECKIERR( ierr, "cannot send tag values" )
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on the second atm instance on coupler pes (the full migrated mesh)
            ierr = iMOAB_ReceiveElementTag( cplAtm2PID, bottomFields, &atmCouComm, &cmpatm );
            CHECKIERR( ierr, "cannot receive tag values" )
        }
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplatm2 );  // context is the receiver id used in the send above
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the second coupler instance" )
        }
// #ifdef VERBOSE
        if( couComm != MPI_COMM_NULL && 1 == n )
        {  // write only for n==1 case
            char outputFileRecvd[] = "recvAtm2CoupFull.h5m";
            ierr                   = iMOAB_WriteMesh( cplAtm2PID, outputFileRecvd, fileWriteOptions );
            CHECKIERR( ierr, "could not write recvAtm2CoupFull.h5m to disk" )
        }
// #endif

        PUSH_TIMER( "Send/receive data from atm2 coupler to ocean coupler based on coverage data" )
        if( couComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to ocean:
            ierr = iMOAB_SendElementTag( cplAtm2PID, bottomFields, &couComm, &atm2ocnid );
            CHECKIERR( ierr, "cannot send tag values" )

            // receive on the atm on coupler pes, redistributed according to coverage;
            // this receives into the coverage mesh, basically
            ierr = iMOAB_ReceiveElementTag( cplAtm2OcnPID, bottomSourceFields2, &couComm, &cplatm2 );
            CHECKIERR( ierr, "cannot receive tag values" )
        }
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )

        // we can now free the sender buffers
        if( couComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cplAtm2PID, &atm2ocnid );  // the context is the intx app id
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
// #ifdef VERBOSE
        // we should not write this one; it should be the same as recvAtm2CoupFull above
      /*  if( couComm != MPI_COMM_NULL && 1 == n )
        {  // write only for n==1 case
            char outputFileRecvd[] = "recvAtm2CoupOcnCtx.h5m";
            ierr                   = iMOAB_WriteMesh( cplAtm2PID, outputFileRecvd, fileWriteOptions );
            CHECKIERR( ierr, "could not write recvAtm2CoupOcnCtx.h5m to disk" )
        }
// #endif*/
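
        // Note the asymmetry of this second hop: the send on cplAtm2PID uses the context
        // atm2ocnid (the intx app id passed to iMOAB_ComputeCommGraph above), while the
        // receive on the intx app cplAtm2OcnPID uses the source app id cplatm2; each end
        // of the pair names the other side of the graph.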

        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we defined
               on the source mesh and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtm2OcnPID, weights_identifiers[0].c_str(),
                                                       bottomSourceFields2, bottomProjectedFields3 );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )
            if( 1 == n )  // write only for n==1 case
            {
                char outputFileTgt[] = "fOcnOnCpl0.h5m";
                ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write the second fOcnOnCpl0.h5m to disk" )
            }
        }

        std::cout << "applied scalar projection\n";
        // send the projected tags back to the ocean pes, with a send/receive pair
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields3, &tagTypes[1],
                                           &ocnCompNDoFs, &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tag for receiving back the tags "
                             "a2oT2bot_proj, a2oU2bot_proj, a2oV2bot_proj on ocn pes" );
        }
        std::cout << "defined tag again on ocn\n";
        // send the tags to the ocean pes, from the ocean mesh on coupler pes
        //   from couComm, using the common joint comm ocn_coupler
        // as always, use nonblocking sends
        // this uses the original migration graph (context is the component id)
        if( couComm != MPI_COMM_NULL )
        {
            // need to use the ocean comp id for the context
            context_id = cmpocn;  // id for ocean on comp
            ierr       = iMOAB_SendElementTag( cplOcnPID, bottomProjectedFields3, &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }
        std::cout << "sent ocn data from coupler to component\n";

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on coupler
            ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, bottomProjectedFields3, &ocnCouComm,
                                                  &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }
        std::cout << "received ocn data from coupler to component\n";

        MPI_Barrier( MPI_COMM_WORLD );

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
            CHECKIERR( ierr, "cannot free send/receive buffers for OCN context" )
        }
        std::cout << "freed send/recv ocn data from coupler to component\n";
        if( ocnComm != MPI_COMM_NULL && 1 == n )  // write only for n==1 case
        {
            char outputFileOcn[] = "Ocn2WithProj.h5m";
            ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write Ocn2WithProj.h5m to disk" )
            // test the two-hop projected values, only for the n == 1 case
            if( !no_regression_test )
            {
                // the same as the remap test:
                // get the projected temperature field on the ocean and the global ids, and
                // compare against the baseline file
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard coded too
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr         = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                // check the two-hop result, a2oT2bot_proj, against the same baseline
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oT2bot_proj", &nelem[2], &ent_type, &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn (two-hop) on ocean task " << rankInOcnComm << "\n";
            }

            std::cout << "wrote ocn data on component to disk\n";
        }
#endif  // ENABLE_ATMCPLOCN_COUPLING

    }  // end loop iterations n
#ifdef ENABLE_ATMCPLOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtm2OcnPID );
        CHECKIERR( ierr, "cannot deregister app intx ATM2OCN" )
        // the second atm instance was registered on the coupler above, so release it too
        ierr = iMOAB_DeregisterApplication( cplAtm2PID );
        CHECKIERR( ierr, "cannot deregister app ATMX2" )
    }
#endif  // ENABLE_ATMCPLOCN_COUPLING

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx ATMOCN" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

#ifdef ENABLE_ATMOCN_COUPLING
    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }
#endif  // ENABLE_ATMOCN_COUPLING

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free the atm coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

#ifdef ENABLE_ATMOCN_COUPLING
    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free the ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );
#endif

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );
#ifdef ENABLE_ATMOCN_COUPLING
    MPI_Group_free( &ocnPEGroup );
#endif
    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}