MOAB: Mesh Oriented datABase  (version 5.4.1)
imoab_read_map.cpp
/*
 * This imoab_read_map test simulates coupling between 2 components.
 * 2 meshes will be loaded from 2 files (src, tgt), along with one map file.
 * After the map is read, in parallel, on coupler PEs, with distributed rows, the
 * coupler meshes for source and target will be generated in a migration step,
 * in which we migrate from target PEs, according to row ids, to the coupler target mesh,
 * and from source to the coverage mesh on the coupler. During this migration, par comm
 * graphs will be established between source and coupler, and between target and coupler,
 * which will assist in field transfer from source to target, through the coupler.
 *
 */
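/*
 * Example invocation (a sketch only; the MPI launcher name and data file paths depend on
 * the local build and test data layout, and the flags are the ProgOptions defined in main):
 *
 *   mpiexec -np 2 ./imoab_read_map -t wholeATM_T.h5m -m recMeshOcn.h5m -w atm_ocn_map.nc -n 1
 */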

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error mbtempest tool requires MPI configuration
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );
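    // Note on readopts (descriptive): READ_PART makes each task read only its own part of the
    // mesh, PARTITION names the tag that defines the partitioning (here PARALLEL_PARTITION),
    // and PARALLEL_RESOLVE_SHARED_ENTS resolves entities shared between adjacent parts.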

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup
    std::string atmFilename = TestDir + "unittest/wholeATM_T.h5m";
    // in a regular case:  5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // intx atm/ocn is not in e3sm yet, so give it a number:
    //   6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND
    //   6 * 100 + 10 = 610 : atmlndid
    // cmpatm is for atm on atm PEs
    // cmpocn is for ocean, on ocean PEs
    // cplatm is for atm on coupler PEs
    // cplocn is for ocean on coupler PEs
    // atmocnid is for intx atm / ocn on coupler PEs
    //
    int rankInAtmComm = -1;
    int cmpatm        = 5,
        cplatm        = 6;  // component ids are unique over all pes, and established in advance;

    std::string ocnFilename = TestDir + "unittest/recMeshOcn.h5m";
    std::string mapFilename = TestDir + "unittest/atm_ocn_map.nc";  // this is a netcdf file!

    std::string baseline = TestDir + "unittest/baseline1.txt";
    int rankInOcnComm    = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;

    int startG4 = startG1, endG4 = endG1;  // these are for coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm on 2 procs, ocean on 2,
    // and load the map on 2 as well, in parallel, distributed by rows (which is actually very
    // bad for the ocean mesh, because probably all source cells will be involved in the
    // coverage mesh on both tasks)

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
    opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );

    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    int n = 1;  // number of send/receive / project / send back cycles
    opts.addOpt< int >( "iterations,n", "number of iterations for coupler", &n );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
    opts.parseCommandLine( argc, argv );

    char fileWriteOptions[] = "PARALLEL=WRITE_PART";
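    // Note (descriptive): with PARALLEL=WRITE_PART each task writes its own part of the mesh
    // into the shared h5m output file.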

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n   on tasks : " << startG1 << ":" << endG1
                  << "\n ocn file: " << ocnFilename << "\n     on tasks : " << startG2 << ":" << endG2
                  << "\n map file: " << mapFilename << "\n     on tasks : " << startG4 << ":" << endG4 << "\n";
        if( !no_regression_test )
        {
            std::cout << " check projection against baseline: " << baseline << "\n";
        }
    }

    // load files on 3 different communicators, groups
    // first group has task 0, second group tasks 0 and 1
    // coupler will be on joint tasks, will be on a third group (0 and 1, again)
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )

    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

    // ocn_coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
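    // Note (descriptive): each joint communicator spans the union of a component's tasks and the
    // coupler's tasks; all subsequent mesh migration and tag send/receive calls between that
    // component and the coupler are posted on these joint communicators.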

    ierr = iMOAB_Initialize( argc, argv );  // nothing from argc, argv is really needed yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs

    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm - ocn on coupler PEs
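    // Note (descriptive): an iMOAB_AppID is a pointer to the integer application id assigned by
    // iMOAB_RegisterApplication; keeping the int alongside the pointer lets the code test
    // *cplAtmPID >= 0 to see whether this rank participates in a given application.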

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )

        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
        ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load atm mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
        ierr = iMOAB_LoadMesh( cmpOcnPID, ocnFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load ocn mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        // now load map between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes " )
    }

    int disc_orders[3] = { 4, 1, 1 };
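    // Note (an inference from how these values are used below): disc_orders[0] = 4 is the
    // spectral element order of the atm source discretization, so each atm element carries
    // 4 x 4 = 16 degrees of freedom (see atmCompNDoFs); the 1s correspond to the finite
    // volume (one DoF per cell) side.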

    const std::string intx_from_file_identifier = "map-from-file";

    if( couComm != MPI_COMM_NULL )
    {
        int dummyCpl     = -1;
        int dummy_rowcol = -1;
        int dummyType    = 0;
        ierr             = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
                                                             intx_from_file_identifier.c_str(), mapFilename.c_str() );
        CHECKIERR( ierr, "failed to load map file from disk" );
    }
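    // Note (an assumption based on the names and values used above): dummyCpl, dummy_rowcol and
    // dummyType appear to be placeholder arguments for map-loading modes not exercised by this
    // test; here the weights are simply read in parallel, with rows distributed over coupler tasks.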

    if( atmCouComm != MPI_COMM_NULL )
    {
        int type      = 1;  // quads in source set
        int direction = 1;  // from source to coupler; will create a mesh on cplAtmPID
        // because it is like "coverage", context will be cplocn
        ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &type,
                                     &cmpatm, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
#ifdef VERBOSE
        if( *cplAtmPID >= 0 )
        {
            char prefix[] = "atmcov";
            ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local mesh" );
        }
#endif
    }
    MPI_Barrier( MPI_COMM_WORLD );

    if( ocnCouComm != MPI_COMM_NULL )
    {
        int type      = 3;  // cells with GLOBAL_ID in ocean / target set
        int direction = 2;  // from coupler to target; will create a mesh on cplOcnPID
        // it will be like the initial migrate cmpocn <-> cplocn
        ierr = iMOAB_MigrateMapMesh( cmpOcnPID, cplAtmOcnPID, cplOcnPID, &ocnCouComm, &ocnPEGroup, &couPEGroup, &type,
                                     &cmpocn, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for ocn on coupler" );
#ifdef VERBOSE
        if( *cplOcnPID >= 0 )
        {
            char prefix[] = "ocntgt";
            ierr          = iMOAB_WriteLocalMesh( cplOcnPID, prefix );
            CHECKIERR( ierr, "failed to write local ocean mesh" );
            char outputFileRec[] = "CoupOcn.h5m";
            ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileRec, fileWriteOptions );
            CHECKIERR( ierr, "failed to write ocean global mesh file" );
        }
#endif
    }
    MPI_Barrier( MPI_COMM_WORLD );

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = 1 /*FV*/;

    const char* bottomFields          = "a2oTbot:a2oUbot:a2oVbot";
    const char* bottomProjectedFields = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomFields, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot:a2oUbot:a2oVbot" );

        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tags a2oTbot_proj:a2oUbot_proj:a2oVbot_proj" );
    }
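    // Note (descriptive): the colon-separated names define one DENSE_DOUBLE tag per field; on
    // the atm side each tag holds atmCompNDoFs (16) values per element, on the ocn side one
    // value per cell, matching the source and target discretizations of the map.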

    // need to make sure that the coverage mesh (created during the intx method) receives the
    // tags that need to be projected to the target; so far, the coverage mesh has only the ids
    // and global dofs; need to change the migrate method to accommodate any GLL tag
    // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage
    // mesh (cplAtmPID), using the new coverage graph communicator

    // set the tags to 0, to check that we are actually sending needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance, which
             * will contain the original cells in the local partition and ghost entities. Number of
             * vertices, primary cells, visible blocks, number of sidesets and nodesets boundary
             * conditions will be returned in size-3 arrays, for local, ghost and total
             * numbers.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem * 3;  // 3 tags
            int eetype   = 1;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomFields, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot zero out the tags" )
        }
    }

    const char* concat_fieldname  = "a2oTbot:a2oUbot:a2oVbot";
    const char* concat_fieldnameT = "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj";

    // start a virtual loop for number of iterations
    for( int iters = 0; iters < n; iters++ )
    {

        PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to ocean:
            ierr = iMOAB_SendElementTag( cmpAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot send tag values" )
#ifdef GRAPH_INFO
            int is_sender = 1;
            int context   = cplocn;
            iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnS" );
#endif
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on atm on coupler pes, which was redistributed according to coverage
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, "a2oTbot:a2oUbot:a2oVbot", &atmCouComm, &cmpatm );
            CHECKIERR( ierr, "cannot receive tag values" )
#ifdef GRAPH_INFO
            int is_sender = 0;
            int context   = atmocnid;  // the same context
            iMOAB_DumpCommGraph( cmpAtmPID, &context, &is_sender, "AtmCovOcnR" );
#endif
        }

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // context is for ocean
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
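        // Note (descriptive): iMOAB_SendElementTag posts nonblocking sends, so the staging
        // buffers must remain alive until the matching receives complete; that is why they
        // are released only here, after iMOAB_ReceiveElementTag has run on the coupler side.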
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )
#ifdef VERBOSE
        if( *cplAtmPID >= 0 && n == 1 )
        {
            char prefix[] = "atmcov_withdata";
            ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
        }

        if( couComm != MPI_COMM_NULL && 1 == n )
        {
            // write only for the n==1 case
            char outputFileRecvd[] = "recvAtmCoupOcn.h5m";
            ierr                   = iMOAB_WriteMesh( cplAtmPID, outputFileRecvd, fileWriteOptions );
            CHECKIERR( ierr, "could not write recvAtmCoupOcn.h5m to disk" )
        }
#endif

        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we defined
               on the source mesh and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
                                                       concat_fieldname, concat_fieldnameT );
            CHECKIERR( ierr, "failed to apply projection weights" );
            POP_TIMER( couComm, rankInCouComm )
            if( 1 == n )  // write only for the n==1 case
            {
                char outputFileTgt[] = "fOcnOnCpl8.h5m";
                ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write fOcnOnCpl8.h5m to disk" )
            }
        }
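        // Note (descriptive): conceptually, applying the weights is a distributed sparse
        // matrix-vector product tgt = W * src for each field, where W is the map read from
        // the netcdf file, with rows corresponding to target cells and columns to source DoFs.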

        // send the projected tag back to ocean pes, with send/receive tag
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr =
                iMOAB_DefineTagStorage( cmpOcnPID, bottomProjectedFields, &tagTypes[1], &ocnCompNDoFs, &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tag for receiving back the tags "
                             "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj on ocn pes" );
        }
        // send the tag to ocean pes, from ocean mesh on coupler pes
        //   from couComm, using common joint comm ocn_coupler
        // as always, use nonblocking sends
        // original graph (context is -1)
        if( couComm != MPI_COMM_NULL )
        {
            // need to use ocean comp id for context
            context_id = cmpocn;  // id for ocean on comp
            ierr =
                iMOAB_SendElementTag( cplOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on coupler
            ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, "a2oTbot_proj:a2oUbot_proj:a2oVbot_proj", &ocnCouComm,
                                                  &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
            CHECKIERR( ierr, "cannot free buffers used to send the projected tag back to ocean pes" )
        }
        MPI_Barrier( MPI_COMM_WORLD );

        if( ocnComm != MPI_COMM_NULL && 1 == n )  // write only for the n==1 case
        {
#ifdef VERBOSE
            char outputFileOcn[] = "OcnWithProj.h5m";
            ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
#endif
            // test results only for n == 1, for bottomTempProjectedField = "a2oTbot_proj"
            if( !no_regression_test )
            {
                // the same as the remap test:
                // get the temperature field on the ocean, from conservative remapping, along
                // with the global ids, and compare against the baseline file
                // first get GlobalIds from ocn, and fields:
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard coded too
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr         = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, "a2oTbot_proj", &nelem[2], &ent_type, &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
            }
        }

    }  // end loop iterations n

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free atm coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );

    MPI_Group_free( &ocnPEGroup );

    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}