MOAB: Mesh Oriented datABase  (version 5.4.1)
imoab_map_target.cpp
/*
 * This imoab_map_target test simulates coupling between 2 components.
 * Two meshes are loaded from 2 files (src, tgt), along with one map file.
 * The target mesh is migrated to the coupler with a partitioning method.
 * After the map is read, in parallel, on coupler PEs, with row ownership from
 * the target mesh, the coupler mesh for the source is generated in a migration
 * step, from the source to the coverage mesh on the coupler. During this
 * migration, a par comm graph is established between source and coupler,
 * which assists in field transfer from source to coupler; the original
 * migrate is used for the target mesh, from coupler to target component.
 */

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error mbtempest tool requires MPI configuration
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup

    std::string atmFilename = TestDir + "unittest/srcWithSolnTag.h5m";
    // on a regular case: 5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // intx atm/ocn is not in e3sm yet, give it a number:
    //   6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND
    //   6 * 100 + 10 = 610 : atmlndid
    // cmpatm is for atm on atm pes
    // cmpocn is for ocean, on ocean pes
    // cplatm is for atm on coupler pes
    // cplocn is for ocean on coupler pes
    // atmocnid is for intx atm / ocn on coupler pes
    //
    int rankInAtmComm = -1;
    int cmpatm        = 5,
        cplatm        = 6;  // component ids are unique over all pes, and established in advance;

    std::string ocnFilename = TestDir + "unittest/outTri15_8.h5m";
    std::string mapFilename = TestDir + "unittest/mapNE20_FV15.nc";  // this is a netcdf file!

    std::string baseline = TestDir + "unittest/baseline2.txt";
    int rankInOcnComm    = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;

    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id;                        // used now for freeing buffers

    int repartitioner_scheme = 0;
#ifdef MOAB_HAVE_ZOLTAN
    repartitioner_scheme = 2;  // use the graph partitioner in that case
#endif

    // default: load atm / source on 2 procs, ocean / target on 2,
    // load the map on 2 as well, in parallel, distributed by rows (which is actually very bad
    // for the ocean mesh, because probably all source cells will be involved in the coverage
    // mesh on both tasks)

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
    opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );

    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    int types[2]       = { 3, 3 };  // type of source and target; 1 = SE, 2 = PC, 3 = FV
    int disc_orders[2] = { 1, 1 };  // 1 is for FV and PC; 4 could be for SE
    opts.addOpt< int >( "typeSource,x", "source type", &types[0] );
    opts.addOpt< int >( "typeTarget,y", "target type", &types[1] );
    opts.addOpt< int >( "orderSource,u", "source order", &disc_orders[0] );
    opts.addOpt< int >( "orderTarget,v", "target order", &disc_orders[1] );
    bool analytic_field = false;
    opts.addOpt< void >( "analytic,q", "analytic field", &analytic_field );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline 1", &no_regression_test );
    opts.parseCommandLine( argc, argv );

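    // example invocation (a hypothetical run; the file names are the defaults set above):
    //   mpiexec -n 4 ./imoab_map_target -t srcWithSolnTag.h5m -m outTri15_8.h5m -w mapNE20_FV15.nc
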
    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n   on tasks : " << startG1 << ":" << endG1
                  << "\n ocn file: " << ocnFilename << "\n     on tasks : " << startG2 << ":" << endG2
                  << "\n map file: " << mapFilename << "\n     on tasks : " << startG4 << ":" << endG4 << "\n";
        if( !no_regression_test )
        {
            std::cout << " check projection against baseline: " << baseline << "\n";
        }
    }

    // load files on 3 different communicators / groups:
    // the first group has task 0, the second group tasks 0 and 1;
    // the coupler will be on joint tasks, on a third group (0 and 1, again)
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )
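    // create_group_and_comm (a helper from imoab_coupler_utils.hpp) builds an MPI group from the
    // task range [startG1, endG1] of jgroup and the corresponding communicator; on ranks outside
    // that range the returned communicator is MPI_COMM_NULL, which is what the
    // "!= MPI_COMM_NULL" guards below rely on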

    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

    // ocn_coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )

    ierr = iMOAB_Initialize( argc, argv );  // nothing is really needed from argc, argv yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs

    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm-ocn on coupler PEs

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )

        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
    }
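    // each iMOAB_RegisterApplication call binds a (name, communicator, unique component id)
    // triple to an application handle; the same physical component thus gets distinct instances
    // on its own PEs (e.g. ATM1 / cmpatm below) and on the coupler PEs (e.g. ATMX / cplatm above)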

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
        ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load atm mesh" )
    }

    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
    }
    MPI_Barrier( MPI_COMM_WORLD );

    ierr =
        setup_component_coupler_meshes( cmpOcnPID, cmpocn, cplOcnPID, cplocn, &ocnComm, &ocnPEGroup, &couComm,
                                        &couPEGroup, &ocnCouComm, ocnFilename, readopts, nghlay, repartitioner_scheme );
    CHECKIERR( ierr, "Cannot set-up target meshes" )
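    // setup_component_coupler_meshes (a helper from imoab_coupler_utils.hpp) loads the ocean
    // mesh on the ocean PEs and migrates it to the coupler PEs, repartitioning it there with the
    // repartitioner_scheme chosen above; afterwards cplOcnPID refers to a valid ocean mesh
    // instance on couComm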
#ifdef VERBOSE
    if( couComm != MPI_COMM_NULL )
    {
        char outputFileTgt3[] = "recvTgt.h5m";
        ierr                  = iMOAB_WriteMesh( cplOcnPID, outputFileTgt3, fileWriteOptions );
        CHECKIERR( ierr, "cannot write target mesh after receiving on coupler" )
    }
#endif
    CHECKIERR( ierr, "Cannot load and distribute target mesh" )
    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        // now load the map between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes " )
    }

    const std::string intx_from_file_identifier = "map-from-file";

    if( couComm != MPI_COMM_NULL )
    {
        int col_or_row = 0;  // row based partition
        int type       = 3;  // target is FV cell with global ID as DOFs
        ierr           = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, cplOcnPID, &col_or_row, &type,
                                                           intx_from_file_identifier.c_str(), mapFilename.c_str() );
        CHECKIERR( ierr, "failed to load map file from disk" );
    }
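    // the weights were read in parallel on the coupler PEs, distributed by rows
    // (col_or_row = 0); because cplOcnPID is passed in, row ownership follows the target (ocean)
    // mesh partition that already exists on the coupler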
00247 
00248     if( atmCouComm != MPI_COMM_NULL )
00249     {
00250         int type      = types[0];  // FV
00251         int direction = 1;         // from source to coupler; will create a mesh on cplAtmPID
00252         // because it is like "coverage", context will be cplocn
00253         ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &type,
00254                                      &cmpatm, &cplocn, &direction );
00255         CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
00256 #ifdef VERBOSE
00257         if( *cplAtmPID >= 0 )
00258         {
00259             char prefix[] = "atmcov";
00260             ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
00261             CHECKIERR( ierr, "failed to write local mesh" );
00262         }
00263 #endif
00264     }
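    // this migration sends only the source cells the map actually needs (the "coverage" mesh)
    // from the atm component to the coupler, and establishes the parallel comm graph that the
    // iMOAB_SendElementTag / iMOAB_ReceiveElementTag calls below reuse for field transfer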
    MPI_Barrier( MPI_COMM_WORLD );

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = disc_orders[1] * disc_orders[1] /*FV*/;

    const char* bottomTempField          = "AnalyticalSolnSrcExact";
    const char* bottomTempProjectedField = "Target_proj";

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );

        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tag Target_proj" );
    }
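    // with FV discretizations of order 1, atmCompNDoFs and ocnCompNDoFs are both 1 (one DoF per
    // cell); for a spectral element source of order p there would be p*p DoFs per element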

    if( analytic_field && ( atmComm != MPI_COMM_NULL ) )  // we are on source / atm pes
    {
        // cmpOcnPID, "T_proj;u_proj;v_proj;"
        ierr = iMOAB_DefineTagStorage( cmpAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );

        int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
        /*
         * Each process in the communicator will have access to a local mesh instance, which will contain the
         * original cells in the local partition and ghost entities. The numbers of vertices, primary cells,
         * visible blocks, and sideset and nodeset boundary conditions are returned in size 3 arrays,
         * for local, ghost and total numbers.
         */
        ierr = iMOAB_GetMeshInfo( cmpAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
        CHECKIERR( ierr, "failed to get num primary elems" );
        int numAllElem = nelem[2];
        int eetype     = 1;

        if( types[0] == 2 )  // point cloud
        {
            numAllElem = nverts[2];
            eetype     = 0;
        }
        std::vector< double > vals;
        int storLeng = atmCompNDoFs * numAllElem;
        vals.resize( storLeng );
        for( int k = 0; k < storLeng; k++ )
            vals[k] = k;

        ierr = iMOAB_SetDoubleTagStorage( cmpAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
        CHECKIERR( ierr, "cannot make analytical tag" )
    }
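    // the "analytic" field is just the ramp vals[k] = k, which makes it easy to spot whether
    // values survive the migration and projection steps unchanged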

    // need to make sure that the coverage mesh (created during the intx method) received the tag
    // that needs to be projected to the target; so far, the coverage mesh has only the ids and
    // global dofs; need to change the migrate method to accommodate any GLL tag
    // now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh
    // (cplAtmPID), using the new coverage graph communicator

    // make the tag 0, to check we are actually sending the needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance, which
             * will contain the original cells in the local partition and ghost entities. The
             * numbers of vertices, primary cells, visible blocks, and sideset and nodeset
             * boundary conditions are returned in size 3 arrays, for local, ghost and total
             * numbers.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            int eetype     = 1;
            if( types[0] == 2 )  // point cloud
            {
                eetype     = 0;  // vertices
                numAllElem = nverts[2];
            }
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            // set the tag to 0
            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot make tag null" )
        }
    }

    const char* concat_fieldname  = "AnalyticalSolnSrcExact";
    const char* concat_fieldnameT = "Target_proj";

    {

        PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to ocean:
            ierr = iMOAB_SendElementTag( cmpAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot send tag values" )
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on atm on coupler pes, that was redistributed according to coverage
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cmpatm );
            CHECKIERR( ierr, "cannot receive tag values" )
        }

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // context is for ocean
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )
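
        // note the send / receive / free pattern: sends are nonblocking, so sender-side buffers
        // must stay alive until the receives complete, and are then released explicitly with
        // iMOAB_FreeSenderBuffers, using the same context id as the send (here cplocn)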
#ifdef VERBOSE
        if( *cplAtmPID >= 0 )
        {
            char prefix[] = "atmcov_withdata";
            ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
        }
#endif

        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we defined
               on the source mesh and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
                                                       concat_fieldname, concat_fieldnameT );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )
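
            // conceptually the weight application is a sparse matrix-vector product: for each
            // target cell t, Target_proj(t) = sum_s W(t, s) * AnalyticalSolnSrcExact(s), with W
            // the map loaded earlier under intx_from_file_identifier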

            {
                char outputFileTgt[] = "fOcnOnCpl5.h5m";
                ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write fOcnOnCpl5.h5m to disk" )
            }
        }

        // send the projected tag back to ocean pes, with send/receive tag
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs,
                                           &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
                             "Target_proj on ocn pes" );
        }
        // send the tag to ocean pes, from the ocean mesh on coupler pes
        //   from couComm, using the common joint comm ocn_coupler
        // as always, use nonblocking sends
        // original graph (context is -1)
        if( couComm != MPI_COMM_NULL )
        {
            // need to use the ocean comp id for context
            context_id = cmpocn;  // id for ocean on comp
            ierr       = iMOAB_SendElementTag( cplOcnPID, "Target_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on coupler
            ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, "Target_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
            CHECKIERR( ierr, "cannot free buffers for Target_proj tag migration " )
        }
        MPI_Barrier( MPI_COMM_WORLD );
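
        // note how the context id always names the peer side: the coupler sends with the ocean
        // component id (cmpocn), while the component receives with the coupler-side id (cplocn)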

        if( ocnComm != MPI_COMM_NULL )
        {
#ifdef VERBOSE
            char outputFileOcn[] = "OcnWithProj.h5m";
            ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
#endif
            // test the projected results, for bottomTempProjectedField
            if( !no_regression_test )
            {
                // the same as in the remap test:
                // get the temperature field on the ocean, with the global ids, and check them
                // against the baseline file; first get GlobalIds from ocn, and the fields:
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard coded too
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr         = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, bottomTempProjectedField, &nelem[2], &ent_type,
                                                  &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
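                // check_baseline_file (a helper from imoab_coupler_utils.hpp) compares the
                // (global id, value) pairs against the stored baseline within the 1.e-9
                // tolerance, setting err_code to 0 on success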
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
            }
        }

    }  // end of the projection block

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }

    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free atm coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );
    MPI_Group_free( &ocnPEGroup );
    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}