MOAB: Mesh Oriented datABase  (version 5.4.1)
imoab_map2.cpp
/*
 * This imoab_map2 test simulates coupling between 2 components.
 * 2 meshes will be loaded from 2 files (src, tgt), along with one map file.
 * After the map is read in parallel on coupler PEs, with distributed rows, the
 * coupler meshes for source and target will be generated in a migration step,
 * in which we migrate from target PEs, according to row ids, to the coupler target mesh,
 * and from source to the coverage mesh on the coupler. During this migration, par comm graphs
 * will be established between source and coupler, and between target and coupler, which will
 * assist in field transfer from source to target, through the coupler.
 *
 */

#include "moab/Core.hpp"
#ifndef MOAB_HAVE_MPI
#error mbtempest tool requires MPI configuration
#endif

// MPI includes
#include "moab_mpi.h"
#include "moab/ParallelComm.hpp"
#include "MBParallelConventions.h"

#include "moab/iMOAB.h"
#include "TestUtil.hpp"
#include "moab/CpuTimer.hpp"
#include "moab/ProgOptions.hpp"
#include <iostream>
#include <sstream>

#include "imoab_coupler_utils.hpp"

#ifndef MOAB_HAVE_TEMPESTREMAP
#error The climate coupler test example requires MOAB configuration with TempestRemap
#endif

int main( int argc, char* argv[] )
{
    int ierr;
    int rankInGlobalComm, numProcesses;
    MPI_Group jgroup;
    std::string readopts( "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS" );

    // Timer data
    moab::CpuTimer timer;
    double timer_ops;
    std::string opName;

    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rankInGlobalComm );
    MPI_Comm_size( MPI_COMM_WORLD, &numProcesses );

    MPI_Comm_group( MPI_COMM_WORLD, &jgroup );  // all processes in jgroup

    std::string atmFilename = TestDir + "unittest/srcWithSolnTag.h5m";
    // in a regular case:  5 ATM, 6 CPLATM (ATMX), 17 OCN, 18 CPLOCN (OCNX);
    // intx atm/ocn is not in e3sm yet, so give it a number:
    //   6 * 100 + 18 = 618 : atmocnid
    // 9 LND, 10 CPLLND
    //   6 * 100 + 10 = 610 : atmlndid
    // cmpatm is for atm on atm pes
    // cmpocn is for ocean, on ocean pes
    // cplatm is for atm on coupler pes
    // cplocn is for ocean on coupler pes
    // atmocnid is for intx atm / ocn on coupler pes
    //
    int rankInAtmComm = -1;
    int cmpatm        = 5,
        cplatm        = 6;  // component ids are unique over all pes, and established in advance;

    std::string ocnFilename = TestDir + "unittest/outTri15_8.h5m";
    std::string mapFilename = TestDir + "unittest/mapNE20_FV15.nc";  // this is a netcdf file!

    std::string baseline = TestDir + "unittest/baseline2.txt";
    int rankInOcnComm    = -1;
    int cmpocn = 17, cplocn = 18,
        atmocnid = 618;  // component ids are unique over all pes, and established in advance;

    int rankInCouComm = -1;

    int nghlay = 0;  // number of ghost layers for loading the file
    std::vector< int > groupTasks;
    int startG1 = 0, startG2 = 0, endG1 = numProcesses - 1, endG2 = numProcesses - 1;

    int startG4 = startG1, endG4 = endG1;  // these are for the coupler layout
    int context_id = -1;                   // used now for freeing buffers

    // default: load atm / source on 2 procs, ocean / target on 2 procs, and the map on 2 as well,
    // in parallel, distributed by rows (which is actually very bad for the ocean mesh, because
    // probably all source cells will be involved in the coverage mesh on both tasks)

    ProgOptions opts;
    opts.addOpt< std::string >( "atmosphere,t", "atm mesh filename (source)", &atmFilename );
    opts.addOpt< std::string >( "ocean,m", "ocean mesh filename (target)", &ocnFilename );
    opts.addOpt< std::string >( "map_file,w", "map file from source to target", &mapFilename );

    opts.addOpt< int >( "startAtm,a", "start task for atmosphere layout", &startG1 );
    opts.addOpt< int >( "endAtm,b", "end task for atmosphere layout", &endG1 );

    opts.addOpt< int >( "startOcn,c", "start task for ocean layout", &startG2 );
    opts.addOpt< int >( "endOcn,d", "end task for ocean layout", &endG2 );

    opts.addOpt< int >( "startCoupler,g", "start task for coupler layout", &startG4 );
    opts.addOpt< int >( "endCoupler,j", "end task for coupler layout", &endG4 );

    int types[2]       = { 3, 3 };  // type of source and target;  1 = SE, 2 = PC, 3 = FV
    int disc_orders[2] = { 1, 1 };  // 1 is for FV and PC; 4 could be for SE
    opts.addOpt< int >( "typeSource,x", "source type", &types[0] );
    opts.addOpt< int >( "typeTarget,y", "target type", &types[1] );
    opts.addOpt< int >( "orderSource,u", "source order", &disc_orders[0] );
    opts.addOpt< int >( "orderTarget,v", "target order", &disc_orders[1] );
    bool analytic_field = false;
    opts.addOpt< void >( "analytic,q", "analytic field", &analytic_field );

    bool no_regression_test = false;
    opts.addOpt< void >( "no_regression,r", "do not do regression test against baseline", &no_regression_test );
    opts.parseCommandLine( argc, argv );
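
    // For reference, a possible invocation (hypothetical paths and task counts; by default every
    // component and the coupler run on all tasks):
    //   mpiexec -np 4 ./imoab_map2 -t src.h5m -m tgt.h5m -w map.nc -a 0 -b 1 -c 2 -d 3 -g 0 -j 3
    // would load the source on tasks 0-1, the target on tasks 2-3, and read the map on coupler
    // tasks 0-3.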

    char fileWriteOptions[] = "PARALLEL=WRITE_PART";

    if( !rankInGlobalComm )
    {
        std::cout << " atm file: " << atmFilename << "\n   on tasks : " << startG1 << ":" << endG1
                  << "\n ocn file: " << ocnFilename << "\n     on tasks : " << startG2 << ":" << endG2
                  << "\n map file: " << mapFilename << "\n     on tasks : " << startG4 << ":" << endG4 << "\n";
        if( !no_regression_test )
        {
            std::cout << " check projection against baseline: " << baseline << "\n";
        }
    }

    // load files on 3 different communicators / groups:
    // the first group has task 0, the second group has tasks 0 and 1;
    // the coupler will be on joint tasks, on a third group (0 and 1, again)
    MPI_Group atmPEGroup;
    MPI_Comm atmComm;
    ierr = create_group_and_comm( startG1, endG1, jgroup, &atmPEGroup, &atmComm );
    CHECKIERR( ierr, "Cannot create atm MPI group and communicator " )

    MPI_Group ocnPEGroup;
    MPI_Comm ocnComm;
    ierr = create_group_and_comm( startG2, endG2, jgroup, &ocnPEGroup, &ocnComm );
    CHECKIERR( ierr, "Cannot create ocn MPI group and communicator " )

    // we will always have a coupler
    MPI_Group couPEGroup;
    MPI_Comm couComm;
    ierr = create_group_and_comm( startG4, endG4, jgroup, &couPEGroup, &couComm );
    CHECKIERR( ierr, "Cannot create cpl MPI group and communicator " )

    // atm_coupler
    MPI_Group joinAtmCouGroup;
    MPI_Comm atmCouComm;
    ierr = create_joint_comm_group( atmPEGroup, couPEGroup, &joinAtmCouGroup, &atmCouComm );
    CHECKIERR( ierr, "Cannot create joint atm cou communicator" )

    // ocn_coupler
    MPI_Group joinOcnCouGroup;
    MPI_Comm ocnCouComm;
    ierr = create_joint_comm_group( ocnPEGroup, couPEGroup, &joinOcnCouGroup, &ocnCouComm );
    CHECKIERR( ierr, "Cannot create joint ocn cou communicator" )
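
    // The joint communicators span a component's tasks plus the coupler's tasks; they are the
    // channels over which mesh migration and tag (field) transfer happen, via the parallel comm
    // graphs established during migration (see the header comment above).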

    ierr = iMOAB_Initialize( argc, argv );  // argc/argv are not really used by iMOAB yet
    CHECKIERR( ierr, "Cannot initialize iMOAB" )

    int cmpAtmAppID       = -1;
    iMOAB_AppID cmpAtmPID = &cmpAtmAppID;  // atm
    int cplAtmAppID       = -1;            // -1 means it is not initialized
    iMOAB_AppID cplAtmPID = &cplAtmAppID;  // atm on coupler PEs

    int cmpOcnAppID       = -1;
    iMOAB_AppID cmpOcnPID = &cmpOcnAppID;        // ocn
    int cplOcnAppID = -1, cplAtmOcnAppID = -1;   // -1 means it is not initialized
    iMOAB_AppID cplOcnPID    = &cplOcnAppID;     // ocn on coupler PEs
    iMOAB_AppID cplAtmOcnPID = &cplAtmOcnAppID;  // intx atm-ocn on coupler PEs

    if( couComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( couComm, &rankInCouComm );
        // Register all the applications on the coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMX", &couComm, &cplatm,
                                          cplAtmPID );  // atm on coupler pes
        CHECKIERR( ierr, "Cannot register ATM over coupler PEs" )

        ierr = iMOAB_RegisterApplication( "OCNX", &couComm, &cplocn,
                                          cplOcnPID );  // ocn on coupler pes
        CHECKIERR( ierr, "Cannot register OCN over coupler PEs" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( atmComm, &rankInAtmComm );
        ierr = iMOAB_RegisterApplication( "ATM1", &atmComm, &cmpatm, cmpAtmPID );
        CHECKIERR( ierr, "Cannot register ATM App" )
        ierr = iMOAB_LoadMesh( cmpAtmPID, atmFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load atm mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );
    if( ocnComm != MPI_COMM_NULL )
    {
        MPI_Comm_rank( ocnComm, &rankInOcnComm );
        ierr = iMOAB_RegisterApplication( "OCN1", &ocnComm, &cmpocn, cmpOcnPID );
        CHECKIERR( ierr, "Cannot register OCN App" )
        ierr = iMOAB_LoadMesh( cmpOcnPID, ocnFilename.c_str(), readopts.c_str(), &nghlay );
        CHECKIERR( ierr, "Cannot load ocn mesh" )
    }

    MPI_Barrier( MPI_COMM_WORLD );

    if( couComm != MPI_COMM_NULL )
    {
        // now load the map between OCNx and ATMx on coupler PEs
        ierr = iMOAB_RegisterApplication( "ATMOCN", &couComm, &atmocnid, cplAtmOcnPID );
        CHECKIERR( ierr, "Cannot register ocn_atm map instance over coupler pes " )
    }

    const std::string intx_from_file_identifier = "map-from-file";

    if( couComm != MPI_COMM_NULL )
    {
        int dummyCpl     = -1;
        int dummy_rowcol = -1;
        int dummyType    = 0;
        ierr             = iMOAB_LoadMappingWeightsFromFile( cplAtmOcnPID, &dummyCpl, &dummy_rowcol, &dummyType,
                                                             intx_from_file_identifier.c_str(), mapFilename.c_str() );
        CHECKIERR( ierr, "failed to load map file from disk" );
    }
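
    // The map (a sparse weight matrix) is read in parallel on the coupler PEs, with its rows
    // distributed across tasks; row ids correspond to target (ocean) cells and column ids to
    // source (atm) cells, which is what drives the two migrations below.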

    if( atmCouComm != MPI_COMM_NULL )
    {
        int type      = types[0];  // FV
        int direction = 1;         // from source to coupler; will create a mesh on cplAtmPID
        // because it is like a "coverage" mesh, the context will be cplocn
        ierr = iMOAB_MigrateMapMesh( cmpAtmPID, cplAtmOcnPID, cplAtmPID, &atmCouComm, &atmPEGroup, &couPEGroup, &type,
                                     &cmpatm, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for atm on coupler" );
#ifdef VERBOSE
        if( *cplAtmPID >= 0 )
        {
            char prefix[] = "atmcov";
            ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local mesh" );
        }
#endif
    }
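    // The migrated atm mesh on the coupler is a "coverage" mesh: each coupler task receives only
    // the source cells that appear as columns in its local rows of the map, i.e. the cells needed
    // to cover its part of the target.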
    MPI_Barrier( MPI_COMM_WORLD );

    if( ocnCouComm != MPI_COMM_NULL )
    {
        int type      = types[1];  // cells with GLOBAL_ID in ocean / target set
        int direction = 2;         // from coupler to target; will create a mesh on cplOcnPID
        // it will be like the initial migrate cmpocn <-> cplocn
        ierr = iMOAB_MigrateMapMesh( cmpOcnPID, cplAtmOcnPID, cplOcnPID, &ocnCouComm, &ocnPEGroup, &couPEGroup, &type,
                                     &cmpocn, &cplocn, &direction );
        CHECKIERR( ierr, "failed to migrate mesh for ocn on coupler" );

#ifdef VERBOSE
        if( *cplOcnPID >= 0 )
        {
            char prefix[] = "ocntgt";
            ierr          = iMOAB_WriteLocalMesh( cplOcnPID, prefix );
            CHECKIERR( ierr, "failed to write local ocean mesh" );
            char outputFileRec[] = "CoupOcn.h5m";
            ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileRec, fileWriteOptions );
            CHECKIERR( ierr, "failed to write ocean global mesh file" );
        }
#endif
    }
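    // After the two migrations, par comm graphs exist between cmpatm and cplatm (in the cplocn
    // context) and between cmpocn and cplocn; these graphs are what the tag send/receive calls
    // below use to route field data.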
    MPI_Barrier( MPI_COMM_WORLD );

    int tagIndex[2];
    int tagTypes[2]  = { DENSE_DOUBLE, DENSE_DOUBLE };
    int atmCompNDoFs = disc_orders[0] * disc_orders[0], ocnCompNDoFs = disc_orders[1] * disc_orders[1] /*FV*/;

    const char* bottomTempField          = "AnalyticalSolnSrcExact";
    const char* bottomTempProjectedField = "Target_proj";

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DefineTagStorage( cplAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );

        ierr = iMOAB_DefineTagStorage( cplOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs, &tagIndex[1] );
        CHECKIERR( ierr, "failed to define the field tag Target_proj" );
    }
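
    // Each tag stores disc_order * disc_order degrees of freedom per element: a single value per
    // cell for the default FV/PC case (order 1), and order^2 values per element for a higher-order
    // discretization such as SE (order 4 above), following the convention of this example.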

    if( analytic_field && ( atmComm != MPI_COMM_NULL ) )  // we are on source / atm pes
    {
        // cmpOcnPID, "T_proj;u_proj;v_proj;"
        ierr = iMOAB_DefineTagStorage( cmpAtmPID, bottomTempField, &tagTypes[0], &atmCompNDoFs, &tagIndex[0] );
        CHECKIERR( ierr, "failed to define the field tag AnalyticalSolnSrcExact" );

        int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
        /*
         * Each process in the communicator will have access to a local mesh instance, which will contain the
         * original cells in the local partition and ghost entities. The numbers of vertices, primary cells,
         * visible blocks, and sideset and nodeset boundary conditions are returned in three-entry arrays,
         * for local, ghost and total counts.
         */
        ierr = iMOAB_GetMeshInfo( cmpAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
        CHECKIERR( ierr, "failed to get num primary elems" );
        int numAllElem = nelem[2];
        int eetype     = 1;

        if( types[0] == 2 )  // point cloud
        {
            numAllElem = nverts[2];
            eetype     = 0;
        }
        std::vector< double > vals;
        int storLeng = atmCompNDoFs * numAllElem;
        vals.resize( storLeng );
        for( int k = 0; k < storLeng; k++ )
            vals[k] = k;

        ierr = iMOAB_SetDoubleTagStorage( cmpAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
        CHECKIERR( ierr, "cannot make analytical tag" )
    }
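
    // The "analytic" field set above is just a synthetic ramp (vals[k] = k), enough to exercise
    // the transfer path; the default source file (srcWithSolnTag.h5m) presumably already carries
    // the solution tag.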

    // We need to make sure that the coverage mesh (created during the intx method) received the
    // tag that needs to be projected to the target. So far, the coverage mesh has only the ids and
    // global dofs; the migrate method would need changes to accommodate an arbitrary GLL tag.
    // Now send a tag from the original atmosphere (cmpAtmPID) towards the migrated coverage mesh
    // (cplAtmPID), using the new coverage graph communicator.

    // zero out the tag first, to check that we are actually sending the needed data
    {
        if( cplAtmAppID >= 0 )
        {
            int nverts[3], nelem[3], nblocks[3], nsbc[3], ndbc[3];
            /*
             * Each process in the communicator will have access to a local mesh instance, which
             * will contain the original cells in the local partition and ghost entities. The
             * numbers of vertices, primary cells, visible blocks, and sideset and nodeset boundary
             * conditions are returned in three-entry arrays, for local, ghost and total counts.
             */
            ierr = iMOAB_GetMeshInfo( cplAtmPID, nverts, nelem, nblocks, nsbc, ndbc );
            CHECKIERR( ierr, "failed to get num primary elems" );
            int numAllElem = nelem[2];
            int eetype     = 1;
            if( types[0] == 2 )  // point cloud
            {
                eetype     = 0;  // vertices
                numAllElem = nverts[2];
            }
            std::vector< double > vals;
            int storLeng = atmCompNDoFs * numAllElem;

            vals.resize( storLeng );
            for( int k = 0; k < storLeng; k++ )
                vals[k] = 0.;

            ierr = iMOAB_SetDoubleTagStorage( cplAtmPID, bottomTempField, &storLeng, &eetype, &vals[0] );
            CHECKIERR( ierr, "cannot zero out the tag" )
        }
    }

    const char* concat_fieldname  = "AnalyticalSolnSrcExact";
    const char* concat_fieldnameT = "Target_proj";

    {

        PUSH_TIMER( "Send/receive data from atm component to coupler in ocn context" )
        if( atmComm != MPI_COMM_NULL )
        {
            // as always, use nonblocking sends
            // this is for projection to the ocean:
            ierr = iMOAB_SendElementTag( cmpAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cplocn );
            CHECKIERR( ierr, "cannot send tag values" )
        }
        if( couComm != MPI_COMM_NULL )
        {
            // receive on atm on coupler pes, redistributed according to the coverage
            ierr = iMOAB_ReceiveElementTag( cplAtmPID, "AnalyticalSolnSrcExact", &atmCouComm, &cmpatm );
            CHECKIERR( ierr, "cannot receive tag values" )
        }

        // we can now free the sender buffers
        if( atmComm != MPI_COMM_NULL )
        {
            ierr = iMOAB_FreeSenderBuffers( cmpAtmPID, &cplocn );  // context is for ocean
            CHECKIERR( ierr, "cannot free buffers used to resend atm tag towards the coverage mesh" )
        }
        POP_TIMER( MPI_COMM_WORLD, rankInGlobalComm )
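
        // The transfer always follows the same three-step, nonblocking pattern: post sends on the
        // owning component, post matching receives on the coupler side of the joint communicator,
        // then free the sender buffers once the receives have completed.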
#ifdef VERBOSE
        if( *cplAtmPID >= 0 )
        {
            char prefix[] = "atmcov_withdata";
            ierr          = iMOAB_WriteLocalMesh( cplAtmPID, prefix );
            CHECKIERR( ierr, "failed to write local atm cov mesh with data" );
        }
#endif

        if( couComm != MPI_COMM_NULL )
        {
            /* We have the remapping weights now. Let us apply the weights onto the tag we defined
               on the source mesh and get the projection on the target mesh */
            PUSH_TIMER( "Apply Scalar projection weights" )
            ierr = iMOAB_ApplyScalarProjectionWeights( cplAtmOcnPID, intx_from_file_identifier.c_str(),
                                                       concat_fieldname, concat_fieldnameT );
            CHECKIERR( ierr, "failed to compute projection weight application" );
            POP_TIMER( couComm, rankInCouComm )

            {
                char outputFileTgt[] = "fOcnOnCpl6.h5m";
                ierr                 = iMOAB_WriteMesh( cplOcnPID, outputFileTgt, fileWriteOptions );
                CHECKIERR( ierr, "could not write fOcnOnCpl6.h5m to disk" )
            }
        }
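
        // Conceptually this step computes Target_proj = W * AnalyticalSolnSrcExact, where W is the
        // sparse weight matrix loaded from mapFilename; each coupler task applies its local rows
        // against the coverage-mesh values it received.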

        // send the projected tag back to the ocean pes, with send/receive tag
        if( ocnComm != MPI_COMM_NULL )
        {
            int tagIndexIn2;
            ierr = iMOAB_DefineTagStorage( cmpOcnPID, bottomTempProjectedField, &tagTypes[1], &ocnCompNDoFs,
                                           &tagIndexIn2 );
            CHECKIERR( ierr, "failed to define the field tag for receiving back the tag "
                             "Target_proj on ocn pes" );
        }
        // send the tag to the ocean pes, from the ocean mesh on coupler pes,
        //   from couComm, using the common joint comm ocn_coupler;
        // as always, use nonblocking sends
        // original graph (context is -1)
        if( couComm != MPI_COMM_NULL )
        {
            // need to use the ocean comp id for context
            context_id = cmpocn;  // id for ocean on comp
            ierr       = iMOAB_SendElementTag( cplOcnPID, "Target_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot send tag values back to ocean pes" )
        }

        // receive on component 2, ocean
        if( ocnComm != MPI_COMM_NULL )
        {
            context_id = cplocn;  // id for ocean on coupler
            ierr       = iMOAB_ReceiveElementTag( cmpOcnPID, "Target_proj", &ocnCouComm, &context_id );
            CHECKIERR( ierr, "cannot receive tag values from ocean mesh on coupler pes" )
        }

        if( couComm != MPI_COMM_NULL )
        {
            context_id = cmpocn;
            ierr       = iMOAB_FreeSenderBuffers( cplOcnPID, &context_id );
            CHECKIERR( ierr, "cannot free buffers used to send the projected tag back to ocean pes" )
        }
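
        // Note that the context ids mirror each other: the coupler side sends with the component
        // id (cmpocn) as context, while the component side receives with the coupler id (cplocn),
        // so each endpoint names its peer.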
        MPI_Barrier( MPI_COMM_WORLD );

        if( ocnComm != MPI_COMM_NULL )
        {
#ifdef VERBOSE
            char outputFileOcn[] = "OcnWithProj.h5m";
            ierr                 = iMOAB_WriteMesh( cmpOcnPID, outputFileOcn, fileWriteOptions );
            CHECKIERR( ierr, "could not write OcnWithProj.h5m to disk" )
#endif
            // test the results for bottomTempProjectedField
            if( !no_regression_test )
            {
                // the same as the remap test:
                // get the temp field on the ocean, with the global ids, and compare to the baseline file;
                // first get GlobalIds from ocn, and the fields:
                int nverts[3], nelem[3];
                ierr = iMOAB_GetMeshInfo( cmpOcnPID, nverts, nelem, 0, 0, 0 );
                CHECKIERR( ierr, "failed to get ocn mesh info" );
                std::vector< int > gidElems;
                gidElems.resize( nelem[2] );
                std::vector< double > tempElems;
                tempElems.resize( nelem[2] );
                // get global id storage
                const std::string GidStr = "GLOBAL_ID";  // hard-coded tag name
                int tag_type = DENSE_INTEGER, ncomp = 1, tagInd = 0;
                ierr = iMOAB_DefineTagStorage( cmpOcnPID, GidStr.c_str(), &tag_type, &ncomp, &tagInd );
                CHECKIERR( ierr, "failed to define global id tag" );

                int ent_type = 1;
                ierr         = iMOAB_GetIntTagStorage( cmpOcnPID, GidStr.c_str(), &nelem[2], &ent_type, &gidElems[0] );
                CHECKIERR( ierr, "failed to get global ids" );
                ierr = iMOAB_GetDoubleTagStorage( cmpOcnPID, bottomTempProjectedField, &nelem[2], &ent_type,
                                                  &tempElems[0] );
                CHECKIERR( ierr, "failed to get temperature field" );
                int err_code = 1;
                check_baseline_file( baseline, gidElems, tempElems, 1.e-9, err_code );
                if( 0 == err_code )
                    std::cout << " passed baseline test atm2ocn on ocean task " << rankInOcnComm << "\n";
            }
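
            // The regression check pairs each ocean cell's GLOBAL_ID with its projected value and
            // compares against baseline2.txt entry by entry, with a tolerance of 1.e-9.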
        }

    }  // end of the projection block (a single iteration here)

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplAtmOcnPID );
        CHECKIERR( ierr, "cannot deregister app intx AO" )
    }
    if( ocnComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCN1" )
    }

    if( atmComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cmpAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATM1" )
    }

    if( couComm != MPI_COMM_NULL )
    {
        ierr = iMOAB_DeregisterApplication( cplOcnPID );
        CHECKIERR( ierr, "cannot deregister app OCNX" )

        ierr = iMOAB_DeregisterApplication( cplAtmPID );
        CHECKIERR( ierr, "cannot deregister app ATMX" )
    }
    ierr = iMOAB_Finalize();
    CHECKIERR( ierr, "did not finalize iMOAB" )

    // free the atm - coupler group and comm
    if( MPI_COMM_NULL != atmCouComm ) MPI_Comm_free( &atmCouComm );
    MPI_Group_free( &joinAtmCouGroup );
    if( MPI_COMM_NULL != atmComm ) MPI_Comm_free( &atmComm );

    if( MPI_COMM_NULL != ocnComm ) MPI_Comm_free( &ocnComm );
    // free the ocn - coupler group and comm
    if( MPI_COMM_NULL != ocnCouComm ) MPI_Comm_free( &ocnCouComm );
    MPI_Group_free( &joinOcnCouGroup );

    if( MPI_COMM_NULL != couComm ) MPI_Comm_free( &couComm );

    MPI_Group_free( &atmPEGroup );
    MPI_Group_free( &ocnPEGroup );
    MPI_Group_free( &couPEGroup );
    MPI_Group_free( &jgroup );

    MPI_Finalize();

    return 0;
}