MOAB: Mesh Oriented datABase  (version 5.2.1)
iMOAB.cpp
00001 /** \file iMOAB.cpp
00002  */
00003 
00004 #include "moab/MOABConfig.h"
00005 #include "moab/Core.hpp"
00006 
00007 #ifdef MOAB_HAVE_MPI
00008 #include "moab_mpi.h"
00009 #include "moab/ParallelComm.hpp"
00010 #include "moab/ParCommGraph.hpp"
00011 #include "moab/ParallelMergeMesh.hpp"
00012 #include "moab/IntxMesh/IntxUtils.hpp"
00013 #endif
00014 #include "DebugOutput.hpp"
00015 #include "moab/iMOAB.h"
00016 
00017 /* this is needed because of direct access to hdf5/mhdf */
00018 #ifdef MOAB_HAVE_HDF5
00019 #include "mhdf.h"
00020 #include <H5Tpublic.h>
00021 #endif
00022 
00023 #include "moab/CartVect.hpp"
00024 #include "MBTagConventions.hpp"
00025 #include "moab/MeshTopoUtil.hpp"
00026 #include "moab/ReadUtilIface.hpp"
00027 #include "moab/MergeMesh.hpp"
00028 
00029 #ifdef MOAB_HAVE_TEMPESTREMAP
00030 #include "STLStringHelper.h"
00031 #include "moab/IntxMesh/IntxUtils.hpp"
00032 
00033 #include "moab/Remapping/TempestRemapper.hpp"
00034 #include "moab/Remapping/TempestOnlineMap.hpp"
00035 #endif
00036 
00037 // C++ includes
00038 #include <cassert>
00039 #include <sstream>
00040 #include <iostream>
00041 
00042 using namespace moab;
00043 
00044 // #define VERBOSE
00045 
00046 // global variables; should they be organized in a structure, for easier reference?
00047 // or how do we keep them global?
00048 
00049 #ifdef __cplusplus
00050 extern "C" {
00051 #endif
00052 
00053 #define CHKERRVAL( ierr )                        \
00054     {                                            \
00055         if( moab::MB_SUCCESS != ierr ) return 1; \
00056     }
00057 #define CHKIERRVAL( ierr )        \
00058     {                             \
00059         if( 0 != ierr ) return 1; \
00060     }
00061 
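/* A note on the error convention encoded by these macros: every iMOAB call returns 0 on
   success and a nonzero ErrCode on failure, so a caller-side check is simply (a minimal
   sketch, assuming only the declarations from moab/iMOAB.h):

       ErrCode ierr = iMOAB_Initialize( argc, argv );
       if( ierr != 0 ) return ierr;   // any nonzero value means failure
*/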
00062 #ifdef MOAB_HAVE_TEMPESTREMAP
00063 struct TempestMapAppData
00064 {
00065     moab::TempestRemapper* remapper;
00066     std::map< std::string, moab::TempestOnlineMap* > weightMaps;
00067     iMOAB_AppID pid_src;
00068     iMOAB_AppID pid_dest;
00069 };
00070 #endif
00071 
00072 struct appData
00073 {
00074     EntityHandle file_set;
00075     int global_id;  // external component id, unique for application
00076     std::string name;
00077     Range all_verts;
00078     Range local_verts;  // may include shared but not owned vertices at the interface
00079     // these vertices would be all_verts if no ghosting was required
00080     Range ghost_vertices;  // locally ghosted from other processors
00081     Range primary_elems;
00082     Range owned_elems;
00083     Range ghost_elems;
00084     int dimension;             // 2 or 3, dimension of primary elements (redundant?)
00085     long num_global_elements;  // union of all elements in primary_elements; either from hdf5
00086                                // reading or from reduce
00087     long num_global_vertices;  // union of all nodes, after sharing is resolved; it could be
00088                                // determined from hdf5 reading
00089     Range mat_sets;
00090     std::map< int, int > matIndex;  // map from global block id to index in mat_sets
00091     Range neu_sets;
00092     Range diri_sets;
00093     std::map< std::string, Tag > tagMap;
00094     std::vector< Tag > tagList;
00095     bool point_cloud;
00096 
00097 #ifdef MOAB_HAVE_MPI
00098     // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
00099     // application
00100     std::map< int, ParCommGraph* > pgraph;  // map from context id to the ParCommGraph*
00101 #endif
00102 
00103 #ifdef MOAB_HAVE_TEMPESTREMAP
00104     TempestMapAppData tempestData;
00105 #endif
00106 };
00107 
00108 struct GlobalContext
00109 {
00110     // are there reasons to have multiple moab inits? Is ref count needed?
00111     Interface* MBI;
00112     // we should also have the default tags stored, initialized
00113     Tag material_tag, neumann_tag, dirichlet_tag,
00114         globalID_tag;  // material, neumann, dirichlet,  globalID
00115     int refCountMB;
00116     int iArgc;
00117     iMOAB_String* iArgv;
00118     int unused_pid;
00119 
00120     std::map< std::string, int > appIdMap;  // from app string (uppercase) to app id
00121     std::map< int, int > appIdCompMap;      // from component id to app id
00122 
00123 #ifdef MOAB_HAVE_MPI
00124     std::vector< ParallelComm* > pcomms;  // created in order of applications, one moab::ParallelComm for each
00125 #endif
00126 
00127     std::vector< appData > appDatas;  // the same order as pcomms
00128     int globalrank, worldprocs;
00129     bool MPI_initialized;
00130 
00131     GlobalContext()
00132     {
00133         MBI        = 0;
00134         refCountMB = 0;
00135         unused_pid = 0;
00136     }
00137 };
00138 
00139 static struct GlobalContext context;
00140 
00141 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
00142 {
00143     context.iArgc = argc;
00144     context.iArgv = argv;  // shallow copy
00145 
00146     if( 0 == context.refCountMB )
00147     {
00148         context.MBI = new( std::nothrow ) moab::Core;
00149         // retrieve the default tags
00150         const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
00151                                                      DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
00152         // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
00153         Tag gtags[4];
00154 
00155         for( int i = 0; i < 4; i++ )
00156         {
00157 
00158             ErrorCode rval =
00159                 context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );CHKERRVAL( rval );
00160         }
00161 
00162         context.material_tag  = gtags[0];
00163         context.neumann_tag   = gtags[1];
00164         context.dirichlet_tag = gtags[2];
00165         context.globalID_tag  = gtags[3];
00166     }
00167 
00168     context.MPI_initialized = false;
00169 #ifdef MOAB_HAVE_MPI
00170     int flagInit;
00171     MPI_Initialized( &flagInit );
00172 
00173     if( flagInit && !context.MPI_initialized )
00174     {
00175         MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
00176         MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
00177         context.MPI_initialized = true;
00178     }
00179 #endif
00180 
00181     context.refCountMB++;
00182     return 0;
00183 }
00184 
00185 ErrCode iMOAB_InitializeFortran()
00186 {
00187     return iMOAB_Initialize( 0, 0 );
00188 }
00189 
00190 ErrCode iMOAB_Finalize()
00191 {
00192     context.refCountMB--;
00193 
00194     if( 0 == context.refCountMB ) { delete context.MBI; }
00195 
00196     return MB_SUCCESS;
00197 }
00198 
00199 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
00200 #ifdef MOAB_HAVE_MPI
00201                                    MPI_Comm* comm,
00202 #endif
00203                                    int* compid, iMOAB_AppID pid )
00204 {
00205     // will create a parallel comm for this application too, so there will be a
00206     // mapping from *pid to file set and to parallel comm instances
00207     std::string name( app_name );
00208 
00209     if( context.appIdMap.find( name ) != context.appIdMap.end() )
00210     {
00211         std::cout << " application " << name << " already registered \n";
00212         return 1;
00213     }
00214 
00215     *pid                   = context.unused_pid++;
00216     context.appIdMap[name] = *pid;
00217     int rankHere           = 0;
00218 #ifdef MOAB_HAVE_MPI
00219     MPI_Comm_rank( *comm, &rankHere );
00220 #endif
00221     if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
00222     if( *compid <= 0 )
00223     {
00224         std::cout << " convention for external application is to have its id positive \n";
00225         return 1;
00226     }
00227 
00228     if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
00229     {
00230         std::cout << " external application with comp id " << *compid << " is already registered\n";
00231         return 1;
00232     }
00233 
00234     context.appIdCompMap[*compid] = *pid;
00235 
00236     // now create ParallelComm and a file set for this application
00237 #ifdef MOAB_HAVE_MPI
00238     if( comm )
00239     {
00240         ParallelComm* pco = new ParallelComm( context.MBI, *comm );
00241 
00242 #ifndef NDEBUG
00243         int index = pco->get_id();  // it could be useful to get app id from pcomm instance ...
00244         assert( index == *pid );
00245         // here, we assert that the pid is the same as the id of the ParallelComm instance
00246         // useful for writing in parallel
00247 #endif
00248         context.pcomms.push_back( pco );
00249     }
00250     else
00251     {
00252         context.pcomms.push_back( 0 );
00253     }
00254 #endif
00255 
00256     // create now the file set that will be used for loading the model in
00257     EntityHandle file_set;
00258     ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );CHKERRVAL( rval );
00259 
00260     appData app_data;
00261     app_data.file_set  = file_set;
00262     app_data.global_id = *compid;  // will be used mostly for par comm graph
00263     app_data.name      = name;     // save the name of application
00264 
00265 #ifdef MOAB_HAVE_TEMPESTREMAP
00266     app_data.tempestData.remapper = NULL;  // Only allocate as needed
00267 #endif
00268 
00269     app_data.point_cloud = false;
00270 
00271     context.appDatas.push_back(
00272         app_data );  // context.appDatas[*pid] will hold the file set of interest for this application
00273     return 0;
00274 }
00275 
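/* A minimal registration sketch (illustrative only, not part of this file), assuming an
   MPI build; the application name "MYAPP" and component id 10 are arbitrary:

       int ierr, pid, compid = 10;
       char appname[] = "MYAPP";
       MPI_Comm comm  = MPI_COMM_WORLD;

       ierr = iMOAB_Initialize( argc, argv );
       ierr = iMOAB_RegisterApplication( appname, &comm, &compid, &pid );
       // ... load a mesh, query it, set/get tags ...
       ierr = iMOAB_DeregisterApplication( &pid );
       ierr = iMOAB_Finalize();

   The component id must be positive and unique among registered applications; the
   returned pid indexes the per-application data (file set, ParallelComm) held in the
   global context. */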
00276 ErrCode iMOAB_RegisterFortranApplication( const iMOAB_String app_name,
00277 #ifdef MOAB_HAVE_MPI
00278                                           int* comm,
00279 #endif
00280                                           int* compid, iMOAB_AppID pid, int app_name_length )
00281 {
00282     std::string name( app_name );
00283 
00284     if( (int)strlen( app_name ) > app_name_length )
00285     {
00286         std::cout << " length of string issue \n";
00287         return 1;
00288     }
00289 
00290 #ifdef MOAB_HAVE_MPI
00291     MPI_Comm ccomm;
00292     if( comm )
00293     {
00294         // convert from Fortran communicator to a C communicator
00295         // see transfer of handles
00296         // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
00297         ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
00298     }
00299 #endif
00300 
00301     // now call C style registration function:
00302     return iMOAB_RegisterApplication( app_name,
00303 #ifdef MOAB_HAVE_MPI
00304                                       &ccomm,
00305 #endif
00306                                       compid, pid );
00307 }
00308 
00309 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
00310 {
00311     // the file set and parallel comm are stored in vectors indexed by *pid
00312     // assume we did not delete anything yet
00313     // *pid will not be reused if we register another application
00314     appData& data = context.appDatas[*pid];
00315     int rankHere  = 0;
00316 #ifdef MOAB_HAVE_MPI
00317     ParallelComm* pco = context.pcomms[*pid];
00318     rankHere          = pco->rank();
00319 #endif
00320     if( !rankHere )
00321         std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
00322                   << " is de-registered now \n";
00323 
00324     EntityHandle fileSet = data.file_set;
00325     // get all entities part of the file set
00326     Range fileents;
00327     ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );CHKERRVAL( rval );
00328 
00329     fileents.insert( fileSet );
00330 
00331     rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );CHKERRVAL( rval );  // append all mesh sets
00332 
00333 #ifdef MOAB_HAVE_TEMPESTREMAP
00334     if( data.tempestData.remapper ) delete data.tempestData.remapper;
00335     if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
00336 #endif
00337 
00338 #ifdef MOAB_HAVE_MPI
00339 
00340     // we could get the pco also with
00341     // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
00342 
00343     std::map< int, ParCommGraph* >& pargs = data.pgraph;
00344 
00345     // free the parallel comm graphs associated with this app
00346     for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
00347     {
00348         ParCommGraph* pgr = mt->second;
00349         delete pgr;
00350         pgr = NULL;
00351     }
00352     if( pco ) delete pco;
00353 #endif
00354 
00355     // first delete all entities except vertices
00356     Range vertices = fileents.subset_by_type( MBVERTEX );
00357     Range noverts  = subtract( fileents, vertices );
00358 
00359     rval = context.MBI->delete_entities( noverts );CHKERRVAL( rval );
00360     // now retrieve connected elements that still exist (maybe in other sets, pids?)
00361     Range adj_ents_left;
00362     rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );CHKERRVAL( rval );
00363     rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );CHKERRVAL( rval );
00364     rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );CHKERRVAL( rval );
00365 
00366     if( !adj_ents_left.empty() )
00367     {
00368         Range conn_verts;
00369         rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );CHKERRVAL( rval );
00370         vertices = subtract( vertices, conn_verts );
00371     }
00372 
00373     rval = context.MBI->delete_entities( vertices );CHKERRVAL( rval );
00374 
00375     std::map< std::string, int >::iterator mit;
00376 
00377     for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
00378     {
00379         int pidx = mit->second;
00380 
00381         if( *pid == pidx ) { break; }
00382     }
00383 
00384     context.appIdMap.erase( mit );
00385     std::map< int, int >::iterator mit1;
00386 
00387     for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
00388     {
00389         int pidx = mit1->second;
00390 
00391         if( *pid == pidx ) { break; }
00392     }
00393 
00394     context.appIdCompMap.erase( mit1 );
00395 
00396     context.unused_pid--;  // deregistration has to happen in reverse order of registration TODO
00397     context.appDatas.pop_back();
00398 #ifdef MOAB_HAVE_MPI
00399     context.pcomms.pop_back();
00400 #endif
00401     return 0;
00402 }
00403 
00404 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename, int* num_global_vertices, int* num_global_elements,
00405                               int* num_dimension, int* num_parts, int filename_length )
00406 {
00407     assert( filename_length != 0 );
00408 #ifdef MOAB_HAVE_HDF5
00409     std::string filen( filename );
00410 
00411     if( filename_length < (int)strlen( filename ) ) { filen = filen.substr( 0, filename_length ); }
00412 
00413     *num_global_vertices = 0;
00414     int edges            = 0;
00415     int faces            = 0;
00416     int regions          = 0;
00417     *num_global_elements = 0;
00418     *num_dimension       = 0;
00419     *num_parts           = 0;
00420 
00421     mhdf_FileHandle file;
00422     mhdf_Status status;
00423     unsigned long max_id;
00424     struct mhdf_FileDesc* data;
00425 
00426     file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
00427 
00428     if( mhdf_isError( &status ) )
00429     {
00430         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00431         return 1;
00432     }
00433 
00434     data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
00435                                 1 );  // will use extra set info; will get parallel partition tag info too!
00436 
00437     if( mhdf_isError( &status ) )
00438     {
00439         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00440         return 1;
00441     }
00442 
00443     *num_dimension       = data->nodes.vals_per_ent;
00444     *num_global_vertices = (int)data->nodes.count;
00445 
00446     for( int i = 0; i < data->num_elem_desc; i++ )
00447     {
00448         struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
00449         struct mhdf_EntDesc* ent_d    = &( el_desc->desc );
00450 
00451         if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) ) { edges += ent_d->count; }
00452 
00453         if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) ) { faces += ent_d->count; }
00454 
00455         if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) ) { faces += ent_d->count; }
00456 
00457         if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) ) { faces += ent_d->count; }
00458 
00459         if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) ) { regions += ent_d->count; }
00460 
00461         if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) ) { regions += ent_d->count; }
00462 
00463         if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) ) { regions += ent_d->count; }
00464 
00465         if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) ) { regions += ent_d->count; }
00466 
00467         if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) ) { regions += ent_d->count; }
00468 
00469         if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) ) { regions += ent_d->count; }
00470 
00471         if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) ) { regions += ent_d->count; }
00472     }
00473 
00474     *num_parts = data->numEntSets[0];
00475 
00476     // is this required?
00477     if( edges > 0 )
00478     {
00479         *num_dimension       = 1;  // I don't think it will ever return 1
00480         *num_global_elements = edges;
00481     }
00482 
00483     if( faces > 0 )
00484     {
00485         *num_dimension       = 2;
00486         *num_global_elements = faces;
00487     }
00488 
00489     if( regions > 0 )
00490     {
00491         *num_dimension       = 3;
00492         *num_global_elements = regions;
00493     }
00494 
00495     mhdf_closeFile( file, &status );
00496 
00497     free( data );
00498 
00499 #else
00500     std::cout << filename
00501               << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
00502                  "formats other than a h5m file.\n";
00503     *num_global_vertices = *num_global_elements = *num_dimension = *num_parts = 0;
00504 #endif
00505 
00506     return 0;
00507 }
00508 
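/* A small sketch of querying an h5m header before loading it; the file name is
   hypothetical and strlen comes from <cstring>:

       int nverts, nelems, ndim, nparts;
       char h5mfile[] = "mesh.h5m";
       int ierr = iMOAB_ReadHeaderInfo( h5mfile, &nverts, &nelems, &ndim, &nparts,
                                        (int)strlen( h5mfile ) );

   With HDF5 enabled this fills the global vertex and element counts, the mesh dimension
   and the number of parallel partitions stored in the file; without HDF5 all outputs are
   set to zero. */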
00509 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String read_options,
00510                         int* num_ghost_layers, int filename_length, int read_options_length )
00511 {
00512     assert( filename_length != 0 );
00513 
00514     if( (int)strlen( filename ) > filename_length )
00515     {
00516         std::cout << " filename length issue\n";
00517         return 1;
00518     }
00519 
00520     if( (int)strlen( read_options ) > read_options_length )
00521     {
00522         std::cout << " read options length issue\n";
00523         return 1;
00524     }
00525 
00526     // make sure we use the file set and pcomm associated with the *pid
00527     std::ostringstream newopts;
00528     newopts << read_options;
00529 
00530 #ifdef MOAB_HAVE_MPI
00531 
00532     if( context.MPI_initialized )
00533     {
00534         if( context.worldprocs > 1 )
00535         {
00536             std::string opts( read_options );
00537             std::string pcid( "PARALLEL_COMM=" );
00538             std::size_t found = opts.find( pcid );
00539 
00540             if( found != std::string::npos )
00541             {
00542                 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00543                 return 1;
00544             }
00545 
00546             // apply the PARALLEL_COMM option only for h5m files; it does not work for .g
00547             // files (used in test_remapping)
00548             std::string filen( filename );
00549             std::string::size_type idx = filen.rfind( '.' );
00550 
00551             if( idx != std::string::npos )
00552             {
00553                 std::string extension = filen.substr( idx + 1 );
00554                 if( extension == std::string( "h5m" ) ) newopts << ";;PARALLEL_COMM=" << *pid;
00555             }
00556 
00557             if( *num_ghost_layers >= 1 )
00558             {
00559                 // if we want ghosts, we also request additional entities (the last field of the option)
00560                 // because the additional entities can be edges or faces that are part of the Neumann sets
00561                 std::string pcid2( "PARALLEL_GHOSTS=" );
00562                 std::size_t found2 = opts.find( pcid2 );
00563 
00564                 if( found2 != std::string::npos )
00565                 {
00566                     std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
00567                                  "number of layers \n";
00568                 }
00569                 else
00570                 {
00571                     // dimension of primary entities is 3 here, but it could be 2 for climate
00572                     // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
00573                     // example:  ";PARALLEL_GHOSTS=2.0.1"
00574                     newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
00575                 }
00576             }
00577         }
00578     }
00579 
00580 #endif
00581 
00582     // Now let us actually load the MOAB file with the appropriate read options
00583     ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );CHKERRVAL( rval );
00584 
00585 #ifdef VERBOSE
00586 
00587     // some debugging stuff
00588     std::ostringstream outfile;
00589 #ifdef MOAB_HAVE_MPI
00590     int rank   = context.pcomms[*pid]->rank();
00591     int nprocs = context.pcomms[*pid]->size();
00592     outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
00593 #else
00594     outfile << "TaskMesh_n1.0.h5m";
00595 #endif
00596     // the mesh contains ghosts too, but they are not part of mat/neumann set
00597     // write in serial the file, to see what tags are missing
00598     rval = context.MBI->write_file( outfile.str().c_str() );CHKERRVAL( rval );  // everything on current task, written in serial
00599 
00600 #endif
00601     int rc = iMOAB_UpdateMeshInfo( pid );
00602     return rc;
00603 }
00604 
00605 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, iMOAB_String filename, iMOAB_String write_options, int filename_length,
00606                          int write_options_length )
00607 {
00608     // maybe do some processing of strings and lengths
00609     if( (int)strlen( filename ) > filename_length )
00610     {
00611         std::cout << " file name length issue\n";
00612         return 1;
00613     }
00614 
00615     if( (int)strlen( write_options ) > write_options_length )
00616     {
00617         std::cout << " write options issue\n";
00618         return 1;
00619     }
00620 
00621     std::ostringstream newopts;
00622 #ifdef MOAB_HAVE_MPI
00623     std::string write_opts( write_options );
00624     std::string pcid( "PARALLEL_COMM=" );
00625     std::size_t found = write_opts.find( pcid );
00626 
00627     if( found != std::string::npos )
00628     {
00629         std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00630         return 1;
00631     }
00632 
00633     // if write in parallel, add pc option, to be sure about which ParallelComm instance is used
00634     std::string pw( "PARALLEL=WRITE_PART" );
00635     found = write_opts.find( pw );
00636 
00637     if( found != std::string::npos ) { newopts << "PARALLEL_COMM=" << *pid << ";"; }
00638 
00639 #endif
00640 
00641     newopts << write_options;
00642 
00643     // Now let us actually write the file to disk with appropriate options
00644     ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &context.appDatas[*pid].file_set, 1 );CHKERRVAL( rval );
00645 
00646     return 0;
00647 }
00648 
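/* A load/write round-trip sketch using the pid registered above; the file names are
   hypothetical, and "mesh.h5m" is assumed to carry a PARALLEL_PARTITION tag:

       int nghosts = 1;
       char infile[] = "mesh.h5m";
       char ropts[]  = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
       ierr = iMOAB_LoadMesh( &pid, infile, ropts, &nghosts,
                              (int)strlen( infile ), (int)strlen( ropts ) );

       char outfile[] = "out.h5m";
       char wopts[]   = "PARALLEL=WRITE_PART";
       ierr = iMOAB_WriteMesh( &pid, outfile, wopts,
                               (int)strlen( outfile ), (int)strlen( wopts ) );

   Note that PARALLEL_COMM is appended internally based on the pid, so it must not appear
   in either option string. */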
00649 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
00650 {
00651     // this will include ghost elements info
00652     //
00653     appData& data        = context.appDatas[*pid];
00654     EntityHandle fileSet = data.file_set;
00655     // first clear all data ranges; this can be called after ghosting
00656     data.all_verts.clear();
00657     data.primary_elems.clear();
00658     data.local_verts.clear();
00659     data.ghost_vertices.clear();
00660     data.owned_elems.clear();
00661     data.ghost_elems.clear();
00662     data.mat_sets.clear();
00663     data.neu_sets.clear();
00664     data.diri_sets.clear();
00665 
00666     // Let us get all the vertex entities
00667     ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );CHKERRVAL( rval );  // recursive
00668 
00669     // Let us check first entities of dimension = 3
00670     data.dimension = 3;
00671     rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );CHKERRVAL( rval );  // recursive
00672 
00673     if( data.primary_elems.empty() )
00674     {
00675         // No 3-D elements. Let us check entities of dimension = 2
00676         data.dimension = 2;
00677         rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );CHKERRVAL( rval );  // recursive
00678 
00679         if( data.primary_elems.empty() )
00680         {
00681             // No 3-D or 2-D elements. Let us check entities of dimension = 1
00682             data.dimension = 1;
00683             rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );CHKERRVAL( rval );  // recursive
00684 
00685             if( data.primary_elems.empty() )
00686             {
00687                 // no elements of dimension 1 or 2 or 3; it could happen for point clouds
00688                 data.dimension = 0;
00689             }
00690         }
00691     }
00692 
00693     // check if the current mesh is just a point cloud
00694     data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
00695 
00696 #ifdef MOAB_HAVE_MPI
00697 
00698     if( context.MPI_initialized )
00699     {
00700         ParallelComm* pco = context.pcomms[*pid];
00701 
00702         // filter ghost vertices, from local
00703         rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );CHKERRVAL( rval );
00704 
00705         // Store handles for all ghosted entities
00706         data.ghost_vertices = subtract( data.all_verts, data.local_verts );
00707 
00708         // filter ghost elements, from local
00709         rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );CHKERRVAL( rval );
00710 
00711         data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
00712     }
00713     else
00714     {
00715         data.local_verts = data.all_verts;
00716         data.owned_elems = data.primary_elems;
00717     }
00718 
00719 #else
00720 
00721     data.local_verts = data.all_verts;
00722     data.owned_elems = data.primary_elems;
00723 
00724 #endif
00725 
00726     // Get the references for some standard internal tags such as material blocks, BCs, etc
00727     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00728                                                       data.mat_sets, Interface::UNION );CHKERRVAL( rval );
00729 
00730     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00731                                                       data.neu_sets, Interface::UNION );CHKERRVAL( rval );
00732 
00733     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00734                                                       data.diri_sets, Interface::UNION );CHKERRVAL( rval );
00735 
00736     return 0;
00737 }
00738 
00739 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid, int* num_visible_vertices, int* num_visible_elements,
00740                            int* num_visible_blocks, int* num_visible_surfaceBC, int* num_visible_vertexBC )
00741 {
00742     ErrorCode rval;
00743     appData& data        = context.appDatas[*pid];
00744     EntityHandle fileSet = data.file_set;
00745 
00746     // this will include ghost elements
00747     // first clear all data ranges; this can be called after ghosting
00748     if( num_visible_elements )
00749     {
00750         num_visible_elements[2] = (int)data.primary_elems.size();
00751         // separate ghost and local/owned primary elements
00752         num_visible_elements[0] = (int)data.owned_elems.size();
00753         num_visible_elements[1] = (int)data.ghost_elems.size();
00754     }
00755     if( num_visible_vertices )
00756     {
00757         num_visible_vertices[2] = (int)data.all_verts.size();
00758         num_visible_vertices[1] = (int)data.ghost_vertices.size();
00759         num_visible_vertices[0] =
00760             num_visible_vertices[2] - num_visible_vertices[1];  // local are those that are not ghosts
00761     }
00762 
00763     if( num_visible_blocks )
00764     {
00765         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00766                                                           data.mat_sets, Interface::UNION );CHKERRVAL( rval );
00767 
00768         num_visible_blocks[2] = data.mat_sets.size();
00769         num_visible_blocks[0] = num_visible_blocks[2];
00770         num_visible_blocks[1] = 0;
00771     }
00772 
00773     if( num_visible_surfaceBC )
00774     {
00775         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00776                                                           data.neu_sets, Interface::UNION );CHKERRVAL( rval );
00777 
00778         num_visible_surfaceBC[2] = 0;
00779         // count how many faces are in each neu set, and how many regions are
00780         // adjacent to them;
00781         int numNeuSets = (int)data.neu_sets.size();
00782 
00783         for( int i = 0; i < numNeuSets; i++ )
00784         {
00785             Range subents;
00786             EntityHandle nset = data.neu_sets[i];
00787             rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );CHKERRVAL( rval );
00788 
00789             for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
00790             {
00791                 EntityHandle subent = *it;
00792                 Range adjPrimaryEnts;
00793                 rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );CHKERRVAL( rval );
00794 
00795                 num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
00796             }
00797         }
00798 
00799         num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
00800         num_visible_surfaceBC[1] = 0;
00801     }
00802 
00803     if( num_visible_vertexBC )
00804     {
00805         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00806                                                           data.diri_sets, Interface::UNION );CHKERRVAL( rval );
00807 
00808         num_visible_vertexBC[2] = 0;
00809         int numDiriSets         = (int)data.diri_sets.size();
00810 
00811         for( int i = 0; i < numDiriSets; i++ )
00812         {
00813             Range verts;
00814             EntityHandle diset = data.diri_sets[i];
00815             rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );CHKERRVAL( rval );
00816 
00817             num_visible_vertexBC[2] += (int)verts.size();
00818         }
00819 
00820         num_visible_vertexBC[0] = num_visible_vertexBC[2];
00821         num_visible_vertexBC[1] = 0;
00822     }
00823 
00824     return 0;
00825 }
00826 
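/* Each counter returned by iMOAB_GetMeshInfo is a 3-entry array: [0] owned/local,
   [1] ghost, [2] total visible on this process. A short sketch reusing the pid above:

       int nverts[3], nelems[3], nblocks[3], nsbc[3], nvbc[3];
       ierr = iMOAB_GetMeshInfo( &pid, nverts, nelems, nblocks, nsbc, nvbc );
       // nelems[2] == nelems[0] + nelems[1]
*/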
00827 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
00828 {
00829     Range& verts = context.appDatas[*pid].all_verts;
00830 
00831     if( (int)verts.size() != *vertices_length ) { return 1; }  // problem with array length
00832 
00833     // global id tag is context.globalID_tag
00834     ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );CHKERRVAL( rval );
00835 
00836     return 0;
00837 }
00838 
00839 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
00840 {
00841     Range& verts = context.appDatas[*pid].all_verts;
00842     int i        = 0;
00843 #ifdef MOAB_HAVE_MPI
00844     ParallelComm* pco = context.pcomms[*pid];
00845 
00846     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00847     {
00848         ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );CHKERRVAL( rval );
00849     }
00850 
00851     if( i != *vertices_length ) { return 1; }  // warning array allocation problem
00852 
00853 #else
00854 
00855     /* everything owned by proc 0 */
00856     if( (int)verts.size() != *vertices_length ) { return 1; }  // warning array allocation problem
00857 
00858     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00859     {
00860         visible_global_rank_ID[i] = 0;
00861     }  // all vertices are owned by processor 0, as this is serial run
00862 
00863 #endif
00864     return 0;
00865 }
00866 
00867 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
00868 {
00869     Range& verts = context.appDatas[*pid].all_verts;
00870 
00871     // interleaved coordinates, so that means deep copy anyway
00872     if( *coords_length != 3 * (int)verts.size() ) { return 1; }
00873 
00874     ErrorCode rval = context.MBI->get_coords( verts, coordinates );CHKERRVAL( rval );
00875 
00876     return 0;
00877 }
00878 
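/* The coordinates are returned interleaved (x0, y0, z0, x1, y1, z1, ...), so the caller
   allocates 3 * (total visible vertices) doubles; a sketch using nverts from
   iMOAB_GetMeshInfo above:

       std::vector< double > coords( 3 * nverts[2] );
       int coords_len = (int)coords.size();
       ierr = iMOAB_GetVisibleVerticesCoordinates( &pid, &coords_len, coords.data() );
*/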
00879 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
00880 {
00881     // local block ids are counted from 0 to the number of visible blocks ...
00882     // this will actually return the material set tag value as the global block id
00883     Range& matSets = context.appDatas[*pid].mat_sets;
00884 
00885     if( *block_length != (int)matSets.size() ) { return 1; }
00886 
00887     // return the material set tag value (context.material_tag is the material set tag)
00888     ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );CHKERRVAL( rval );
00889 
00890     // populate map with index
00891     std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
00892     for( unsigned i = 0; i < matSets.size(); i++ )
00893     {
00894         matIdx[global_block_IDs[i]] = i;
00895     }
00896 
00897     return 0;
00898 }
00899 
00900 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* vertices_per_element,
00901                             int* num_elements_in_block )
00902 {
00903     std::map< int, int >& matMap      = context.appDatas[*pid].matIndex;
00904     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
00905 
00906     if( it == matMap.end() ) { return 1; }  // error in finding block with id
00907 
00908     int blockIndex          = matMap[*global_block_ID];
00909     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
00910     Range blo_elems;
00911     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
00912 
00913     if( MB_SUCCESS != rval || blo_elems.empty() ) { return 1; }
00914 
00915     EntityType type = context.MBI->type_from_handle( blo_elems[0] );
00916 
00917     if( !blo_elems.all_of_type( type ) ) { return 1; }  // not all of the same type
00918 
00919     const EntityHandle* conn = NULL;
00920     int num_verts            = 0;
00921     rval                     = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );CHKERRVAL( rval );
00922 
00923     *vertices_per_element  = num_verts;
00924     *num_elements_in_block = (int)blo_elems.size();
00925 
00926     return 0;
00927 }
00928 
00929 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid, int* num_visible_elements, iMOAB_GlobalID* element_global_IDs,
00930                                       int* ranks, iMOAB_GlobalID* block_IDs )
00931 {
00932     appData& data = context.appDatas[*pid];
00933 #ifdef MOAB_HAVE_MPI
00934     ParallelComm* pco = context.pcomms[*pid];
00935 #endif
00936 
00937     ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );CHKERRVAL( rval );
00938 
00939     int i = 0;
00940 
00941     for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
00942     {
00943 #ifdef MOAB_HAVE_MPI
00944         rval = pco->get_owner( *eit, ranks[i] );CHKERRVAL( rval );
00945 
00946 #else
00947         /* everything owned by task 0 */
00948         ranks[i]             = 0;
00949 #endif
00950     }
00951 
00952     for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
00953     {
00954         EntityHandle matMeshSet = *mit;
00955         Range elems;
00956         rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );
00957 
00958         int valMatTag;
00959         rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );CHKERRVAL( rval );
00960 
00961         for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
00962         {
00963             EntityHandle eh = *eit;
00964             int index       = data.primary_elems.index( eh );
00965 
00966             if( -1 == index ) { return 1; }
00967 
00968             if( -1 >= *num_visible_elements ) { return 1; }
00969 
00970             block_IDs[index] = valMatTag;
00971         }
00972     }
00973 
00974     return 0;
00975 }
00976 
00977 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* connectivity_length,
00978                                              int* element_connectivity )
00979 {
00980     appData& data                     = context.appDatas[*pid];
00981     std::map< int, int >& matMap      = data.matIndex;
00982     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
00983 
00984     if( it == matMap.end() ) { return 1; }  // error in finding block with id
00985 
00986     int blockIndex          = matMap[*global_block_ID];
00987     EntityHandle matMeshSet = data.mat_sets[blockIndex];
00988     std::vector< EntityHandle > elems;
00989 
00990     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );
00991 
00992     if( elems.empty() ) { return 1; }
00993 
00994     std::vector< EntityHandle > vconnect;
00995     rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );CHKERRVAL( rval );
00996 
00997     if( *connectivity_length != (int)vconnect.size() ) { return 1; }  // mismatched sizes
00998 
00999     for( int i = 0; i < *connectivity_length; i++ )
01000     {
01001         int inx = data.all_verts.index( vconnect[i] );
01002 
01003         if( -1 == inx ) { return 1; }  // error, vertex not in local range
01004 
01005         element_connectivity[i] = inx;
01006     }
01007 
01008     return 0;
01009 }
01010 
01011 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid, iMOAB_LocalID* elem_index, int* connectivity_length,
01012                                       int* element_connectivity )
01013 {
01014     appData& data = context.appDatas[*pid];
01015     assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
01016 
01017     int num_nodes;
01018     const EntityHandle* conn;
01019 
01020     EntityHandle eh = data.primary_elems[*elem_index];
01021 
01022     ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );CHKERRVAL( rval );
01023 
01024     if( *connectivity_length < num_nodes ) { return 1; }  // wrong number of vertices
01025 
01026     for( int i = 0; i < num_nodes; i++ )
01027     {
01028         int index = data.all_verts.index( conn[i] );
01029 
01030         if( -1 == index ) { return 1; }
01031 
01032         element_connectivity[i] = index;
01033     }
01034 
01035     *connectivity_length = num_nodes;
01036 
01037     return 0;
01038 }
01039 
01040 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* num_elements_in_block,
01041                                    int* element_ownership )
01042 {
01043     std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
01044 
01045     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01046 
01047     if( it == matMap.end() ) { return 1; }  // error in finding block with id
01048 
01049     int blockIndex          = matMap[*global_block_ID];
01050     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01051     Range elems;
01052 
01053     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );
01054 
01055     if( elems.empty() ) { return 1; }
01056 
01057     if( *num_elements_in_block != (int)elems.size() ) { return 1; }  // bad memory allocation
01058 
01059     int i = 0;
01060 #ifdef MOAB_HAVE_MPI
01061     ParallelComm* pco = context.pcomms[*pid];
01062 #endif
01063 
01064     for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
01065     {
01066 #ifdef MOAB_HAVE_MPI
01067         rval = pco->get_owner( *vit, element_ownership[i] );CHKERRVAL( rval );
01068 #else
01069         element_ownership[i] = 0; /* owned by 0 */
01070 #endif
01071     }
01072 
01073     return 0;
01074 }
01075 
01076 ErrCode iMOAB_GetElementID( iMOAB_AppID pid, iMOAB_GlobalID* global_block_ID, int* num_elements_in_block,
01077                             iMOAB_GlobalID* global_element_ID, iMOAB_LocalID* local_element_ID )
01078 {
01079     appData& data                = context.appDatas[*pid];
01080     std::map< int, int >& matMap = data.matIndex;
01081 
01082     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01083 
01084     if( it == matMap.end() ) { return 1; }  // error in finding block with id
01085 
01086     int blockIndex          = matMap[*global_block_ID];
01087     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01088     Range elems;
01089     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );CHKERRVAL( rval );
01090 
01091     if( elems.empty() ) { return 1; }
01092 
01093     if( *num_elements_in_block != (int)elems.size() ) { return 1; }  // bad memory allocation
01094 
01095     rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );CHKERRVAL( rval );
01096 
01097     // check that elems are among primary_elems in data
01098     for( int i = 0; i < *num_elements_in_block; i++ )
01099     {
01100         local_element_ID[i] = data.primary_elems.index( elems[i] );
01101 
01102         if( -1 == local_element_ID[i] ) { return 1; }  // error, not in local primary elements
01103     }
01104 
01105     return 0;
01106 }
01107 
01108 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid, int* surface_BC_length, iMOAB_LocalID* local_element_ID,
01109                                      int* reference_surface_ID, int* boundary_condition_value )
01110 {
01111     // we have to fill BC data for the Neumann sets
01112     ErrorCode rval;
01113 
01114     // it was counted above, in GetMeshInfo
01115     appData& data  = context.appDatas[*pid];
01116     int numNeuSets = (int)data.neu_sets.size();
01117 
01118     int index = 0;  // index [0, surface_BC_length) for the arrays returned
01119 
01120     for( int i = 0; i < numNeuSets; i++ )
01121     {
01122         Range subents;
01123         EntityHandle nset = data.neu_sets[i];
01124         rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );CHKERRVAL( rval );
01125 
01126         int neuVal;
01127         rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );CHKERRVAL( rval );
01128 
01129         for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
01130         {
01131             EntityHandle subent = *it;
01132             Range adjPrimaryEnts;
01133             rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );CHKERRVAL( rval );
01134 
01135             // get global id of the primary ents, and side number of the quad/subentity
01136             // this is moab ordering
01137             for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
01138             {
01139                 EntityHandle primaryEnt = *pit;
01140                 // get global id
01141                 /*int globalID;
01142                 rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
01143                 if (MB_SUCCESS!=rval)
01144                   return 1;
01145                 global_element_ID[index] = globalID;*/
01146 
01147                 // get local element id
01148                 local_element_ID[index] = data.primary_elems.index( primaryEnt );
01149 
01150                 if( -1 == local_element_ID[index] ) { return 1; }  // did not find the element locally
01151 
01152                 int side_number, sense, offset;
01153                 rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );CHKERRVAL( rval );
01154 
01155                 reference_surface_ID[index]     = side_number + 1;  // moab is from 0 to 5, it needs 1 to 6
01156                 boundary_condition_value[index] = neuVal;
01157                 index++;
01158             }
01159         }
01160     }
01161 
01162     if( index != *surface_BC_length ) { return 1; }  // error in array allocations
01163 
01164     return 0;
01165 }
01166 
01167 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid, int* vertex_BC_length, iMOAB_LocalID* local_vertex_ID,
01168                                     int* boundary_condition_value )
01169 {
01170     ErrorCode rval;
01171 
01172     // it was counted above, in GetMeshInfo
01173     appData& data   = context.appDatas[*pid];
01174     int numDiriSets = (int)data.diri_sets.size();
01175     int index       = 0;  // index [0, *vertex_BC_length) for the arrays returned
01176 
01177     for( int i = 0; i < numDiriSets; i++ )
01178     {
01179         Range verts;
01180         EntityHandle diset = data.diri_sets[i];
01181         rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );CHKERRVAL( rval );
01182 
01183         int diriVal;
01184         rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );CHKERRVAL( rval );
01185 
01186         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01187         {
01188             EntityHandle vt = *vit;
01189             /*int vgid;
01190             rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
01191             if (MB_SUCCESS!=rval)
01192               return 1;
01193             global_vertext_ID[index] = vgid;*/
01194             local_vertex_ID[index] = data.all_verts.index( vt );
01195 
01196             if( -1 == local_vertex_ID[index] ) { return 1; }  // vertex was not found
01197 
01198             boundary_condition_value[index] = diriVal;
01199             index++;
01200         }
01201     }
01202 
01203     if( *vertex_BC_length != index ) { return 1; }  // array allocation issue
01204 
01205     return 0;
01206 }
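/* The two BC queries expect arrays sized with the counts from iMOAB_GetMeshInfo
   (nsbc[2] surface BC entries, nvbc[2] vertex BC entries); a sketch, where iMOAB_LocalID
   is the integer typedef from moab/iMOAB.h:

       std::vector< iMOAB_LocalID > bc_elem( nsbc[2] );
       std::vector< int > bc_side( nsbc[2] ), bc_val( nsbc[2] );
       ierr = iMOAB_GetPointerToSurfaceBC( &pid, &nsbc[2], bc_elem.data(),
                                           bc_side.data(), bc_val.data() );

       std::vector< iMOAB_LocalID > bc_vert( nvbc[2] );
       std::vector< int > bc_vval( nvbc[2] );
       ierr = iMOAB_GetPointerToVertexBC( &pid, &nvbc[2], bc_vert.data(), bc_vval.data() );
*/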
01207 
01208 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* tag_type,
01209                                 int* components_per_entity, int* tag_index, int tag_storage_name_length )
01210 {
01211     // see if the tag already exists, and if so, check its type and length
01212     if( *tag_type < 0 || *tag_type > 5 ) { return 1; }  // we have 6 types of tags supported so far
01213 
01214     DataType tagDataType;
01215     TagType tagType;
01216     void* defaultVal        = NULL;
01217     int* defInt             = new int[*components_per_entity];
01218     double* defDouble       = new double[*components_per_entity];
01219     EntityHandle* defHandle = new EntityHandle[*components_per_entity];
01220 
01221     for( int i = 0; i < *components_per_entity; i++ )
01222     {
01223         defInt[i]    = 0;
01224         defDouble[i] = -1e+10;
01225         defHandle[i] = (EntityHandle)0;
01226     }
01227 
01228     switch( *tag_type )
01229     {
01230         case 0:
01231             tagDataType = MB_TYPE_INTEGER;
01232             tagType     = MB_TAG_DENSE;
01233             defaultVal  = defInt;
01234             break;
01235 
01236         case 1:
01237             tagDataType = MB_TYPE_DOUBLE;
01238             tagType     = MB_TAG_DENSE;
01239             defaultVal  = defDouble;
01240             break;
01241 
01242         case 2:
01243             tagDataType = MB_TYPE_HANDLE;
01244             tagType     = MB_TAG_DENSE;
01245             defaultVal  = defHandle;
01246             break;
01247 
01248         case 3:
01249             tagDataType = MB_TYPE_INTEGER;
01250             tagType     = MB_TAG_SPARSE;
01251             defaultVal  = defInt;
01252             break;
01253 
01254         case 4:
01255             tagDataType = MB_TYPE_DOUBLE;
01256             tagType     = MB_TAG_SPARSE;
01257             defaultVal  = defDouble;
01258             break;
01259 
01260         case 5:
01261             tagDataType = MB_TYPE_HANDLE;
01262             tagType     = MB_TAG_SPARSE;
01263             defaultVal  = defHandle;
01264             break;
01265 
01266         default: {
01267             delete[] defInt;
01268             delete[] defDouble;
01269             delete[] defHandle;
01270             return 1;
01271         }  // error
01272     }
01273 
01274     std::string tag_name( tag_storage_name );
01275 
01276     if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
01277     { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
01278 
01279     Tag tagHandle;
01280     ErrorCode rval = context.MBI->tag_get_handle( tag_name.c_str(), *components_per_entity, tagDataType, tagHandle,
01281                                                   tagType, defaultVal );
01282 
01283     if( MB_TAG_NOT_FOUND == rval )
01284     {
01285         rval = context.MBI->tag_get_handle( tag_name.c_str(), *components_per_entity, tagDataType, tagHandle,
01286                                             tagType | MB_TAG_CREAT, defaultVal );
01287     }
01288 
01289     // we don't need default values anymore, avoid leaks
01290     delete[] defInt;
01291     delete[] defDouble;
01292     delete[] defHandle;
01293 
01294     appData& data = context.appDatas[*pid];
01295 
01296     if( MB_ALREADY_ALLOCATED == rval )
01297     {
01298         std::map< std::string, Tag >& mTags        = data.tagMap;
01299         std::map< std::string, Tag >::iterator mit = mTags.find( tag_name );
01300 
01301         if( mit == mTags.end() )
01302         {
01303             // add it to the map
01304             mTags[tag_name] = tagHandle;
01305             // push it to the list of tags, too
01306             *tag_index = (int)data.tagList.size();
01307             data.tagList.push_back( tagHandle );
01308         }
01309 
01310         return 0;  // OK, we found it, and we have it stored in the tag map
01311     }
01312     else if( MB_SUCCESS == rval )
01313     {
01314         data.tagMap[tag_name] = tagHandle;
01315         *tag_index            = (int)data.tagList.size();
01316         data.tagList.push_back( tagHandle );
01317         return 0;
01318     }
01319     else
01320         return 1;  // some error, maybe the tag was not created
01321 }
01322 
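/* Defining and filling a dense, double-valued tag (tag_type 1 in the switch above) on
   vertices (ent_type 0); the tag name "a2oTbot" and fill value are hypothetical:

       int tag_type = 1, ncomp = 1, tag_index, ent_type = 0;
       char tname[] = "a2oTbot";
       ierr = iMOAB_DefineTagStorage( &pid, tname, &tag_type, &ncomp, &tag_index,
                                      (int)strlen( tname ) );

       std::vector< double > vals( nverts[2], 273.15 );
       int nvals = (int)vals.size();
       ierr = iMOAB_SetDoubleTagStorage( &pid, tname, &nvals, &ent_type, vals.data(),
                                         (int)strlen( tname ) );
*/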
01323 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
01324                                 int* ent_type, int* tag_storage_data, int tag_storage_name_length )
01325 {
01326     std::string tag_name( tag_storage_name );
01327 
01328     if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
01329     { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
01330 
01331     appData& data = context.appDatas[*pid];
01332 
01333     if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; }  // tag not defined
01334 
01335     Tag tag = data.tagMap[tag_name];
01336 
01337     int tagLength  = 0;
01338     ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );
01339 
01340     DataType dtype;
01341     rval = context.MBI->tag_get_data_type( tag, dtype );
01342 
01343     if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER ) { return 1; }
01344 
01345     // set it on a subset of entities, based on type and length
01346     Range* ents_to_set;
01347 
01348     if( *ent_type == 0 )  // vertices
01349     { ents_to_set = &data.all_verts; }
01350     else  // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
01351     {
01352         ents_to_set = &data.primary_elems;
01353     }
01354 
01355     int nents_to_be_set = *num_tag_storage_length / tagLength;
01356 
01357     if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
01358     { return 1; }  // too many entities to be set, or too few
01359 
01360     // restrict the range; everything is contiguous; or not?
01361 
01362     // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
01363     // 1 ) );
01364     rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );CHKERRVAL( rval );
01365 
01366     return 0;  // no error
01367 }
01368 
01369 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
01370                                 int* ent_type, int* tag_storage_data, int tag_storage_name_length )
01371 {
01372     ErrorCode rval;
01373     std::string tag_name( tag_storage_name );
01374 
01375     if( tag_storage_name_length < (int)tag_name.length() ) { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
01376 
01377     appData& data = context.appDatas[*pid];
01378 
01379     if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; }  // tag not defined
01380 
01381     Tag tag = data.tagMap[tag_name];
01382 
01383     int tagLength = 0;
01384     rval          = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );
01385 
01386     DataType dtype;
01387     rval = context.MBI->tag_get_data_type( tag, dtype );CHKERRVAL( rval );
01388 
01389     if( dtype != MB_TYPE_INTEGER ) { return 1; }
01390 
01391     // set it on a subset of entities, based on type and length
01392     Range* ents_to_get;
01393 
01394     if( *ent_type == 0 )  // vertices
01395     { ents_to_get = &data.all_verts; }
01396     else  // if (*ent_type == 1)
01397     {
01398         ents_to_get = &data.primary_elems;
01399     }
01400 
01401     int nents_to_get = *num_tag_storage_length / tagLength;
01402 
01403     if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
01404     { return 1; }  // too many entities to get, or too few
01405 
01406     // restrict the range; everything is contiguous; or not?
01407     // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
01408     // ) );
01409 
01410     rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );CHKERRVAL( rval );
01411 
01412     return 0;  // no error
01413 }
01414 
01415 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
01416                                    int* ent_type, double* tag_storage_data, int tag_storage_name_length )
01417 {
01418     ErrorCode rval;
01419     // exactly the same code as for the int tag, except that the data type check is for MB_TYPE_DOUBLE
01420     std::string tag_name( tag_storage_name );
01421 
01422     if( tag_storage_name_length < (int)tag_name.length() ) { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
01423 
01424     appData& data = context.appDatas[*pid];
01425 
01426     if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; }  // tag not defined
01427 
01428     Tag tag = data.tagMap[tag_name];
01429 
01430     int tagLength = 0;
01431     rval          = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );
01432 
01433     DataType dtype;
01434     rval = context.MBI->tag_get_data_type( tag, dtype );
01435 
01436     if( MB_SUCCESS != rval || dtype != MB_TYPE_DOUBLE ) { return 1; }
01437 
01438     // set it on a subset of entities, based on type and length
01439     Range* ents_to_set = NULL;
01440 
01441     if( *ent_type == 0 )  // vertices
01442     { ents_to_set = &data.all_verts; }
01443     else if( *ent_type == 1 )
01444     {
01445         ents_to_set = &data.primary_elems;
01446     }
01447 
01448     int nents_to_be_set = *num_tag_storage_length / tagLength;
01449 
01450     if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 ) { return 1; }  // too many entities to be set, or too few
01451 
01452     // restrict the range; everything is contiguous; or not?
01453     // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
01454     // 1 ) );
01455 
01456     rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );CHKERRVAL( rval );
01457 
01458     return 0;  // no error
01459 }
01460 
01461 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid, const iMOAB_String tag_storage_name, int* num_tag_storage_length,
01462                                    int* ent_type, double* tag_storage_data, int tag_storage_name_length )
01463 {
01464     ErrorCode rval;
01465     // exactly the same code, except tag type check
01466     std::string tag_name( tag_storage_name );
01467 
01468     if( tag_storage_name_length < (int)tag_name.length() ) { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
01469 
01470     appData& data = context.appDatas[*pid];
01471 
01472     if( data.tagMap.find( tag_name ) == data.tagMap.end() ) { return 1; }  // tag not defined
01473 
01474     Tag tag = data.tagMap[tag_name];
01475 
01476     int tagLength = 0;
01477     rval          = context.MBI->tag_get_length( tag, tagLength );CHKERRVAL( rval );
01478 
01479     DataType dtype;
01480     rval = context.MBI->tag_get_data_type( tag, dtype );CHKERRVAL( rval );
01481 
01482     if( dtype != MB_TYPE_DOUBLE ) { return 1; }
01483 
01484     // set it on a subset of entities, based on type and length
01485     Range* ents_to_get = NULL;
01486 
01487     if( *ent_type == 0 )  // vertices
01488     { ents_to_get = &data.all_verts; }
01489     else if( *ent_type == 1 )
01490     {
01491         ents_to_get = &data.primary_elems;
01492     }
01493 
01494     int nents_to_get = *num_tag_storage_length / tagLength;
01495 
01496     if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 ) { return 1; }  // too many entities to get, or too few
01497 
01498     // restrict the range; everything is contiguous; or not?
01499 
01500     // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
01501     // ) );
01502     rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );CHKERRVAL( rval );
01503 
01504     return 0;  // no error
01505 }
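/* Usage sketch for the double tag accessors (illustrative only; `appID`, `nCells` and the
 * tag name "Temperature" are assumptions, with the tag presumed defined as MB_TYPE_DOUBLE,
 * length 1, on cells):
 *
 *   int entType = 1;                                  // operate on primary elements
 *   int nVals   = nCells;
 *   std::vector< double > temp( nCells, 273.15 );
 *   iMOAB_SetDoubleTagStorage( &appID, "Temperature", &nVals, &entType, temp.data(),
 *                              (int)strlen( "Temperature" ) );
 *   iMOAB_GetDoubleTagStorage( &appID, "Temperature", &nVals, &entType, temp.data(),
 *                              (int)strlen( "Temperature" ) );
 */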
01506 
01507 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
01508 {
01509 #ifdef MOAB_HAVE_MPI
01510     appData& data = context.appDatas[*pid];
01511     Range ent_exchange;
01512     std::vector< Tag > tags;
01513 
01514     for( int i = 0; i < *num_tag; i++ )
01515     {
01516         if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() ) { return 1; }  // error in tag index
01517 
01518         tags.push_back( data.tagList[tag_indices[i]] );
01519     }
01520 
01521     if( *ent_type == 0 ) { ent_exchange = data.all_verts; }
01522     else if( *ent_type == 1 )
01523     {
01524         ent_exchange = data.primary_elems;
01525     }
01526     else
01527     {
01528         return 1;
01529     }  // unexpected type
01530 
01531     ParallelComm* pco = context.pcomms[*pid];
01532 
01533     ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );CHKERRVAL( rval );
01534 
01535 #else
01536     /* do nothing if serial */
01537     // just silence the warning
01538     // do not call sync tags in serial!
01539     int k = *pid + *num_tag + *tag_indices + *ent_type;
01540     k++;
01541 #endif
01542 
01543     return 0;
01544 }
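/* Usage sketch (illustrative): after setting tag values on owned entities, ghosted copies
 * can be brought up to date with iMOAB_SynchronizeTags. `appID` is an assumption, and
 * `tagIndex` is assumed to be the index returned when the tag was defined:
 *
 *   int numTags = 1;
 *   int entType = 0;                    // exchange on vertices
 *   iMOAB_SynchronizeTags( &appID, &numTags, &tagIndex, &entType );
 */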
01545 
01546 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
01547 {
01548 #ifdef MOAB_HAVE_MPI
01549     appData& data = context.appDatas[*pid];
01550     Range ent_exchange;
01551 
01552     if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() ) { return 1; }  // error in tag index
01553 
01554     Tag tagh = data.tagList[*tag_index];
01555 
01556     if( *ent_type == 0 ) { ent_exchange = data.all_verts; }
01557     else if( *ent_type == 1 )
01558     {
01559         ent_exchange = data.primary_elems;
01560     }
01561     else
01562     {
01563         return 1;
01564     }  // unexpected type
01565 
01566     ParallelComm* pco = context.pcomms[*pid];
01567     // we could support different MPI_Op values; not needed now, since this will be called from Fortran
01568     ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );CHKERRVAL( rval );
01569 
01570 #else
01571     /* do nothing if serial */
01572     // just silence the warning
01573     // do not call sync tags in serial!
01574     int k = *pid + *tag_index + *ent_type;
01575     k++;  // just do junk, to avoid complaints
01576 #endif
01577     return 0;
01578 }
01579 
01580 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid, iMOAB_LocalID* local_index, int* num_adjacent_elements,
01581                                    iMOAB_LocalID* adjacent_element_IDs )
01582 {
01583     ErrorCode rval;
01584 
01585     // one neighbor for each subentity of dimension-1
01586     MeshTopoUtil mtu( context.MBI );
01587     appData& data   = context.appDatas[*pid];
01588     EntityHandle eh = data.primary_elems[*local_index];
01589     Range adjs;
01590     rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );CHKERRVAL( rval );
01591 
01592     if( *num_adjacent_elements < (int)adjs.size() ) { return 1; }  // not dimensioned correctly
01593 
01594     *num_adjacent_elements = (int)adjs.size();
01595 
01596     for( int i = 0; i < *num_adjacent_elements; i++ )
01597     {
01598         adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
01599     }
01600 
01601     return 0;
01602 }
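/* Usage sketch (illustrative): query the face-neighbors of local element 0. The caller
 * passes the capacity of `neighbors` in `numAdj`; on return it holds the actual count.
 * `appID` and the capacity value are assumptions:
 *
 *   iMOAB_LocalID elem = 0;
 *   int numAdj = 6;                                   // capacity; enough for a hexahedron
 *   std::vector< iMOAB_LocalID > neighbors( numAdj );
 *   iMOAB_GetNeighborElements( &appID, &elem, &numAdj, neighbors.data() );
 */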
01603 
01604 #if 0
01605 
01606 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
01607 {
01608     return 0;
01609 }
01610 
01611 #endif
01612 
01613 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
01614 {
01615     ErrorCode rval;
01616     appData& data = context.appDatas[*pid];
01617 
01618     if( !data.local_verts.empty() )  // we should have no vertices in the app
01619     { return 1; }
01620 
01621     int nverts = *coords_len / *dim;
01622 
01623     rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );CHKERRVAL( rval );
01624 
01625     rval = context.MBI->add_entities( data.file_set, data.local_verts );CHKERRVAL( rval );
01626 
01627     // also add the vertices to the all_verts range
01628     data.all_verts.merge( data.local_verts );
01629     return 0;
01630 }
01631 
01632 ErrCode iMOAB_CreateElements( iMOAB_AppID pid, int* num_elem, int* type, int* num_nodes_per_element, int* connectivity,
01633                               int* block_ID )
01634 {
01635     // Create elements
01636     appData& data = context.appDatas[*pid];
01637 
01638     ReadUtilIface* read_iface;
01639     ErrorCode rval = context.MBI->query_interface( read_iface );CHKERRVAL( rval );
01640 
01641     EntityType mbtype = ( EntityType )( *type );
01642     EntityHandle actual_start_handle;
01643     EntityHandle* array = NULL;
01644     rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );CHKERRVAL( rval );
01645 
01646     // fill in the actual connectivity from the input; assume the vertices are in order, and the start
01647     // vertex is the first in the current data vertex range
01648     EntityHandle firstVertex = data.local_verts[0];
01649 
01650     for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
01651     {
01652         array[j] = connectivity[j] + firstVertex - 1;
01653     }  // assumes the connectivity uses 1-based indexing (coming from Fortran, mostly)
01654 
01655     Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
01656 
01657     rval = context.MBI->add_entities( data.file_set, new_elems );CHKERRVAL( rval );
01658 
01659     data.primary_elems.merge( new_elems );
01660 
01661     // add to adjacency
01662     rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );CHKERRVAL( rval );
01663     // organize all new elements in a block, with the given block ID; if the block set does
01664     // not exist, create a new mesh set;
01665     Range sets;
01666     int set_no            = *block_ID;
01667     const void* setno_ptr = &set_no;
01668     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
01669                                                       sets );
01670     EntityHandle block_set;
01671 
01672     if( MB_FAILURE == rval || sets.empty() )
01673     {
01674         // create a new set, with this block ID
01675         rval = context.MBI->create_meshset( MESHSET_SET, block_set );CHKERRVAL( rval );
01676 
01677         rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );CHKERRVAL( rval );
01678 
01679         // add the material set to file set
01680         rval = context.MBI->add_entities( data.file_set, &block_set, 1 );CHKERRVAL( rval );
01681     }
01682     else
01683     {
01684         block_set = sets[0];
01685     }  // first set is the one we want
01686 
01687     // add the new elements to the block set
01688     rval = context.MBI->add_entities( block_set, new_elems );CHKERRVAL( rval );
01689 
01690     return 0;
01691 }
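/* Usage sketch (illustrative): build a single quad from four vertices with the two
 * creation routines above; the connectivity is 1-based, as noted in the loop above.
 * `appID` and the block id are assumptions:
 *
 *   double coords[12] = { 0., 0., 0.,  1., 0., 0.,  1., 1., 0.,  0., 1., 0. };
 *   int coordsLen = 12, dim = 3;
 *   iMOAB_CreateVertices( &appID, &coordsLen, &dim, coords );
 *
 *   int numElem = 1, type = (int)MBQUAD, nodesPerElem = 4, blockID = 100;
 *   int conn[4] = { 1, 2, 3, 4 };                     // 1-based indices into the vertices
 *   iMOAB_CreateElements( &appID, &numElem, &type, &nodesPerElem, conn, &blockID );
 */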
01692 
01693 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
01694 {
01695     appData& data            = context.appDatas[*pid];
01696     data.num_global_vertices = *num_global_verts;
01697     data.num_global_elements = *num_global_elems;
01698     return 0;
01699 }
01700 
01701 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
01702 {
01703     appData& data = context.appDatas[*pid];
01704     if( NULL != num_global_verts ) { *num_global_verts = data.num_global_vertices; }
01705     if( NULL != num_global_elems ) { *num_global_elems = data.num_global_elements; }
01706 
01707     return 0;
01708 }
01709 
01710 static void split_tag_names( std::string input_names, std::string& separator,
01711                              std::vector< std::string >& list_tag_names )
01712 {
01713     size_t pos = 0;
01714     std::string token;
01715     while( ( pos = input_names.find( separator ) ) != std::string::npos )
01716     {
01717         token = input_names.substr( 0, pos );
01718         list_tag_names.push_back( token );
01719         // std::cout << token << std::endl;
01720         input_names.erase( 0, pos + separator.length() );
01721     }
01722     if( !input_names.empty() )
01723     {
01724         // if something is left over, i.e., the string did not end with the separator
01725         list_tag_names.push_back( input_names );
01726     }
01727     return;
01728 }
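/* Example (illustrative): with std::string sep( ";" ) and an empty std::vector< std::string >
 * names, split_tag_names( "bolx;boly;bolz", sep, names ) yields names == { "bolx", "boly", "bolz" };
 * a trailing separator is harmless, and any text after the last separator is kept as a final name. */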
01729 
01730 #ifdef MOAB_HAVE_MPI
01731 
01732 // this makes sense only for parallel runs
01733 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
01734 {
01735     appData& data     = context.appDatas[*pid];
01736     ParallelComm* pco = context.pcomms[*pid];
01737     EntityHandle cset = data.file_set;
01738     int dum_id        = 0;
01739     ErrorCode rval;
01740     if( data.primary_elems.empty() )
01741     {
01742         // skip the actual resolve; assume the vertices are already distributed,
01743         // so there is no need to share them
01744     }
01745     else
01746     {
01747         // create an integer tag for resolving; it may need to become a 64-bit tag in the
01748         // future (for more than 2 billion vertices)
01749 
01750         Tag stag;
01751         rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
01752                                             &dum_id );CHKERRVAL( rval );
01753 
01754         if( *num_verts > (int)data.local_verts.size() ) { return 1; }  // we are not setting the size
01755 
01756         rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );CHKERRVAL( rval );  // assumes integer tag
01757 
01758         rval = pco->resolve_shared_ents( cset, -1, -1, &stag );CHKERRVAL( rval );
01759 
01760         rval = context.MBI->tag_delete( stag );CHKERRVAL( rval );
01761     }
01762     // provide partition tag equal to rank
01763     Tag part_tag;
01764     dum_id = -1;
01765     rval   = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
01766                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
01767 
01768     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
01769     {
01770         std::cout << " can't get par part tag.\n";
01771         return 1;
01772     }
01773 
01774     int rank = pco->rank();
01775     rval     = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );CHKERRVAL( rval );
01776 
01777     return 0;
01778 }
01779 
01780 // this assumes that this was not called before
01781 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
01782 {
01783     ErrorCode rval;
01784 
01785     // verify we have valid ghost layers input specified. If invalid, exit quick.
01786     if( *num_ghost_layers <= 0 ) { return 0; }  // nothing to do
01787 
01788     appData& data     = context.appDatas[*pid];
01789     ParallelComm* pco = context.pcomms[*pid];
01790 
01791     int addl_ents = 0;  // maybe we should be passing this too; most of the time we do not need additional ents
01792     // collective call
01793     rval =
01794         pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, *num_ghost_layers, addl_ents, true, true, &data.file_set );CHKERRVAL( rval );
01795 
01796     // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the
01797     // file set
01798     int rc = iMOAB_UpdateMeshInfo( pid );
01799     return rc;
01800 }
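/* Usage sketch (illustrative): request one layer of face-bridged ghost cells for a 3D
 * application; the values below are assumptions chosen for the example:
 *
 *   int ghostDim = 3, numLayers = 1, bridgeDim = 2;
 *   iMOAB_DetermineGhostEntities( &appID, &ghostDim, &numLayers, &bridgeDim );
 */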
01801 
01802 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* global, MPI_Group* receivingGroup, int* rcompid, int* method )
01803 {
01804     int ierr       = 0;
01805     ErrorCode rval = MB_SUCCESS;
01806     // appData & data = context.appDatas[*pid];
01807     ParallelComm* pco = context.pcomms[*pid];
01808 
01809     MPI_Comm sender = pco->comm();  // the sender comm is obtained from parallel comm in moab;
01810     // no need to pass it along
01811     // first see which processors are in each group; get the sender group from the sender
01812     // communicator
01813     MPI_Group senderGroup;
01814     ierr = MPI_Comm_group( sender, &senderGroup );
01815     if( ierr != 0 ) return 1;
01816 
01817     // instantiate the par comm graph
01818     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
01819     // int coid2)
01820     ParCommGraph* cgraph =
01821         new ParCommGraph( *global, senderGroup, *receivingGroup, context.appDatas[*pid].global_id, *rcompid );
01822     // we should search if we have another pcomm with the same comp ids in the list already
01823     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
01824     context.appDatas[*pid].pgraph[-1] = cgraph;
01825 
01826     int sender_rank = -1;
01827     MPI_Comm_rank( sender, &sender_rank );
01828 
01829     // decide how to distribute elements to each processor
01830     // now, get the entities on local processor, and pack them into a buffer for various processors
01831     // we will do trivial partition: first get the total number of elements from "sender"
01832     std::vector< int > number_elems_per_part;
01833     // how to distribute local elements to receiving tasks?
01834     // trivial partition: first compute the total number of elements that need to be sent
01835     Range owned = context.appDatas[*pid].owned_elems;
01836     if( owned.size() == 0 )
01837     {
01838         // then it must be vertices that we want to send
01839         owned = context.appDatas[*pid].local_verts;
01840         // we should have some vertices here
01841     }
01842 
01843     if( *method == 0 )  // trivial partitioning, old method
01844     {
01845         int local_owned_elem = (int)owned.size();
01846         int size             = pco->size();
01847         int rank             = pco->rank();
01848         number_elems_per_part.resize( size );  //
01849         number_elems_per_part[rank] = local_owned_elem;
01850 #if( MPI_VERSION >= 2 )
01851         // Use "in place" option
01852         ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
01853 #else
01854         {
01855             std::vector< int > all_tmp( size );
01856             ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
01857             number_elems_per_part = all_tmp;
01858         }
01859 #endif
01860 
01861         if( ierr != 0 ) { return 1; }
01862 
01863         // every sender computes the trivial partition; it is cheap, and it needs to be sent
01864         // to the receivers anyway
01865         rval = cgraph->compute_trivial_partition( number_elems_per_part );CHKERRVAL( rval );
01866 
01867         rval = cgraph->send_graph( *global );CHKERRVAL( rval );
01868     }
01869     else  // *method != 0, so it is either graph or geometric, parallel
01870     {
01871         // owned are the primary elements on this app
01872         rval = cgraph->compute_partition( pco, owned, *method );CHKERRVAL( rval );
01873 
01874         // basically, send the graph to the receiver side, with unblocking send
01875         rval = cgraph->send_graph_partition( pco, *global );CHKERRVAL( rval );
01876     }
01877     // pco is needed to pack, not for communication
01878     rval = cgraph->send_mesh_parts( *global, pco, owned );CHKERRVAL( rval );
01879 
01880     // mark for deletion
01881     MPI_Group_free( &senderGroup );
01882     return 0;
01883 }
01884 
01885 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* global, MPI_Group* sendingGroup, int* scompid )
01886 {
01887     appData& data          = context.appDatas[*pid];
01888     ParallelComm* pco      = context.pcomms[*pid];
01889     MPI_Comm receive       = pco->comm();
01890     EntityHandle local_set = data.file_set;
01891     ErrorCode rval;
01892 
01893     // first see which processors are in each group; get the receiver group from the receiver
01894     // communicator
01895     MPI_Group receiverGroup;
01896     int ierr = MPI_Comm_group( receive, &receiverGroup );
01897     CHKIERRVAL( ierr );
01898 
01899     // instantiate the par comm graph
01900     ParCommGraph* cgraph =
01901         new ParCommGraph( *global, *sendingGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
01902     // TODO we should search if we have another pcomm with the same comp ids in the list already
01903     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
01904     context.appDatas[*pid].pgraph[-1] = cgraph;
01905 
01906     int receiver_rank = -1;
01907     MPI_Comm_rank( receive, &receiver_rank );
01908 
01909     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
01910     // knows what data to expect
01911     std::vector< int > pack_array;
01912     rval = cgraph->receive_comm_graph( *global, pco, pack_array );CHKERRVAL( rval );
01913 
01914     // find the senders that correspond to the current receiver
01915     int current_receiver = cgraph->receiver( receiver_rank );
01916 
01917     std::vector< int > senders_local;
01918     size_t n = 0;
01919 
01920     while( n < pack_array.size() )
01921     {
01922         if( current_receiver == pack_array[n] )
01923         {
01924             for( int j = 0; j < pack_array[n + 1]; j++ )
01925             {
01926                 senders_local.push_back( pack_array[n + 2 + j] );
01927             }
01928 
01929             break;
01930         }
01931 
01932         n = n + 2 + pack_array[n + 1];
01933     }
01934 
01935 #ifdef VERBOSE
01936     std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
01937               << senders_local.size() << " tasks: ";
01938 
01939     for( int k = 0; k < (int)senders_local.size(); k++ )
01940     {
01941         std::cout << " " << senders_local[k];
01942     }
01943 
01944     std::cout << "\n";
01945 #endif
01946 
01947     if( senders_local.empty() )
01948     { std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n"; }
01949     rval = cgraph->receive_mesh( *global, pco, local_set, senders_local );CHKERRVAL( rval );
01950 
01951     // after we are done, we could merge vertices that come from different senders, but
01952     // have the same global id
01953     Tag idtag;
01954     rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );CHKERRVAL( rval );
01955 
01956     //   data.point_cloud = false;
01957     Range local_ents;
01958     rval = context.MBI->get_entities_by_handle( local_set, local_ents );CHKERRVAL( rval );
01959 
01960     // do not do merge if point cloud
01961     if( !local_ents.all_of_type( MBVERTEX ) )
01962     {
01963         if( (int)senders_local.size() >= 2 )  // need to remove duplicate vertices
01964         // that might come from different senders
01965         {
01966 
01967             Range local_verts = local_ents.subset_by_type( MBVERTEX );
01968             Range local_elems = subtract( local_ents, local_verts );
01969 
01970             // remove from local set the vertices
01971             rval = context.MBI->remove_entities( local_set, local_verts );CHKERRVAL( rval );
01972 
01973 #ifdef VERBOSE
01974             std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
01975 #endif
01976             MergeMesh mm( context.MBI );
01977 
01978             rval = mm.merge_using_integer_tag( local_verts, idtag );CHKERRVAL( rval );
01979 
01980             Range new_verts;  // local elems are local entities without vertices
01981             rval = context.MBI->get_connectivity( local_elems, new_verts );CHKERRVAL( rval );
01982 
01983 #ifdef VERBOSE
01984             std::cout << "after merging: new verts: " << new_verts.size() << "\n";
01985 #endif
01986             rval = context.MBI->add_entities( local_set, new_verts );CHKERRVAL( rval );
01987         }
01988     }
01989     else
01990         data.point_cloud = true;
01991 
01992     if( !data.point_cloud )
01993     {
01994         // still need to resolve shared entities (in this case, vertices )
01995         rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );CHKERRVAL( rval );
01996     }
01997     else
01998     {
01999         // if partition tag exists, set it to current rank; just to make it visible in VisIt
02000         Tag densePartTag;
02001         rval = context.MBI->tag_get_handle( "partition", densePartTag );
02002         if( NULL != densePartTag && MB_SUCCESS == rval )
02003         {
02004             Range local_verts;
02005             rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );CHKERRVAL( rval );
02006             std::vector< int > vals;
02007             int rank = pco->rank();
02008             vals.resize( local_verts.size(), rank );
02009             rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );CHKERRVAL( rval );
02010         }
02011     }
02012     // set the parallel partition tag
02013     Tag part_tag;
02014     int dum_id = -1;
02015     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02016                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02017 
02018     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02019     {
02020         std::cout << " can't get par part tag.\n";
02021         return 1;
02022     }
02023 
02024     int rank = pco->rank();
02025     rval     = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );CHKERRVAL( rval );
02026 
02027     // populate the mesh with current data info
02028     ierr = iMOAB_UpdateMeshInfo( pid );
02029     CHKIERRVAL( ierr );
02030 
02031     // mark for deletion
02032     MPI_Group_free( &receiverGroup );
02033 
02034     return 0;
02035 }
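/* Pairing sketch (illustrative): mesh migration is collective over the joint communicator.
 * Ranks where the component app is registered call iMOAB_SendMesh with the MPI group of the
 * receivers, while receiver ranks call iMOAB_ReceiveMesh with the group of the senders.
 * The groups, component ids and partition method below are assumptions:
 *
 *   // on the component (sender) side:
 *   int method = 0;                                   // 0 = trivial partition
 *   iMOAB_SendMesh( &srcAppID, &jointComm, &receiverGroup, &destCompID, &method );
 *
 *   // on the coupler (receiver) side:
 *   iMOAB_ReceiveMesh( &migrAppID, &jointComm, &senderGroup, &srcCompID );
 */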
02036 
02037 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id,
02038                               int tag_storage_name_length )
02039 {
02040 
02041     appData& data                               = context.appDatas[*pid];
02042     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02043     if( mt == data.pgraph.end() ) { return 1; }
02044     ParCommGraph* cgraph = mt->second;
02045     ParallelComm* pco    = context.pcomms[*pid];
02046     Range owned          = data.owned_elems;
02047     ErrorCode rval;
02048     EntityHandle cover_set;
02049 
02050     if( data.point_cloud ) { owned = data.local_verts; }
02051     // another possibility is for the par comm graph to have been computed in iMOAB_ComputeCommGraph,
02052     // e.g. after the atm-ocn intersection, from the phys grid (the imoab_phatm_ocn_coupler.cpp case);
02053     // in that case, get the cover set from the intx remapper
02054 #ifdef MOAB_HAVE_TEMPESTREMAP
02055     if( data.tempestData.remapper != NULL )  // this is the case where this app is part of an intersection
02056     {
02057         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02058         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
02059         // should still have only quads ?
02060     }
02061 #else
02062     // now, in case we are sending from intx between ocn and atm, we involve coverage set
02063     // how do I know if this receiver already participated in an intersection driven by coupler?
02064     // also, what if this was the "source" mesh in intx?
02065     // in that case, the elements might have been instantiated in the coverage set locally; the
02066     // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
02067     cover_set = cgraph->get_cover_set();  // this will be non null only for intx app ?
02068 
02069     if( 0 != cover_set )
02070     {
02071         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
02072     }
02073 #endif
02074 
02075     std::string tag_name( tag_storage_name );
02076 
02077     if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
02078     { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
02079     // basically, we assume the tags are already defined,
02080     //   and we can get them just by name
02081     // we assume that there are separators ";" between the tag names
02082     std::vector< std::string > tagNames;
02083     std::vector< Tag > tagHandles;
02084     std::string separator( ";" );
02085     split_tag_names( tag_name, separator, tagNames );
02086     for( size_t i = 0; i < tagNames.size(); i++ )
02087     {
02088         Tag tagHandle;
02089         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02090         if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
02091         tagHandles.push_back( tagHandle );
02092     }
02093 
02094     // pco is needed to pack, and for moab instance, not for communication!
02095     // still use nonblocking communication, over the joint comm
02096     rval = cgraph->send_tag_values( *join, pco, owned, tagHandles );CHKERRVAL( rval );
02097     // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
02098 
02099     return 0;
02100 }
02101 
02102 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id,
02103                                  int tag_storage_name_length )
02104 {
02105     appData& data = context.appDatas[*pid];
02106 
02107     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02108     if( mt == data.pgraph.end() ) { return 1; }
02109     ParCommGraph* cgraph = mt->second;
02110 
02111     ParallelComm* pco = context.pcomms[*pid];
02112     Range owned       = data.owned_elems;
02113 
02114     // how do I know if this receiver already participated in an intersection driven by coupler?
02115     // also, what if this was the "source" mesh in intx?
02116     // in that case, the elements might have been instantiated in the coverage set locally; the
02117     // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
02118     EntityHandle cover_set = cgraph->get_cover_set();
02119     ErrorCode rval;
02120     if( 0 != cover_set )
02121     {
02122         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
02123     }
02124     if( data.point_cloud ) { owned = data.local_verts; }
02125     // another possibility is for the par comm graph to have been computed in iMOAB_ComputeCommGraph,
02126     // e.g. after the atm-ocn intersection, from the phys grid (the imoab_phatm_ocn_coupler.cpp case);
02127     // in that case, get the cover set from the intx remapper
02128 #ifdef MOAB_HAVE_TEMPESTREMAP
02129     if( data.tempestData.remapper != NULL )  // this is the case where this app is part of an intersection
02130     {
02131         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02132         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );CHKERRVAL( rval );
02133         // should still have only quads ?
02134     }
02135 #endif
02136     /*
02137      * data_intx.remapper exists only on the intersection application;
02138      * how do we get it from here? (we know the pid that receives, and the comm graph used by
02139      * migrate mesh)
02140      */
02141 
02142     std::string tag_name( tag_storage_name );
02143 
02144     if( tag_storage_name_length < (int)strlen( tag_storage_name ) )
02145     { tag_name = tag_name.substr( 0, tag_storage_name_length ); }
02146 
02147     // we assume that there are separators ";" between the tag names
02148     std::vector< std::string > tagNames;
02149     std::vector< Tag > tagHandles;
02150     std::string separator( ";" );
02151     split_tag_names( tag_name, separator, tagNames );
02152     for( size_t i = 0; i < tagNames.size(); i++ )
02153     {
02154         Tag tagHandle;
02155         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02156         if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
02157         tagHandles.push_back( tagHandle );
02158     }
02159 
02160 #ifdef VERBOSE
02161     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
02162               << " and file set = " << ( data.file_set ) << "\n";
02163 #endif
02164     // pco is needed to pack, and for moab instance, not for communication!
02165     // still use nonblocking communication
02166     rval = cgraph->receive_tag_values( *join, pco, owned, tagHandles );CHKERRVAL( rval );
02167 
02168 #ifdef VERBOSE
02169     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
02170 #endif
02171 
02172     return 0;
02173 }
02174 
02175 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
02176 {
02177     // need first to find the pgraph that holds the information we need
02178     // this will be called on sender side only
02179     appData& data                               = context.appDatas[*pid];
02180     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02181     if( mt == data.pgraph.end() ) return 1;  // error
02182 
02183     mt->second->release_send_buffers();
02184     return 0;
02185 }
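/* Pairing sketch (illustrative): tag migration reuses the comm graph stored under
 * `context_id` (-1 for the original migration graph). Several tag names can be packed into
 * one call, separated by ';'. The app ids, communicator and tag names below are assumptions:
 *
 *   int contextID = -1;
 *   // on sender tasks:
 *   iMOAB_SendElementTag( &srcAppID, "a2oTbot;a2oUbot;a2oVbot", &jointComm, &contextID,
 *                         (int)strlen( "a2oTbot;a2oUbot;a2oVbot" ) );
 *   // on receiver tasks:
 *   iMOAB_ReceiveElementTag( &migrAppID, "a2oTbot;a2oUbot;a2oVbot", &jointComm, &contextID,
 *                            (int)strlen( "a2oTbot;a2oUbot;a2oVbot" ) );
 *   // once the nonblocking sends have completed, on the sender side:
 *   iMOAB_FreeSenderBuffers( &srcAppID, &contextID );
 */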
02186 
02187 /**
02188 \brief compute a comm graph between 2 moab apps, based on ID matching
02189 <B>Operations:</B> Collective
02190 */
02191 //#define VERBOSE
02192 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1, iMOAB_AppID pid2, MPI_Comm* join, MPI_Group* group1,
02193                                 MPI_Group* group2, int* type1, int* type2, int* comp1, int* comp2 )
02194 {
02195     ErrorCode rval = MB_SUCCESS;
02196     int localRank = 0, numProcs = 1;
02197     MPI_Comm_rank( *join, &localRank );
02198     MPI_Comm_size( *join, &numProcs );
02199     // instantiate the par comm graph
02200     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02201     // int coid2)
02202     ParCommGraph* cgraph = NULL;
02203     if( *pid1 >= 0 ) cgraph = new ParCommGraph( *join, *group1, *group2, *comp1, *comp2 );
02204     ParCommGraph* cgraph_rev = NULL;
02205     if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( *join, *group2, *group1, *comp2, *comp1 );
02206     // we should search if we have another pcomm with the same comp ids in the list already
02207     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02208     if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;      // the context will be the other comp
02209     if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev;  // from 2 to 1
02210     // each model has a list of global ids that will need to be sent through gs (the crystal router)
02211     // to rendezvous with the other model on the joint comm
02212     TupleList TLcomp1;
02213     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02214     TupleList TLcomp2;
02215     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02216     // will push_back a new tuple, if needed
02217 
02218     TLcomp1.enableWriteAccess();
02219 
02220     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
02221     Tag tagType1;
02222     rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", tagType1 );CHKERRVAL( rval );
02223     // find the values on first cell
02224     int lenTagType1 = 1;
02225     if( tagType1 )
02226     {
02227         rval = context.MBI->tag_get_length( tagType1, lenTagType1 );CHKERRVAL( rval );  // usually it is 16
02228     }
02229     Tag tagType2;
02230     rval = context.MBI->tag_get_handle( "GLOBAL_ID", tagType2 );CHKERRVAL( rval );
02231 
02232     std::vector< int > valuesComp1;
02233     // populate first tuple
02234     if( *pid1 >= 0 )
02235     {
02236         appData& data1     = context.appDatas[*pid1];
02237         EntityHandle fset1 = data1.file_set;
02238         // in case of tempest remap, get the coverage set
02239 #ifdef MOAB_HAVE_TEMPESTREMAP
02240         if( data1.tempestData.remapper != NULL )  // this is the case where this app is part of an intersection
02241         {
02242             fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02243             // should still have only quads ?
02244         }
02245 #endif
02246         Range ents_of_interest;
02247         if( *type1 == 1 )
02248         {
02249             assert( tagType1 );
02250             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );CHKERRVAL( rval );
02251             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
02252             rval = context.MBI->tag_get_data( tagType1, ents_of_interest, &valuesComp1[0] );CHKERRVAL( rval );
02253         }
02254         else if( *type1 == 2 )
02255         {
02256             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );CHKERRVAL( rval );
02257             valuesComp1.resize( ents_of_interest.size() );
02258             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );CHKERRVAL( rval );  // just global ids
02259         }
02260         else if( *type1 == 3 )  // for FV meshes, just get the global id of cell
02261         {
02262             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );CHKERRVAL( rval );
02263             valuesComp1.resize( ents_of_interest.size() );
02264             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );CHKERRVAL( rval );  // just global ids
02265         }
02266         else
02267         {
02268             CHKERRVAL( MB_FAILURE );  // we know only type 1 or 2 or 3
02269         }
02270         // now fill the tuple list with info and markers
02271         // because we will send only the ids, order and compress the list
02272         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
02273         TLcomp1.resize( uniq.size() );
02274         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02275         {
02276             // to proc, marker, element local index, index in el
02277             int marker               = *sit;
02278             int to_proc              = marker % numProcs;
02279             int n                    = TLcomp1.get_n();
02280             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
02281             TLcomp1.vi_wr[2 * n + 1] = marker;
02282             TLcomp1.inc_n();
02283         }
02284     }
02285 
02286     ProcConfig pc( *join );  // proc config does the crystal router
02287     pc.crystal_router()->gs_transfer( 1, TLcomp1,
02288                                       0 );  // communication towards joint tasks, with markers
02289     // sort by value (key 1)
02290 #ifdef VERBOSE
02291     std::stringstream ff1;
02292     ff1 << "TLcomp1_" << localRank << ".txt";
02293     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02294 #endif
02295     moab::TupleList::buffer sort_buffer;
02296     sort_buffer.buffer_init( TLcomp1.get_n() );
02297     TLcomp1.sort( 1, &sort_buffer );
02298     sort_buffer.reset();
02299 #ifdef VERBOSE
02300     // after sorting
02301     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02302 #endif
02303     // do the same, for the other component, number2, with type2
02304     // start copy
02305     TLcomp2.enableWriteAccess();
02306     // populate second tuple
02307     std::vector< int > valuesComp2;
02308     if( *pid2 >= 0 )
02309     {
02310         appData& data2     = context.appDatas[*pid2];
02311         EntityHandle fset2 = data2.file_set;
02312         // in case of tempest remap, get the coverage set
02313 #ifdef MOAB_HAVE_TEMPESTREMAP
02314         if( data2.tempestData.remapper != NULL )  // this is the case where this app is part of an intersection
02315         {
02316             fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02317             // should still have only quads ?
02318         }
02319 #endif
02320 
02321         Range ents_of_interest;
02322         if( *type2 == 1 )
02323         {
02324             assert( tagType1 );
02325             rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );CHKERRVAL( rval );
02326             valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
02327             rval = context.MBI->tag_get_data( tagType1, ents_of_interest, &valuesComp2[0] );CHKERRVAL( rval );
02328         }
02329         else if( *type2 == 2 )
02330         {
02331             rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );CHKERRVAL( rval );
02332             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02333             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );CHKERRVAL( rval );  // just global ids
02334         }
02335         else if( *type2 == 3 )
02336         {
02337             rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );CHKERRVAL( rval );
02338             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02339             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );CHKERRVAL( rval );  // just global ids
02340         }
02341         else
02342         {
02343             CHKERRVAL( MB_FAILURE );  // we know only type 1, 2, or 3
02344         }
02345         // now fill the tuple list with info and markers
02346         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
02347         TLcomp2.resize( uniq.size() );
02348         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02349         {
02350             // to proc, marker, element local index, index in el
02351             int marker               = *sit;
02352             int to_proc              = marker % numProcs;
02353             int n                    = TLcomp2.get_n();
02354             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
02355             TLcomp2.vi_wr[2 * n + 1] = marker;
02356             TLcomp2.inc_n();
02357         }
02358     }
02359     pc.crystal_router()->gs_transfer( 1, TLcomp2,
02360                                       0 );  // communication towards joint tasks, with markers
02361     // sort by value (key 1)
02362 #ifdef VERBOSE
02363     std::stringstream ff2;
02364     ff2 << "TLcomp2_" << localRank << ".txt";
02365     TLcomp2.print_to_file( ff2.str().c_str() );
02366 #endif
02367     sort_buffer.buffer_reserve( TLcomp2.get_n() );
02368     TLcomp2.sort( 1, &sort_buffer );
02369     sort_buffer.reset();
02370     // end copy
02371 #ifdef VERBOSE
02372     TLcomp2.print_to_file( ff2.str().c_str() );
02373 #endif
02374     // need to send back the info, from the rendezvous point, for each of the values
02375     /* so go over each value, on local process in joint communicator
02376 
02377     now have to send back the info needed for communication;
02378      loop in in sync over both TLComp1 and TLComp2, in local process;
02379       So, build new tuple lists, to send synchronous communication
02380       populate them at the same time, based on marker, that is indexed
02381     */
02382 
02383     TupleList TLBackToComp1;
02384     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
02385     TLBackToComp1.enableWriteAccess();
02386 
02387     TupleList TLBackToComp2;
02388     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
02389     TLBackToComp2.enableWriteAccess();
02390 
02391     int n1 = TLcomp1.get_n();
02392     int n2 = TLcomp2.get_n();
02393 
02394     int indexInTLComp1 = 0;
02395     int indexInTLComp2 = 0;  // advance both, according to the marker
02396     if( n1 > 0 && n2 > 0 )
02397     {
02398 
02399         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
02400         {
02401             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
02402             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
02403             if( currentValue1 < currentValue2 )
02404             {
02405                 // we have a big problem; basically, we are saying that
02406                 // dof currentValue is on one model and not on the other
02407                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
02408                 indexInTLComp1++;
02409                 continue;
02410             }
02411             if( currentValue1 > currentValue2 )
02412             {
02413                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
02414                 indexInTLComp2++;
02415                 continue;
02416             }
02417             int size1 = 1;
02418             int size2 = 1;
02419             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
02420                 size1++;
02421             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
02422                 size2++;
02423             // must be found in both lists, find the start and end indices
02424             for( int i1 = 0; i1 < size1; i1++ )
02425             {
02426                 for( int i2 = 0; i2 < size2; i2++ )
02427                 {
02428                     // send the info back to components
02429                     int n = TLBackToComp1.get_n();
02430                     TLBackToComp1.reserve();
02431                     TLBackToComp1.vi_wr[3 * n] =
02432                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
02433                                                                      // came from, info from comp2
02434                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
02435                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
02436                     n                              = TLBackToComp2.get_n();
02437                     TLBackToComp2.reserve();
02438                     TLBackToComp2.vi_wr[3 * n] =
02439                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
02440                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
02441                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
02442                     // what if there are repeated markers in TLcomp2? increase just index2
02443                 }
02444             }
02445             indexInTLComp1 += size1;
02446             indexInTLComp2 += size2;
02447         }
02448     }
02449     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about
02450     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
02451 
02452     if( *pid1 >= 0 )
02453     {
02454         // we are on original comp 1 tasks
02455         // before ordering
02456         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
02457         // processors it communicates with
02458 #ifdef VERBOSE
02459         std::stringstream f1;
02460         f1 << "TLBack1_" << localRank << ".txt";
02461         TLBackToComp1.print_to_file( f1.str().c_str() );
02462 #endif
02463         sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
02464         TLBackToComp1.sort( 1, &sort_buffer );
02465         sort_buffer.reset();
02466 #ifdef VERBOSE
02467         TLBackToComp1.print_to_file( f1.str().c_str() );
02468 #endif
02469         // so we are now on pid1; we now know where each marker has to go
02470         // add a new method to ParCommGraph, to set up the involved_IDs_map
02471         cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
02472     }
02473     if( *pid2 >= 0 )
02474     {
02475         // we are on original comp 2 tasks
02476         // before ordering
02477         // now for each value in TLBackToComp2.vi_rd[3*i+1], on the current proc, we know the
02478         // processors it communicates with
02479 #ifdef VERBOSE
02480         std::stringstream f2;
02481         f2 << "TLBack2_" << localRank << ".txt";
02482         TLBackToComp2.print_to_file( f2.str().c_str() );
02483 #endif
02484         sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
02485         TLBackToComp2.sort( 2, &sort_buffer );
02486         sort_buffer.reset();
02487 #ifdef VERBOSE
02488         TLBackToComp2.print_to_file( f2.str().c_str() );
02489 #endif
02490         cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
02491         //
02492     }
02493     return 0;
02494 }
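/* Usage sketch (illustrative): compute a comm graph between a physics-grid component
 * (type 2, point cloud with GLOBAL_ID on vertices) and a coupler-side FV mesh (type 3,
 * GLOBAL_ID on cells), collectively over the joint communicator; the app ids, groups and
 * component ids are assumptions:
 *
 *   int typePhys = 2, typeFv = 3;
 *   int compPhys = 5, compFv = 6;
 *   iMOAB_ComputeCommGraph( &physAppID, &cplAppID, &jointComm, &physGroup, &cplGroup,
 *                           &typePhys, &typeFv, &compPhys, &compFv );
 */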
02495 //#undef VERBOSE
02496 
02497 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
02498 {
02499     appData& data     = context.appDatas[*pid];
02500     ParallelComm* pco = context.pcomms[*pid];
02501     // collapse vertices and transform cells into triangles/quads /polys
02502     // tags we care about: area, frac, global id
02503     std::vector< Tag > tagsList;
02504     Tag tag;
02505     ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
02506     if( !tag || rval != MB_SUCCESS ) return 1;  // fatal error, abort
02507     tagsList.push_back( tag );
02508     rval = context.MBI->tag_get_handle( "area", tag );
02509     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
02510     rval = context.MBI->tag_get_handle( "frac", tag );
02511     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
02512     double tol = 1.0e-9;
02513     rval       = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );CHKERRVAL( rval );
02514 
02515     // clean material sets of cells that were deleted
02516     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
02517                                                       data.mat_sets, Interface::UNION );CHKERRVAL( rval );
02518 
02519     if( !data.mat_sets.empty() )
02520     {
02521         EntityHandle matSet = data.mat_sets[0];
02522         Range elems;
02523         rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );CHKERRVAL( rval );
02524         rval = context.MBI->remove_entities( matSet, elems );CHKERRVAL( rval );
02525         // put back only cells from data.file_set
02526         elems.clear();
02527         rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );CHKERRVAL( rval );
02528         rval = context.MBI->add_entities( matSet, elems );CHKERRVAL( rval );
02529     }
02530     int ierr = iMOAB_UpdateMeshInfo( pid );
02531     if( ierr > 0 ) return ierr;
02532     ParallelMergeMesh pmm( pco, tol );
02533     rval = pmm.merge( data.file_set,
02534                       /* do not do local merge*/ false,
02535                       /*  2d cells*/ 2 );CHKERRVAL( rval );
02536 
02537     // assign global ids only for vertices, cells have them fine
02538     rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );CHKERRVAL( rval );
02539 
02540     // set the partition tag on the file set
02541     Tag part_tag;
02542     int dum_id = -1;
02543     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02544                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02545 
02546     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02547     {
02548         std::cout << " can't get par part tag.\n";
02549         return 1;
02550     }
02551 
02552     int rank = pco->rank();
02553     rval     = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );CHKERRVAL( rval );
02554 
02555     return 0;
02556 }
02557 
02558 #ifdef MOAB_HAVE_TEMPESTREMAP
02559 
02560 // this call must be collective on the joint communicator
02561 //  intersection tasks on the coupler will need to send to the component tasks the list of
02562 // element ids that are relevant: those that intersected some of the target elements in the
02563 // intersection
02564 // (the target elements themselves are not needed here)
02565 ErrCode iMOAB_CoverageGraph( MPI_Comm* join, iMOAB_AppID pid_src, iMOAB_AppID pid_migr, iMOAB_AppID pid_intx,
02566                              int* context_id )
02567 {
02568     // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
02569     // exchange
02570     ErrorCode rval;
02571     std::vector< int > srcSenders;
02572     std::vector< int > receivers;
02573     ParCommGraph* sendGraph = NULL;
02574     int ierr;
02575     int default_context_id = -1;
02576 
02577     // First, find the original graph between PE sets
02578     // And based on this one, we will build the newly modified one for coverage
02579     if( *pid_src >= 0 )
02580     {
02581         sendGraph = context.appDatas[*pid_src].pgraph[default_context_id];  // maybe check if it does not exist
02582 
02583         // report the sender and receiver tasks in the joint comm
02584         srcSenders = sendGraph->senders();
02585         receivers  = sendGraph->receivers();
02586 #ifdef VERBOSE
02587         std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
02588 #endif
02589     }
02590 
02591     ParCommGraph* recvGraph = NULL;  // will be non null on receiver tasks (intx tasks)
02592     if( *pid_migr >= 0 )
02593     {
02594         // find the original one
02595         recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
02596         // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
02597         srcSenders = recvGraph->senders();
02598         receivers  = recvGraph->receivers();
02599 #ifdef VERBOSE
02600         std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
02601 #endif
02602     }
02603 
02604     // loop over pid_intx elements, to see which original processors in the joint comm have sent the
02605     // coverage mesh; if we are on intx tasks, send coverage info towards the original component tasks,
02606     // about the needed cells
02607     TupleList TLcovIDs;
02608     TLcovIDs.initialize( 2, 0, 0, 0, 0 );  // to proc, GLOBAL ID; estimate about 100 IDs to be sent
02609     // will push_back a new tuple, if needed
02610     TLcovIDs.enableWriteAccess();
02611     // the crystal router will send ID cell to the original source, on the component task
02612     // if we are on intx tasks, loop over all intx elements and
02613 
02614     int currentRankInJointComm = -1;
02615     ierr                       = MPI_Comm_rank( *join, &currentRankInJointComm );
02616     CHKIERRVAL( ierr );
02617 
02618     // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
02619     // need to send information towards component tasks
02620     if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
02621         receivers.end() )  // we are on receivers tasks, we can request intx info
02622     {
02623         // find the pcomm for the intx pid
02624         if( *pid_intx >= (int)context.appDatas.size() ) return 1;
02625 
02626         appData& dataIntx = context.appDatas[*pid_intx];
02627         Tag parentTag, orgSendProcTag;
02628 
02629         rval = context.MBI->tag_get_handle( "SourceParent", parentTag );CHKERRVAL( rval );          // global id of the blue (source) element
02630         if( !parentTag ) return 1;  // fatal error, abort
02631 
02632         rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );CHKERRVAL( rval );
02633         if( !orgSendProcTag ) return 1;  // fatal error, abort
02634 
02635         // find the file set, red parents for intx cells, and put them in tuples
02636         EntityHandle intxSet = dataIntx.file_set;
02637         Range cells;
02638         // get all entities from the set, and look at their RedParent
02639         rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );CHKERRVAL( rval );
02640 
02641         std::map< int, std::set< int > > idsFromProcs;  // send that info back to enhance parCommGraph cache
02642         for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
02643         {
02644             EntityHandle intx_cell = *it;
02645             int gidCell, origProc;  // look at receivers
02646 
02647             rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );CHKERRVAL( rval );
02648             rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );CHKERRVAL( rval );
02649             // we have augmented the overlap set with ghost cells ; in that case, the
02650             // orig_sending_processor is not set so it will be -1;
02651             if( origProc < 0 ) continue;
02652 
02653             std::set< int >& setInts = idsFromProcs[origProc];
02654             setInts.insert( gidCell );
02655         }
02656 
02657         // if we have no intx cells, it means we are on point clouds; as a quick fix, just use all cells
02658         // from coverage set
02659         if( cells.empty() )
02660         {
02661             // get coverage set
02662             assert( *pid_intx >= 0 );
02663             appData& dataIntx      = context.appDatas[*pid_intx];
02664             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02665 
02666             // get all cells from coverage set
02667             Tag gidTag;
02668             rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );CHKERRVAL( rval );
02669             rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );CHKERRVAL( rval );
02670             // look at their orig_sending_processor
02671             for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
02672             {
02673                 EntityHandle covCell = *it;
02674                 int gidCell, origProc;  // global id and original sending processor
02675 
02676                 rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );CHKERRVAL( rval );
02677                 rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );CHKERRVAL( rval );
02678                 // the overlap set may have been augmented with ghost cells; in that case, the
02679                 // orig_sending_processor tag is not set, so it will be -1
02680                 if( origProc < 0 )  // it should not be < 0 here, but guard anyway
02681                     continue;
02682                 std::set< int >& setInts = idsFromProcs[origProc];
02683                 setInts.insert( gidCell );
02684             }
02685         }
02686 
02687 #ifdef VERBOSE
02688         std::ofstream dbfile;
02689         std::stringstream outf;
02690         outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
02691         dbfile.open( outf.str().c_str() );
02692         dbfile << "Writing this to a file.\n";
02693 
02694         dbfile << " map size:" << idsFromProcs.size()
02695                << std::endl;  // on the receiver side, these show how much data to receive
02696         // from the sender (how many ids, and how much tag data later; we need to size up the
02697         // receiver buffer); arrange in tuples and use map iterators to send the ids
02698         for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
02699         {
02700             std::set< int >& setIds = mt->second;
02701             dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
02702             int counter = 0;
02703             for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
02704             {
02705                 int valueID = *st;
02706                 dbfile << " " << valueID;
02707                 counter++;
02708                 if( counter % 10 == 0 ) dbfile << "\n";
02709             }
02710             dbfile << "\n";
02711         }
02712         dbfile.close();
02713 #endif
02714         if( NULL != recvGraph )
02715         {
02716             ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph );  // just copy
02717             recvGraph1->set_context_id( *context_id );
02718             recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
02719             // this par comm graph will need to use the coverage set
02720             // so we are for sure on intx pes (the receiver is the coupler mesh)
02721             assert( *pid_intx >= 0 );
02722             appData& dataIntx      = context.appDatas[*pid_intx];
02723             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02724             recvGraph1->set_cover_set( cover_set );
02725             context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1;
02726         }
02727         for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
02728         {
02729             int procToSendTo       = mit->first;
02730             std::set< int >& idSet = mit->second;
02731             for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
02732             {
02733                 int n = TLcovIDs.get_n();
02734                 TLcovIDs.reserve();
02735                 TLcovIDs.vi_wr[2 * n]     = procToSendTo;  // send to processor
02736                 TLcovIDs.vi_wr[2 * n + 1] = *sit;          // global id of the cell needed from that processor
02737             }
02738         }
02739     }
02740 
02741     ProcConfig pc( *join );  // proc config does the crystal router
02742     pc.crystal_router()->gs_transfer( 1, TLcovIDs,
02743                                       0 );  // communication towards component tasks, with what ids are needed
02744     // for each task from receiver
02745 
02746     // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
02747     if( NULL != sendGraph )
02748     {
02749         // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
02750         // receiver task
02751         ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph );  // just copy
02752         sendGraph1->set_context_id( *context_id );
02753         context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
02754         rval                                           = sendGraph1->settle_send_graph( TLcovIDs );CHKERRVAL( rval );
02755     }
02756     return 0;  // success
02757 }
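// The routine above packs (destination rank, global cell id) pairs into a TupleList and routes
// them with the crystal router. A minimal sketch of that pattern is shown below; the communicator,
// rank and id values are hypothetical and only illustrate the packing/transfer sequence used above.
static void example_route_ids_with_crystal_router( MPI_Comm jointComm )
{
    TupleList TLexample;
    TLexample.initialize( 2, 0, 0, 0, 10 );  // 2 ints per tuple: (to proc, GLOBAL ID)
    TLexample.enableWriteAccess();

    int n = TLexample.get_n();
    TLexample.reserve();                      // make room for one more tuple
    TLexample.vi_wr[2 * n]     = 1;           // destination rank (hypothetical)
    TLexample.vi_wr[2 * n + 1] = 42;          // global id to request (hypothetical)

    ProcConfig pc_example( jointComm );       // proc config owns the crystal router
    pc_example.crystal_router()->gs_transfer( 1, TLexample, 0 );
    // after the transfer, each task holds the tuples addressed to it
}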
02758 
02759 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix,
02760                              int length_prefix )
02761 {
02762     ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
02763     std::string prefix_str( prefix );
02764     if( length_prefix < (int)strlen( prefix ) ) { prefix_str = prefix_str.substr( 0, length_prefix ); }
02765     if( NULL != cgraph )
02766         cgraph->dump_comm_information( prefix_str, *is_sender );
02767     else
02768     {
02769         std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
02770                   << " context: " << *context_id << "\n";
02771     }
02772     return 0;
02773 }
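// Usage sketch for iMOAB_DumpCommGraph: a driver debugging a coupled run might dump the
// communication graph of an application toward a given context. The context id, sender flag
// and prefix below are hypothetical.
static ErrCode example_dump_comm_graph( iMOAB_AppID pid )
{
    int context_id = 0;              // hypothetical coupling context
    int is_sender  = 1;              // dump the sender-side view of the graph
    char prefix[]  = "atm_to_cpl";   // hypothetical output prefix
    return iMOAB_DumpCommGraph( pid, &context_id, &is_sender, prefix, (int)( sizeof( prefix ) - 1 ) );
}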
02774 
02775 #endif  // #ifdef MOAB_HAVE_TEMPESTREMAP
02776 
02777 #endif  // #ifdef MOAB_HAVE_MPI
02778 
02779 
02780 #ifdef MOAB_HAVE_TEMPESTREMAP
02781 
02782 #ifdef MOAB_HAVE_NETCDF
02783 
02784 ErrCode iMOAB_LoadMappingWeightsFromFile(
02785     iMOAB_AppID pid_intersection, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
02786     const iMOAB_String remap_weights_filename, int* owned_dof_ids, int* owned_dof_ids_length, int* row_major_ownership,
02787     int solution_weights_identifier_length, int remap_weights_filename_length )
02788 {
02789     assert( remap_weights_filename_length > 0 && solution_weights_identifier_length > 0 );
02790     // assert( row_major_ownership != NULL );
02791 
02792     ErrorCode rval;
02793     bool row_based_partition = ( row_major_ownership != NULL && *row_major_ownership > 0 ? true : false );
02794 
02795     // Get the source and target data and pcomm objects
02796     appData& data_intx   = context.appDatas[*pid_intersection];
02797     TempestMapAppData& tdata = data_intx.tempestData;
02798 
02799     // Get the handle to the remapper object
02800     if( tdata.remapper == NULL )
02801     {
02802         // Now allocate and initialize the remapper object
02803 #ifdef MOAB_HAVE_MPI
02804         ParallelComm* pco = context.pcomms[*pid_intersection];
02805         tdata.remapper    = new moab::TempestRemapper( context.MBI, pco );
02806 #else
02807         tdata.remapper = new moab::TempestRemapper( context.MBI );
02808 #endif
02809         tdata.remapper->meshValidate     = true;
02810         tdata.remapper->constructEdgeMap = true;
02811 
02812         // Do not create new filesets; Use the sets from our respective applications
02813         tdata.remapper->initialize( false );
02814         // tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
02815         // tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
02816         tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
02817     }
02818 
02819     // Setup loading of weights onto TempestOnlineMap
02820     // Set the context for the remapping weights computation
02821     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
02822 
02823     // Now allocate and initialize the remapper object
02824     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
02825     assert( weightMap != NULL );
02826 
02827     // Check that the DoF ownership array and its length are either both NULL (in which case we
02828     // assume a trivial partition for the map) or both non-NULL with a length greater than 0.
02829     assert( ( owned_dof_ids == NULL && owned_dof_ids_length == NULL ) ||
02830             ( owned_dof_ids != NULL && owned_dof_ids_length != NULL && *owned_dof_ids_length > 0 ) );
02831 
02832     // Invoke the actual call to read in the parallel map
02833     if( owned_dof_ids == NULL && owned_dof_ids_length == NULL )
02834     {
02835         std::vector< int > tmp_owned_ids;
02836         rval = weightMap->ReadParallelMap( remap_weights_filename,
02837                                            tmp_owned_ids,
02838                                            row_based_partition );CHKERRVAL( rval );
02839     }
02840     else
02841     {
02842         rval = weightMap->ReadParallelMap( remap_weights_filename,
02843                                            std::vector< int >( owned_dof_ids, owned_dof_ids + *owned_dof_ids_length ),
02844                                            row_based_partition );CHKERRVAL( rval );
02845     }
02846 
02847     return 0;
02848 }
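// Usage sketch for iMOAB_LoadMappingWeightsFromFile: with NULL DoF ownership arguments the routine
// assumes a trivial partition, as asserted above. The application id, map identifier and file name
// are hypothetical.
static ErrCode example_load_weights( iMOAB_AppID pid_cpl_intx )
{
    char map_id[]   = "scalar";          // hypothetical weights identifier
    char map_file[] = "src_tgt_map.nc";  // hypothetical map file (NetCDF)
    return iMOAB_LoadMappingWeightsFromFile( pid_cpl_intx, map_id, map_file, NULL, NULL, NULL,
                                             (int)( sizeof( map_id ) - 1 ), (int)( sizeof( map_file ) - 1 ) );
}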
02849 
02850 ErrCode iMOAB_WriteMappingWeightsToFile(
02851     iMOAB_AppID pid_intersection, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
02852     const iMOAB_String remap_weights_filename, int solution_weights_identifier_length, int remap_weights_filename_length )
02853 {
02854     assert( remap_weights_filename_length > 0 && solution_weights_identifier_length > 0 );
02855 
02856     ErrorCode rval;
02857 
02858     // Get the source and target data and pcomm objects
02859     appData& data_intx       = context.appDatas[*pid_intersection];
02860     TempestMapAppData& tdata = data_intx.tempestData;
02861 
02862     // Get the handle to the remapper object
02863     assert( tdata.remapper != NULL );
02864 
02865     // Now get online weights object and ensure it is valid
02866     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
02867     assert( weightMap != NULL );
02868 
02869     std::string filename  = std::string( remap_weights_filename );
02870 
02871     // Write the map file to disk in parallel using either HDF5 or SCRIP interface
02872     rval = weightMap->WriteParallelMap( filename );CHKERRVAL( rval );
02873 
02874     return 0;
02875 }
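// Usage sketch for iMOAB_WriteMappingWeightsToFile: after weights have been computed or loaded
// under a given identifier, they can be written back to disk. The identifier and output file
// name below are hypothetical.
static ErrCode example_write_weights( iMOAB_AppID pid_cpl_intx )
{
    char map_id[]   = "scalar";
    char map_file[] = "src_tgt_map_out.nc";
    return iMOAB_WriteMappingWeightsToFile( pid_cpl_intx, map_id, map_file,
                                            (int)( sizeof( map_id ) - 1 ), (int)( sizeof( map_file ) - 1 ) );
}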
02876 
02877 #endif  // #ifdef MOAB_HAVE_NETCDF
02878 
02879 #define USE_API
02880 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
02881 {
02882     ErrorCode rval;
02883     moab::CartVect pos;
02884 
02885     Range& verts                   = context.appDatas[*pid].all_verts;
02886     moab::EntityHandle firstVertex = ( verts[0] );
02887 
02888     // coordinate data
02889     rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );CHKERRVAL( rval );
02890 
02891     // compute the distance from origin
02892     // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
02893     *radius = pos.length();
02894     return 0;
02895 }
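// A hedged sketch of the loop suggested in the TODO above: check that every vertex of the
// application mesh lies (within a tolerance) at the same distance from the origin before
// trusting the radius of the first vertex. The tolerance argument is hypothetical.
static ErrCode example_verify_spherical_mesh( iMOAB_AppID pid, double tol )
{
    ErrorCode rval;
    double radius = 0.0;
    if( ComputeSphereRadius( pid, &radius ) ) return 1;

    Range& verts = context.appDatas[*pid].all_verts;
    for( Range::iterator it = verts.begin(); it != verts.end(); ++it )
    {
        EntityHandle v = *it;
        moab::CartVect pos;
        rval = context.MBI->get_coords( &v, 1, (double*)&( pos[0] ) );CHKERRVAL( rval );
        if( fabs( pos.length() - radius ) > tol ) return 1;  // not a sphere of the sampled radius
    }
    return 0;
}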
02896 
02897 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
02898 {
02899     ErrorCode rval;
02900     ErrCode ierr;
02901     bool validate = true;
02902 
02903     double radius_source = 1.0;
02904     double radius_target = 1.0;
02905     const double epsrel  = 1e-15;
02906     const double boxeps  = 1.e-6;
02907 
02908     // Get the source and target data and pcomm objects
02909     appData& data_src  = context.appDatas[*pid_src];
02910     appData& data_tgt  = context.appDatas[*pid_tgt];
02911     appData& data_intx = context.appDatas[*pid_intx];
02912 #ifdef MOAB_HAVE_MPI
02913     ParallelComm* pco_intx = context.pcomms[*pid_intx];
02914 #endif
02915 
02916     // Mesh intersection has already been computed; Return early.
02917     TempestMapAppData& tdata = data_intx.tempestData;
02918     if( tdata.remapper != NULL ) return 0;
02919 
02920     bool is_parallel = false, is_root = true;
02921     int rank = 0;
02922 #ifdef MOAB_HAVE_MPI
02923     if( pco_intx )
02924     {
02925         rank        = pco_intx->rank();
02926         is_parallel = ( pco_intx->size() > 1 );
02927         is_root     = ( rank == 0 );
02928         rval        = pco_intx->check_all_shared_handles();CHKERRVAL( rval );
02929     }
02930 #endif
02931 
02932     moab::DebugOutput outputFormatter( std::cout, rank, 0 );
02933     outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
02934 
02935     ierr = iMOAB_UpdateMeshInfo( pid_src );
02936     CHKIERRVAL( ierr );
02937     ierr = iMOAB_UpdateMeshInfo( pid_tgt );
02938     CHKIERRVAL( ierr );
02939 
02940     // Rescale the radius of both to compute the intersection
02941     ComputeSphereRadius( pid_src, &radius_source );
02942     ComputeSphereRadius( pid_tgt, &radius_target );
02943 #ifdef VERBOSE
02944     if( is_root )
02945         outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
02946                                 radius_target );
02947 #endif
02948 
02949     /* Let us make sure that the radii match for the source and target meshes. If not, rescale now
02950      * and unscale later. */
02951     bool radii_scaled    = false;
02952     double defaultradius = 1.0;
02953     if( fabs( radius_source - radius_target ) > 1e-10 )
02954     { /* the radii are different */
02955         radii_scaled = true;
02956         rval         = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );CHKERRVAL( rval );
02957         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );CHKERRVAL( rval );
02958     }
02959 
02960     // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
02961 #ifdef MOAB_HAVE_TEMPESTREMAP
02962     IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
02963 #else
02964     IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
02965 #endif
02966 
02967     // print verbosely about the problem setting
02968     bool use_kdtree_search = false;
02969     double srctgt_areas[2], srctgt_areas_glb[2];
02970     {
02971         moab::Range rintxverts, rintxelems;
02972         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );CHKERRVAL( rval );
02973         rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );CHKERRVAL( rval );
02974         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );CHKERRVAL( rval );
02975         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );CHKERRVAL( rval );
02976         srctgt_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
02977 #ifdef VERBOSE
02978         if( is_root )
02979             outputFormatter.printf( 0, "The red set contains %d vertices and %d elements \n", rintxverts.size(),
02980                                     rintxelems.size() );
02981 #endif
02982 
02983         moab::Range bintxverts, bintxelems;
02984         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );CHKERRVAL( rval );
02985         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );CHKERRVAL( rval );
02986         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );CHKERRVAL( rval );
02987         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );CHKERRVAL( rval );
02988         srctgt_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
02989 #ifdef VERBOSE
02990         if( is_root )
02991             outputFormatter.printf( 0, "The blue set contains %d vertices and %d elements \n", bintxverts.size(),
02992                                     bintxelems.size() );
02993 #endif
02994 #ifdef MOAB_HAVE_MPI
02995         MPI_Allreduce( &srctgt_areas[0], &srctgt_areas_glb[0], 2, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
02996 #else
02997         srctgt_areas_glb[0] = srctgt_areas[0];
02998         srctgt_areas_glb[1] = srctgt_areas[1];
02999 #endif
03000         use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
03001     }
03002 
03003     data_intx.dimension = data_tgt.dimension;
03004     // set the context for the source and destination applications
03005     tdata.pid_src  = pid_src;
03006     tdata.pid_dest = pid_tgt;
03007 
03008     // Now allocate and initialize the remapper object
03009 #ifdef MOAB_HAVE_MPI
03010     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
03011 #else
03012     tdata.remapper = new moab::TempestRemapper( context.MBI );
03013 #endif
03014     tdata.remapper->meshValidate     = true;
03015     tdata.remapper->constructEdgeMap = true;
03016 
03017     // Do not create new filesets; Use the sets from our respective applications
03018     tdata.remapper->initialize( false );
03019     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
03020     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
03021     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03022 
03023     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );CHKERRVAL( rval );
03024     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );CHKERRVAL( rval );
03025 
03026     // First, compute the covering source set.
03027     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );CHKERRVAL( rval );
03028 
03029     // Next, compute intersections with MOAB.
03030     rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );CHKERRVAL( rval );
03031 
03032     // Overlap mesh computation done; optionally validate the areas
03033     if( validate )
03034     {
03035         double local_area,
03036             global_areas[3];  // Array for Initial area, and through Method 1 and Method 2
03037         local_area = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
03038 
03039         global_areas[0] = srctgt_areas_glb[0];
03040         global_areas[1] = srctgt_areas_glb[1];
03041 
03042 #ifdef MOAB_HAVE_MPI
03043         if( is_parallel ) { MPI_Reduce( &local_area, &global_areas[2], 1, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() ); }
03044         else
03045         {
03046             global_areas[2] = local_area;
03047         }
03048 #else
03049         global_areas[2] = local_area;
03050 #endif
03051         if( is_root )
03052         {
03053             outputFormatter.printf( 0,
03054                                     "initial area: source mesh = %12.14f, target mesh = %12.14f, "
03055                                     "overlap mesh = %12.14f\n",
03056                                     global_areas[0], global_areas[1], global_areas[2] );
03057             outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
03058                                     fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
03059                                     fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
03060         }
03061     }
03062 
03063     return 0;
03064 }
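// Usage sketch: computing the overlap (intersection) mesh is the first step of the remapping
// workflow; weights are generated afterwards, see the sketch after
// iMOAB_ComputeScalarProjectionWeights below. The three application ids are hypothetical and
// assumed to be already registered and loaded.
static ErrCode example_intersect_on_sphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
{
    // returns early (success) if pid_intx already holds a computed intersection
    return iMOAB_ComputeMeshIntersectionOnSphere( pid_src, pid_tgt, pid_intx );
}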
03065 
03066 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
03067 {
03068     ErrorCode rval;
03069     ErrCode ierr;
03070 
03071     double radius_source = 1.0;
03072     double radius_target = 1.0;
03073     const double epsrel  = 1e-15;
03074     const double boxeps  = 1.e-8;
03075 
03076     // Get the source and target data and pcomm objects
03077     appData& data_src  = context.appDatas[*pid_src];
03078     appData& data_tgt  = context.appDatas[*pid_tgt];
03079     appData& data_intx = context.appDatas[*pid_intx];
03080 #ifdef MOAB_HAVE_MPI
03081     ParallelComm* pco_intx = context.pcomms[*pid_intx];
03082 #endif
03083 
03084     // Mesh intersection has already been computed; Return early.
03085     TempestMapAppData& tdata = data_intx.tempestData;
03086     if( tdata.remapper != NULL ) return 0;
03087 
03088 #ifdef MOAB_HAVE_MPI
03089     if( pco_intx )
03090     {
03091         rval = pco_intx->check_all_shared_handles();CHKERRVAL( rval );
03092     }
03093 #endif
03094 
03095     ierr = iMOAB_UpdateMeshInfo( pid_src );CHKIERRVAL( ierr );
03096     ierr = iMOAB_UpdateMeshInfo( pid_tgt );CHKIERRVAL( ierr );
03097 
03098     // Rescale the radius of both to compute the intersection
03099     ComputeSphereRadius( pid_src, &radius_source );
03100     ComputeSphereRadius( pid_tgt, &radius_target );
03101 
03102     IntxAreaUtils areaAdaptor;
03103     // print verbosely about the problem setting
03104     {
03105         moab::Range rintxverts, rintxelems;
03106         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );CHKERRVAL( rval );
03107         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );CHKERRVAL( rval );
03108         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );CHKERRVAL( rval );
03109         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );CHKERRVAL( rval );
03110 #ifdef VERBOSE
03111         std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
03112                   << " elements \n";
03113 #endif
03114 
03115         moab::Range bintxverts, bintxelems;
03116         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );CHKERRVAL( rval );
03117         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );CHKERRVAL( rval );
03118         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );CHKERRVAL( rval );
03119         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );CHKERRVAL( rval );
03120 #ifdef VERBOSE
03121         std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
03122                   << " elements \n";
03123 #endif
03124     }
03125 
03126     data_intx.dimension = data_tgt.dimension;
03127     // set the context for the source and destination applications
03129     tdata.pid_src         = pid_src;
03130     tdata.pid_dest        = pid_tgt;
03131     data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
03132     assert( data_intx.point_cloud == true );
03133 
03134     // Now allocate and initialize the remapper object
03135 #ifdef MOAB_HAVE_MPI
03136     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
03137 #else
03138     tdata.remapper = new moab::TempestRemapper( context.MBI );
03139 #endif
03140     tdata.remapper->meshValidate     = true;
03141     tdata.remapper->constructEdgeMap = true;
03142 
03143     // Do not create new filesets; Use the sets from our respective applications
03144     tdata.remapper->initialize( false );
03145     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
03146     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
03147     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03148 
03149     /* Let us make sure that the radii match for the source and target meshes. If not, rescale now
03150      * and unscale later. */
03151     bool radii_scaled = false;
03152     if( fabs( radius_source - radius_target ) > 1e-10 )
03153     { /* the radii are different */
03154         radii_scaled = true;
03155         rval         = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );CHKERRVAL( rval );
03156         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );CHKERRVAL( rval );
03157     }
03158 
03159     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );CHKERRVAL( rval );
03160     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );CHKERRVAL( rval );
03161 
03162     // First, compute the covering source set.
03163     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );CHKERRVAL( rval );
03164 
03165 #ifdef MOAB_HAVE_MPI
03166     /* VSM: This context should be set on the data_src but it would overwrite the source
03167        covering set context in case it is coupled to another APP as well.
03168        This needs a redesign. */
03169     // data_intx.covering_set = tdata.remapper->GetCoveringSet();
03170     // data_src.covering_set = tdata.remapper->GetCoveringSet();
03171 #endif
03172 
03173     // Now let us compute the global-local DoF maps needed by the remapper
03174     rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
03175 
03176     return 0;
03177 }
03178 
03179 ErrCode iMOAB_ComputeScalarProjectionWeights(
03180     iMOAB_AppID pid_intx, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03181     const iMOAB_String disc_method_source, int* disc_order_source, const iMOAB_String disc_method_target,
03182     int* disc_order_target, int* fMonotoneTypeID, int* fVolumetric, int* fNoConservation, int* fValidate,
03183     const iMOAB_String source_solution_tag_dof_name, const iMOAB_String target_solution_tag_dof_name,
03184     int solution_weights_identifier_length, int disc_method_source_length, int disc_method_target_length,
03185     int source_solution_tag_dof_name_length, int target_solution_tag_dof_name_length )
03186 {
03187     moab::ErrorCode rval;
03188 
03189     assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
03190     assert( disc_method_source_length > 0 && disc_method_target_length > 0 );
03191     assert( solution_weights_identifier_length > 0 && source_solution_tag_dof_name_length > 0 &&
03192             target_solution_tag_dof_name_length > 0 );
03193 
03194     // Get the source and target data and pcomm objects
03195     appData& data_intx       = context.appDatas[*pid_intx];
03196     TempestMapAppData& tdata = data_intx.tempestData;
03197 
03198     // Setup computation of weights
03199     // Set the context for the remapping weights computation
03200     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
03201 
03202     // Call to generate the remap weights with the tempest meshes
03203     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03204     assert( weightMap != NULL );
03205 
03206     // Now let us compute the local-global mapping and store it in the context
03207     // We need this mapping when computing matvec products and to do reductions in parallel
03208     // Additionally, the call below will also compute weights with TempestRemap
03209     rval = weightMap->GenerateRemappingWeights(
03210         std::string( disc_method_source ),
03211         std::string( disc_method_target ),               // std::string strInputType, std::string strOutputType,
03212         ( *disc_order_source ), ( *disc_order_target ),  // const int nPin, const int nPout,
03213         true,
03214         ( fMonotoneTypeID ? *fMonotoneTypeID : 0 ),          // bool fBubble=false, int fMonotoneTypeID=0,
03215         ( fVolumetric ? *fVolumetric > 0 : false ),          // bool fVolumetric=false,
03216         ( fNoConservation ? *fNoConservation > 0 : false ),  // bool fNoConservation=false,
03217         ( fValidate ? *fValidate : false ),                  // bool fNoCheck=false,
03218         source_solution_tag_dof_name, target_solution_tag_dof_name,
03219         "",              //"",   // std::string strVariables="", std::string strOutputMap="",
03220         "", "",          // std::string strInputData="", std::string strOutputData="",
03221         "", false,       // std::string strNColName="", bool fOutputDouble=false,
03222         "", false, 0.0,  // std::string strPreserveVariables="", bool fPreserveAll=false, double
03223                          // dFillValueOverride=0.0,
03224         false, false     // bool fInputConcave = false, bool fOutputConcave = false
03225     );CHKERRVAL( rval );
03226 
03227     return 0;
03228 }
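// Usage sketch for iMOAB_ComputeScalarProjectionWeights: a first-order finite-volume to
// finite-volume map, identified as "scalar" and using GLOBAL_ID as the DoF tag on both sides.
// All names and option values below are hypothetical.
static ErrCode example_compute_fv_weights( iMOAB_AppID pid_cpl_intx )
{
    char map_id[]    = "scalar";
    char fv_method[] = "fv";         // hypothetical discretization method name
    char dof_tag[]   = "GLOBAL_ID";
    int order        = 1;
    int monotonicity = 0;
    int volumetric   = 0;
    int no_conserve  = 0;
    int validate     = 1;
    return iMOAB_ComputeScalarProjectionWeights( pid_cpl_intx, map_id, fv_method, &order, fv_method, &order,
                                                 &monotonicity, &volumetric, &no_conserve, &validate,
                                                 dof_tag, dof_tag,
                                                 (int)( sizeof( map_id ) - 1 ), (int)( sizeof( fv_method ) - 1 ),
                                                 (int)( sizeof( fv_method ) - 1 ), (int)( sizeof( dof_tag ) - 1 ),
                                                 (int)( sizeof( dof_tag ) - 1 ) );
}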
03229 
03230 ErrCode iMOAB_ApplyScalarProjectionWeights(
03231     iMOAB_AppID pid_intersection, const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03232     const iMOAB_String source_solution_tag_name, const iMOAB_String target_solution_tag_name,
03233     int solution_weights_identifier_length, int source_solution_tag_name_length, int target_solution_tag_name_length )
03234 {
03235     moab::ErrorCode rval;
03236 
03237     assert( solution_weights_identifier_length > 0 && source_solution_tag_name_length > 0 &&
03238             target_solution_tag_name_length > 0 );
03239 
03240     // Get the source and target data and pcomm objects
03241     appData& data_intx       = context.appDatas[*pid_intersection];
03242     TempestMapAppData& tdata = data_intx.tempestData;
03243 
03244     // Now allocate and initialize the remapper object
03245     moab::TempestRemapper* remapper   = tdata.remapper;
03246     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03247 
03248     // we assume that there are separators ";" between the tag names
03249     std::vector< std::string > srcNames;
03250     std::vector< std::string > tgtNames;
03251     std::vector< Tag > srcTagHandles;
03252     std::vector< Tag > tgtTagHandles;
03253     std::string separator( ";" );
03254     std::string src_name( source_solution_tag_name );
03255     std::string tgt_name( target_solution_tag_name );
03256     split_tag_names( src_name, separator, srcNames );
03257     split_tag_names( tgt_name, separator, tgtNames );
03258     if( srcNames.size() != tgtNames.size() )
03259     {
03260         std::cout << " error in parsing source and target tag names. \n";
03261         return 1;
03262     }
03263 
03264     for( size_t i = 0; i < srcNames.size(); i++ )
03265     {
03266         Tag tagHandle;
03267         rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
03268         if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
03269         srcTagHandles.push_back( tagHandle );
03270         rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
03271         if( MB_SUCCESS != rval || NULL == tagHandle ) { return 1; }
03272         tgtTagHandles.push_back( tagHandle );
03273     }
03274 
03275     std::vector< double > solSTagVals;
03276     std::vector< double > solTTagVals;
03277 
03278     moab::Range sents, tents;
03279     if( data_intx.point_cloud )
03280     {
03281         appData& data_src = context.appDatas[*tdata.pid_src];
03282         appData& data_tgt = context.appDatas[*tdata.pid_dest];
03283         if( data_src.point_cloud )
03284         {
03285             moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
03286             solSTagVals.resize( covSrcEnts.size(), -1.0 );
03287             sents = covSrcEnts;
03288         }
03289         else
03290         {
03291             moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
03292             solSTagVals.resize( covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() *
03293                                     weightMap->GetSourceNDofsPerElement(),
03294                                 -1.0 );
03295             sents = covSrcEnts;
03296         }
03297         if( data_tgt.point_cloud )
03298         {
03299             moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
03300             solTTagVals.resize( tgtEnts.size(), -1.0 );
03301             tents = tgtEnts;
03302         }
03303         else
03304         {
03305             moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
03306             solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
03307                                     weightMap->GetDestinationNDofsPerElement(),
03308                                 -1.0 );
03309             tents = tgtEnts;
03310         }
03311     }
03312     else
03313     {
03314         moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
03315         moab::Range& tgtEnts    = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
03316         solSTagVals.resize(
03317             covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
03318         solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
03319                                 weightMap->GetDestinationNDofsPerElement(),
03320                             -1.0 );
03321 
03322         sents = covSrcEnts;
03323         tents = tgtEnts;
03324     }
03325 
03326     for( size_t i = 0; i < srcTagHandles.size(); i++ )
03327     {
03328         // The tag data is np*np*n_el_src
03329         Tag ssolnTag = srcTagHandles[i];
03330         Tag tsolnTag = tgtTagHandles[i];
03331         rval         = context.MBI->tag_get_data( ssolnTag, sents, &solSTagVals[0] );CHKERRVAL( rval );
03332 
03333         // Compute the application of weights on the source solution data and store it in the
03334         // destination solution vector data. Optionally, the transpose application of the weight
03335         // matrix can be performed as well; set the 3rd argument to true if this is needed
03336         rval = weightMap->ApplyWeights( solSTagVals, solTTagVals, false );CHKERRVAL( rval );
03337 
03338         // The tag data is np*np*n_el_dest
03339         rval = context.MBI->tag_set_data( tsolnTag, tents, &solTTagVals[0] );CHKERRVAL( rval );
03340     }
03341 
03342 // #define VERBOSE
03343 #ifdef VERBOSE
03344     ParallelComm* pco_intx = context.pcomms[*pid_intersection];
03345 
03346     int ivar = 0;
03347     {
03348         Tag ssolnTag = srcTagHandles[ivar];
03349         std::stringstream sstr;
03350         sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
03351         std::ofstream output_file( sstr.str().c_str() );
03352         for( unsigned i = 0; i < sents.size(); ++i )
03353         {
03354             EntityHandle elem = sents[i];
03355             std::vector< double > locsolSTagVals( 16 );
03356             rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );CHKERRVAL( rval );
03357             output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
03358             for( unsigned j = 0; j < 16; ++j )
03359                 output_file << locsolSTagVals[j] << " ";
03360         }
03361         output_file.flush();  // required here
03362         output_file.close();
03363     }
03364     {
03365         std::stringstream sstr;
03366         sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
03367         EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
03368                                  context.appDatas[*tdata.pid_dest].file_set };
03369         rval                 = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
03370     }
03371     {
03372         std::stringstream sstr;
03373         sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
03374         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
03375         EntityHandle covering_set = remapper->GetCoveringSet();
03376         EntityHandle sets[2]      = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
03377         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
03378     }
03379     {
03380         std::stringstream sstr;
03381         sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
03382         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
03383         EntityHandle covering_set = remapper->GetCoveringSet();
03384         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
03385     }
03386     {
03387         std::stringstream sstr;
03388         sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
03389         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
03390         EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
03391         rval                    = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
03392     }
03393     {
03394         std::stringstream sstr;
03395         sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
03396         std::ofstream output_file( sstr.str().c_str() );
03397         for( unsigned i = 0; i < solSTagVals.size(); ++i )
03398             output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
03399                         << solSTagVals[i] << "\n";
03400         output_file.flush();  // required here
03401         output_file.close();
03402     }
03403 #endif
03404     // #undef VERBOSE
03405 
03406     return 0;
03407 }
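// Usage sketch for iMOAB_ApplyScalarProjectionWeights: project two fields at once by passing
// ";"-separated tag name lists, as parsed above. The tag names and the map identifier are
// hypothetical; the tags must already exist on the source (coverage) and target meshes.
static ErrCode example_apply_weights( iMOAB_AppID pid_cpl_intx )
{
    char map_id[]   = "scalar";
    char src_tags[] = "a2oTbot;a2oUbot";  // hypothetical source field tags
    char tgt_tags[] = "a2oTbot;a2oUbot";  // hypothetical target field tags
    return iMOAB_ApplyScalarProjectionWeights( pid_cpl_intx, map_id, src_tags, tgt_tags,
                                               (int)( sizeof( map_id ) - 1 ), (int)( sizeof( src_tags ) - 1 ),
                                               (int)( sizeof( tgt_tags ) - 1 ) );
}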
03408 
03409 #endif  // MOAB_HAVE_TEMPESTREMAP
03410 
03411 #ifdef __cplusplus
03412 }
03413 #endif