MOAB: Mesh Oriented datABase  (version 5.3.1)
iMOAB.cpp
00001 /** \file iMOAB.cpp
00002  */
00003 
00004 #include "moab/MOABConfig.h"
00005 #include "moab/Core.hpp"
00006 
00007 #ifdef MOAB_HAVE_MPI
00008 #include "moab_mpi.h"
00009 #include "moab/ParallelComm.hpp"
00010 #include "moab/ParCommGraph.hpp"
00011 #include "moab/ParallelMergeMesh.hpp"
00012 #include "moab/IntxMesh/IntxUtils.hpp"
00013 #endif
00014 #include "DebugOutput.hpp"
00015 #include "moab/iMOAB.h"
00016 
00017 /* this is needed because of direct access to hdf5/mhdf */
00018 #ifdef MOAB_HAVE_HDF5
00019 #include "mhdf.h"
00020 #include <H5Tpublic.h>
00021 #endif
00022 
00023 #include "moab/CartVect.hpp"
00024 #include "MBTagConventions.hpp"
00025 #include "moab/MeshTopoUtil.hpp"
00026 #include "moab/ReadUtilIface.hpp"
00027 #include "moab/MergeMesh.hpp"
00028 
00029 #ifdef MOAB_HAVE_TEMPESTREMAP
00030 #include "STLStringHelper.h"
00031 #include "moab/IntxMesh/IntxUtils.hpp"
00032 
00033 #include "moab/Remapping/TempestRemapper.hpp"
00034 #include "moab/Remapping/TempestOnlineMap.hpp"
00035 #endif
00036 
00037 // C++ includes
00038 #include <cassert>
00039 #include <sstream>
00040 #include <iostream>
00041 
00042 using namespace moab;
00043 
00044 // #define VERBOSE
00045 
00046 // global variables; should they be organized in a structure for easier reference,
00047 // or kept as file-scope globals as they are now?
00048 
00049 #ifdef __cplusplus
00050 extern "C" {
00051 #endif
00052 
00053 #ifdef MOAB_HAVE_TEMPESTREMAP
00054 struct TempestMapAppData
00055 {
00056     moab::TempestRemapper* remapper;
00057     std::map< std::string, moab::TempestOnlineMap* > weightMaps;
00058     iMOAB_AppID pid_src;
00059     iMOAB_AppID pid_dest;
00060 };
00061 #endif
00062 
00063 struct appData
00064 {
00065     EntityHandle file_set;
00066     int global_id;  // external component id, unique for application
00067     std::string name;
00068     Range all_verts;
00069     Range local_verts;  // it could include shared, but not owned at the interface
00070     // these vertices would be all_verts if no ghosting was required
00071     Range ghost_vertices;  // locally ghosted from other processors
00072     Range primary_elems;
00073     Range owned_elems;
00074     Range ghost_elems;
00075     int dimension;             // 2 or 3, dimension of primary elements (redundant?)
00076     long num_global_elements;  // union of all elements in primary_elems; either from hdf5
00077                                // reading or from a reduce
00078     long num_global_vertices;  // union of all nodes, after sharing is resolved; it could be
00079                                // determined from hdf5 reading
00080     Range mat_sets;
00081     std::map< int, int > matIndex;  // map from global block id to index in mat_sets
00082     Range neu_sets;
00083     Range diri_sets;
00084     std::map< std::string, Tag > tagMap;
00085     std::vector< Tag > tagList;
00086     bool point_cloud;
00087     bool is_fortran;
00088 
00089 #ifdef MOAB_HAVE_MPI
00090     // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
00091     // application
00092     std::map< int, ParCommGraph* > pgraph;  // map from context id to the ParCommGraph*
00093 #endif
00094 
00095 #ifdef MOAB_HAVE_TEMPESTREMAP
00096     TempestMapAppData tempestData;
00097 #endif
00098 };
00099 
00100 struct GlobalContext
00101 {
00102     // are there reasons to have multiple moab inits? Is ref count needed?
00103     Interface* MBI;
00104     // we should also have the default tags stored, initialized
00105     Tag material_tag, neumann_tag, dirichlet_tag,
00106         globalID_tag;  // material, neumann, dirichlet,  globalID
00107     int refCountMB;
00108     int iArgc;
00109     iMOAB_String* iArgv;
00110     int unused_pid;
00111 
00112     std::map< std::string, int > appIdMap;  // from app string (uppercase) to app id
00113     std::map< int, int > appIdCompMap;      // from component id to app id
00114 
00115 #ifdef MOAB_HAVE_MPI
00116     std::vector< ParallelComm* > pcomms;  // created in order of applications, one moab::ParallelComm for each
00117 #endif
00118 
00119     std::vector< appData > appDatas;  // the same order as pcomms
00120     int globalrank, worldprocs;
00121     bool MPI_initialized;
00122 
00123     GlobalContext()
00124     {
00125         MBI        = 0;
00126         refCountMB = 0;
00127         unused_pid = 0;
00128     }
00129 };
00130 
00131 static struct GlobalContext context;
00132 
00133 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
00134 {
00135     if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
00136 
00137     context.iArgc = argc;
00138     context.iArgv = argv;  // shallow copy
00139 
00140     if( 0 == context.refCountMB )
00141     {
00142         context.MBI = new( std::nothrow ) moab::Core;
00143         // retrieve the default tags
00144         const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
00145                                                      DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
00146         // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
00147         Tag gtags[4];
00148 
00149         for( int i = 0; i < 4; i++ )
00150         {
00151 
00152             ErrorCode rval =
00153                 context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
00154         }
00155 
00156         context.material_tag  = gtags[0];
00157         context.neumann_tag   = gtags[1];
00158         context.dirichlet_tag = gtags[2];
00159         context.globalID_tag  = gtags[3];
00160     }
00161 
00162     context.MPI_initialized = false;
00163 #ifdef MOAB_HAVE_MPI
00164     int flagInit;
00165     MPI_Initialized( &flagInit );
00166 
00167     if( flagInit && !context.MPI_initialized )
00168     {
00169         MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
00170         MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
00171         context.MPI_initialized = true;
00172     }
00173 #endif
00174 
00175     context.refCountMB++;
00176     return moab::MB_SUCCESS;
00177 }
00178 
00179 ErrCode iMOAB_InitializeFortran()
00180 {
00181     return iMOAB_Initialize( 0, 0 );
00182 }
00183 
00184 ErrCode iMOAB_Finalize()
00185 {
00186     context.refCountMB--;
00187 
00188     if( 0 == context.refCountMB )
00189     {
00190         delete context.MBI;
00191     }
00192 
00193     return MB_SUCCESS;
00194 }
00195 
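/* Usage sketch (illustrative driver code, not part of this file): a host code pairs
 * iMOAB_Initialize / iMOAB_Finalize around all other iMOAB calls; when MOAB is built
 * with MPI, the caller owns MPI_Init / MPI_Finalize.
 *
 *   MPI_Init( &argc, &argv );        // only when built with MOAB_HAVE_MPI
 *   iMOAB_Initialize( argc, argv );  // or iMOAB_InitializeFortran() from Fortran
 *   // ... register applications, load meshes, define tags ...
 *   iMOAB_Finalize();
 *   MPI_Finalize();
 */
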
00196 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
00197 #ifdef MOAB_HAVE_MPI
00198                                    MPI_Comm* comm,
00199 #endif
00200                                    int* compid,
00201                                    iMOAB_AppID pid )
00202 {
00203     IMOAB_CHECKPOINTER( app_name, 1 );
00204 #ifdef MOAB_HAVE_MPI
00205     IMOAB_CHECKPOINTER( comm, 2 );
00206     IMOAB_CHECKPOINTER( compid, 3 );
00207 #else
00208     IMOAB_CHECKPOINTER( compid, 2 );
00209 #endif
00210 
00211     // will create a parallel comm for this application too, so there will be a
00212     // mapping from *pid to file set and to parallel comm instances
00213     std::string name( app_name );
00214 
00215     if( context.appIdMap.find( name ) != context.appIdMap.end() )
00216     {
00217         std::cout << " application " << name << " already registered \n";
00218         return moab::MB_FAILURE;
00219     }
00220 
00221     *pid                   = context.unused_pid++;
00222     context.appIdMap[name] = *pid;
00223     int rankHere           = 0;
00224 #ifdef MOAB_HAVE_MPI
00225     MPI_Comm_rank( *comm, &rankHere );
00226 #endif
00227     if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
00228     if( *compid <= 0 )
00229     {
00230         std::cout << " convention for external application is to have its id positive \n";
00231         return moab::MB_FAILURE;
00232     }
00233 
00234     if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
00235     {
00236         std::cout << " external application with comp id " << *compid << " is already registered\n";
00237         return moab::MB_FAILURE;
00238     }
00239 
00240     context.appIdCompMap[*compid] = *pid;
00241 
00242     // now create ParallelComm and a file set for this application
00243 #ifdef MOAB_HAVE_MPI
00244     if( comm )
00245     {
00246         ParallelComm* pco = new ParallelComm( context.MBI, *comm );
00247 
00248 #ifndef NDEBUG
00249         int index = pco->get_id();  // it could be useful to get app id from pcomm instance ...
00250         assert( index == *pid );
00251         // here, we assert the the pid is the same as the id of the ParallelComm instance
00252         // here, we assert that the pid is the same as the id of the ParallelComm instance
00253 #endif
00254         context.pcomms.push_back( pco );
00255     }
00256     else
00257     {
00258         context.pcomms.push_back( 0 );
00259     }
00260 #endif
00261 
00262     // create now the file set that will be used for loading the model in
00263     EntityHandle file_set;
00264     ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
00265 
00266     appData app_data;
00267     app_data.file_set  = file_set;
00268     app_data.global_id = *compid;  // will be used mostly for par comm graph
00269     app_data.name      = name;     // save the name of application
00270 
00271 #ifdef MOAB_HAVE_TEMPESTREMAP
00272     app_data.tempestData.remapper = NULL;  // Only allocate as needed
00273 #endif
00274 
00275     app_data.point_cloud = false;
00276     app_data.is_fortran  = false;
00277 
00278     context.appDatas.push_back(
00279         app_data );  // context.appDatas[*pid] will hold the data (file set, etc.) for this application
00280     return moab::MB_SUCCESS;
00281 }
00282 
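/* Registration sketch (illustrative; the component name and id are made up): the
 * external component id must be positive and unique, and the returned *pid indexes
 * the internal appDatas / pcomms vectors.
 *
 *   char appname[] = "OCEAN";
 *   int compid = 17;                  // positive, by convention
 *   int appID;
 *   iMOAB_AppID pid = &appID;         // iMOAB_AppID is an int* typedef in moab/iMOAB.h
 *   MPI_Comm comm = MPI_COMM_WORLD;
 *   iMOAB_RegisterApplication( appname, &comm, &compid, pid );
 *   // ... use the application through pid ...
 *   iMOAB_DeregisterApplication( pid );
 */
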
00283 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
00284 #ifdef MOAB_HAVE_MPI
00285                                           int* comm,
00286 #endif
00287                                           int* compid,
00288                                           iMOAB_AppID pid )
00289 {
00290     IMOAB_CHECKPOINTER( app_name, 1 );
00291 #ifdef MOAB_HAVE_MPI
00292     IMOAB_CHECKPOINTER( comm, 2 );
00293     IMOAB_CHECKPOINTER( compid, 3 );
00294 #else
00295     IMOAB_CHECKPOINTER( compid, 2 );
00296 #endif
00297 
00298     ErrCode err;
00299     assert( app_name != nullptr );
00300     std::string name( app_name );
00301 
00302 #ifdef MOAB_HAVE_MPI
00303     MPI_Comm ccomm;
00304     if( comm )
00305     {
00306         // convert from Fortran communicator to a C communicator
00307         // see transfer of handles
00308         // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
00309         ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
00310     }
00311 #endif
00312 
00313     // now call C style registration function:
00314     err = iMOAB_RegisterApplication( app_name,
00315 #ifdef MOAB_HAVE_MPI
00316                                      &ccomm,
00317 #endif
00318                                      compid, pid );
00319 
00320     // Now that we have created the application context, store that
00321     // the application being registered is from a Fortran context
00322     context.appDatas[*pid].is_fortran = true;
00323 
00324     return err;
00325 }
00326 
00327 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
00328 {
00329     // the file set and parallel comm are stored in vectors indexed by *pid
00330     // assume we did not delete anything yet
00331     // *pid will not be reused if we register another application
00332     appData& data = context.appDatas[*pid];
00333     int rankHere  = 0;
00334 #ifdef MOAB_HAVE_MPI
00335     ParallelComm* pco = context.pcomms[*pid];
00336     rankHere          = pco->rank();
00337 #endif
00338     if( !rankHere )
00339         std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
00340                   << " is de-registered now \n";
00341 
00342     EntityHandle fileSet = data.file_set;
00343     // get all entities part of the file set
00344     Range fileents;
00345     ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
00346 
00347     fileents.insert( fileSet );
00348 
00349     rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval );  // append all mesh sets
00350 
00351 #ifdef MOAB_HAVE_TEMPESTREMAP
00352     if( data.tempestData.remapper ) delete data.tempestData.remapper;
00353     if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
00354 #endif
00355 
00356 #ifdef MOAB_HAVE_MPI
00357 
00358     // we could get the pco also with
00359     // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
00360 
00361     std::map< int, ParCommGraph* >& pargs = data.pgraph;
00362 
00363     // free the parallel comm graphs associated with this app
00364     for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
00365     {
00366         ParCommGraph* pgr = mt->second;
00367         delete pgr;
00368         pgr = NULL;
00369     }
00370     if( pco ) delete pco;
00371 #endif
00372 
00373     // delete first all except vertices
00374     Range vertices = fileents.subset_by_type( MBVERTEX );
00375     Range noverts  = subtract( fileents, vertices );
00376 
00377     rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
00378     // now retrieve connected elements that still exist (maybe in other sets, pids?)
00379     Range adj_ents_left;
00380     rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00381     rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00382     rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00383 
00384     if( !adj_ents_left.empty() )
00385     {
00386         Range conn_verts;
00387         rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
00388         vertices = subtract( vertices, conn_verts );
00389     }
00390 
00391     rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
00392 
00393     std::map< std::string, int >::iterator mit;
00394 
00395     for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
00396     {
00397         int pidx = mit->second;
00398 
00399         if( *pid == pidx )
00400         {
00401             break;
00402         }
00403     }
00404 
00405     context.appIdMap.erase( mit );
00406     std::map< int, int >::iterator mit1;
00407 
00408     for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
00409     {
00410         int pidx = mit1->second;
00411 
00412         if( *pid == pidx )
00413         {
00414             break;
00415         }
00416     }
00417 
00418     context.appIdCompMap.erase( mit1 );
00419 
00420     context.unused_pid--;  // applications must be deregistered in reverse order of registration (TODO: generalize)
00421     context.appDatas.pop_back();
00422 #ifdef MOAB_HAVE_MPI
00423     context.pcomms.pop_back();
00424 #endif
00425     return moab::MB_SUCCESS;
00426 }
00427 
00428 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
00429 {
00430     // release any Fortran specific allocations here before we pass it on
00431     context.appDatas[*pid].is_fortran = false;
00432 
00433     // release all datastructure allocations
00434     return iMOAB_DeregisterApplication( pid );
00435 }
00436 
00437 // Utility function
00438 static void split_tag_names( std::string input_names,
00439                              std::string& separator,
00440                              std::vector< std::string >& list_tag_names )
00441 {
00442     size_t pos = 0;
00443     std::string token;
00444     while( ( pos = input_names.find( separator ) ) != std::string::npos )
00445     {
00446         token = input_names.substr( 0, pos );
00447         if( !token.empty() ) list_tag_names.push_back( token );
00448         // std::cout << token << std::endl;
00449         input_names.erase( 0, pos + separator.length() );
00450     }
00451     if( !input_names.empty() )
00452     {
00453         // if anything is left over (the string did not end with the delimiter), keep it too
00454         list_tag_names.push_back( input_names );
00455     }
00456     return;
00457 }
00458 
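/* Example for the helper above (illustrative tag names): with separator ":" the
 * string "a:b:c" is split into the three tokens "a", "b", "c"; empty tokens from
 * consecutive separators are skipped, and a trailing token without a final
 * separator is kept.
 *
 *   std::vector< std::string > names;
 *   std::string sep( ":" );
 *   split_tag_names( "a:b:c", sep, names );  // names == { "a", "b", "c" }
 */
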
00459 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
00460                               int* num_global_vertices,
00461                               int* num_global_elements,
00462                               int* num_dimension,
00463                               int* num_parts )
00464 {
00465     IMOAB_CHECKPOINTER( filename, 1 );
00466     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00467 
00468 #ifdef MOAB_HAVE_HDF5
00469     std::string filen( filename );
00470 
00471     int edges   = 0;
00472     int faces   = 0;
00473     int regions = 0;
00474     if( num_global_vertices ) *num_global_vertices = 0;
00475     if( num_global_elements ) *num_global_elements = 0;
00476     if( num_dimension ) *num_dimension = 0;
00477     if( num_parts ) *num_parts = 0;
00478 
00479     mhdf_FileHandle file;
00480     mhdf_Status status;
00481     unsigned long max_id;
00482     struct mhdf_FileDesc* data;
00483 
00484     file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
00485 
00486     if( mhdf_isError( &status ) )
00487     {
00488         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00489         return moab::MB_FAILURE;
00490     }
00491 
00492     data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
00493                                 1 );  // will use extra set info; will get parallel partition tag info too!
00494 
00495     if( mhdf_isError( &status ) )
00496     {
00497         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00498         return moab::MB_FAILURE;
00499     }
00500 
00501     if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
00502     if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
00503 
00504     for( int i = 0; i < data->num_elem_desc; i++ )
00505     {
00506         struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
00507         struct mhdf_EntDesc* ent_d    = &( el_desc->desc );
00508 
00509         if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
00510         {
00511             edges += ent_d->count;
00512         }
00513 
00514         if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
00515         {
00516             faces += ent_d->count;
00517         }
00518 
00519         if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
00520         {
00521             faces += ent_d->count;
00522         }
00523 
00524         if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
00525         {
00526             faces += ent_d->count;
00527         }
00528 
00529         if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
00530         {
00531             regions += ent_d->count;
00532         }
00533 
00534         if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
00535         {
00536             regions += ent_d->count;
00537         }
00538 
00539         if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
00540         {
00541             regions += ent_d->count;
00542         }
00543 
00544         if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
00545         {
00546             regions += ent_d->count;
00547         }
00548 
00549         if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
00550         {
00551             regions += ent_d->count;
00552         }
00553 
00554         if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
00555         {
00556             regions += ent_d->count;
00557         }
00558 
00559         if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
00560         {
00561             regions += ent_d->count;
00562         }
00563     }
00564 
00565     if( num_parts ) *num_parts = data->numEntSets[0];
00566 
00567     // is this required?
00568     if( edges > 0 )
00569     {
00570         if( num_dimension ) *num_dimension = 1;  // I don't think it will ever return 1
00571         if( num_global_elements ) *num_global_elements = edges;
00572     }
00573 
00574     if( faces > 0 )
00575     {
00576         if( num_dimension ) *num_dimension = 2;
00577         if( num_global_elements ) *num_global_elements = faces;
00578     }
00579 
00580     if( regions > 0 )
00581     {
00582         if( num_dimension ) *num_dimension = 3;
00583         if( num_global_elements ) *num_global_elements = regions;
00584     }
00585 
00586     mhdf_closeFile( file, &status );
00587 
00588     free( data );
00589 
00590 #else
00591     std::cout << filename
00592               << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
00593                  "formats other than an h5m file.\n";
00594     if( num_global_vertices ) *num_global_vertices = 0;
00595     if( num_global_elements ) *num_global_elements = 0;
00596     if( num_dimension ) *num_dimension = 0;
00597     if( num_parts ) *num_parts = 0;
00598 #endif
00599 
00600     return moab::MB_SUCCESS;
00601 }
00602 
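/* Header-query sketch (illustrative file name): any output pointer may be NULL if
 * that value is not needed; requires an h5m file and an HDF5-enabled build.
 *
 *   char h5mfile[] = "ocean_mesh.h5m";
 *   int nverts, nelems, dim, nparts;
 *   iMOAB_ReadHeaderInfo( h5mfile, &nverts, &nelems, &dim, &nparts );
 */
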
00603 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
00604                         const iMOAB_String filename,
00605                         const iMOAB_String read_options,
00606                         int* num_ghost_layers )
00607 {
00608     IMOAB_CHECKPOINTER( filename, 2 );
00609     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00610 
00611     // make sure we use the file set and pcomm associated with the *pid
00612     std::ostringstream newopts;
00613     if( read_options ) newopts << read_options;
00614 
00615 #ifdef MOAB_HAVE_MPI
00616 
00617     if( context.MPI_initialized )
00618     {
00619         if( context.worldprocs > 1 )
00620         {
00621             std::string opts( ( read_options ? read_options : "" ) );
00622             std::string pcid( "PARALLEL_COMM=" );
00623             std::size_t found = opts.find( pcid );
00624 
00625             if( found != std::string::npos )
00626             {
00627                 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00628                 return moab::MB_FAILURE;
00629             }
00630 
00631             // apply the PARALLEL_COMM option only for h5m files; it does not work for .g
00632             // files (used in test_remapping)
00633             std::string filen( filename );
00634             std::string::size_type idx = filen.rfind( '.' );
00635 
00636             if( idx != std::string::npos )
00637             {
00638                 std::string extension = filen.substr( idx + 1 );
00639                 if( extension == std::string( "h5m" ) ) newopts << ";;PARALLEL_COMM=" << *pid;
00640             }
00641 
00642             if( *num_ghost_layers >= 1 )
00643             {
00644                 // if we want ghosts, we also want additional entities (the trailing .3),
00645                 // because the additional entities can be edges or faces that are part of the Neumann sets
00646                 std::string pcid2( "PARALLEL_GHOSTS=" );
00647                 std::size_t found2 = opts.find( pcid2 );
00648 
00649                 if( found2 != std::string::npos )
00650                 {
00651                     std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
00652                                  "number of layers \n";
00653                 }
00654                 else
00655                 {
00656                     // dimension of primary entities is 3 here, but it could be 2 for climate
00657                     // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
00658                     // example:  ";PARALLEL_GHOSTS=2.0.1"
00659                     newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
00660                 }
00661             }
00662         }
00663     }
00664 #else
00665     IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
00666 #endif
00667 
00668     // Now let us actually load the MOAB file with the appropriate read options
00669     ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
00670 
00671 #ifdef VERBOSE
00672     // some debugging stuff
00673     std::ostringstream outfile;
00674 #ifdef MOAB_HAVE_MPI
00675     int rank   = context.pcomms[*pid]->rank();
00676     int nprocs = context.pcomms[*pid]->size();
00677     outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
00678 #else
00679     outfile << "TaskMesh_n1.0.h5m";
00680 #endif
00681     // the mesh contains ghosts too, but they are not part of mat/neumann set
00682     // write in serial the file, to see what tags are missing
00683     rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval );  // everything on current task, written in serial
00684 #endif
00685 
00686     // Update mesh information
00687     return iMOAB_UpdateMeshInfo( pid );
00688 }
00689 
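/* Load sketch (illustrative file name and options): the read options are regular
 * MOAB reader options; PARALLEL_COMM must not be passed, it is appended internally
 * from *pid for h5m files.
 *
 *   char meshfile[] = "ocean_mesh.h5m";
 *   char ropts[]    = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
 *   int nghosts     = 1;  // one layer of ghost elements
 *   iMOAB_LoadMesh( pid, meshfile, ropts, &nghosts );
 */
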
00690 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
00691 {
00692     IMOAB_CHECKPOINTER( filename, 2 );
00693     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00694 
00695     appData& data        = context.appDatas[*pid];
00696     EntityHandle fileSet = data.file_set;
00697 
00698     std::ostringstream newopts;
00699 #ifdef MOAB_HAVE_MPI
00700     std::string write_opts( ( write_options ? write_options : "" ) );
00701     std::string pcid( "PARALLEL_COMM=" );
00702     std::size_t found = write_opts.find( pcid );
00703 
00704     if( found != std::string::npos )
00705     {
00706         std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00707         return moab::MB_FAILURE;
00708     }
00709 
00710     // if writing in parallel, add the PARALLEL_COMM option, to be sure which ParallelComm instance is used
00711     std::string pw( "PARALLEL=WRITE_PART" );
00712     found = write_opts.find( pw );
00713 
00714     if( found != std::string::npos )
00715     {
00716         newopts << "PARALLEL_COMM=" << *pid << ";";
00717     }
00718 
00719 #endif
00720 
00721     if( write_options ) newopts << write_options;
00722 
00723     std::vector< Tag > copyTagList = data.tagList;
00724     // append Global ID and Parallel Partition
00725     std::string gid_name_tag( "GLOBAL_ID" );
00726 
00727     // export global id tag, we need it always
00728     if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
00729     {
00730         Tag gid = context.MBI->globalId_tag();
00731         copyTagList.push_back( gid );
00732     }
00733     // also Parallel_Partition PARALLEL_PARTITION
00734     std::string pp_name_tag( "PARALLEL_PARTITION" );
00735 
00736     // write parallel part tag too, if it exists
00737     if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
00738     {
00739         Tag ptag;
00740         context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
00741         if( ptag ) copyTagList.push_back( ptag );
00742     }
00743 
00744     // Now let us actually write the file to disk with appropriate options
00745     ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, &copyTagList[0],
00746                                               (int)copyTagList.size() );MB_CHK_ERR( rval );
00747 
00748     return moab::MB_SUCCESS;
00749 }
00750 
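/* Write sketch (illustrative file name): to write in parallel, pass
 * "PARALLEL=WRITE_PART"; the matching PARALLEL_COMM option is prepended internally
 * from *pid.
 *
 *   char outfile[] = "ocean_out.h5m";
 *   char wopts[]   = "PARALLEL=WRITE_PART";
 *   iMOAB_WriteMesh( pid, outfile, wopts );
 */
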
00751 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, iMOAB_String prefix )
00752 {
00753     IMOAB_CHECKPOINTER( prefix, 2 );
00754     IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
00755 
00756     std::ostringstream file_name;
00757     int rank = 0, size = 1;
00758 #ifdef MOAB_HAVE_MPI
00759     rank = context.pcomms[*pid]->rank();
00760     size = context.pcomms[*pid]->size();
00761 #endif
00762     file_name << prefix << "_" << size << "_" << rank << ".h5m";
00763     // Now let us actually write the file to disk with appropriate options
00764     ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
00765 
00766     return moab::MB_SUCCESS;
00767 }
00768 
00769 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
00770 {
00771     // this will include ghost elements info
00772     appData& data        = context.appDatas[*pid];
00773     EntityHandle fileSet = data.file_set;
00774 
00775     // first clear all data ranges; this can be called after ghosting
00776     data.all_verts.clear();
00777     data.primary_elems.clear();
00778     data.local_verts.clear();
00779     data.ghost_vertices.clear();
00780     data.owned_elems.clear();
00781     data.ghost_elems.clear();
00782     data.mat_sets.clear();
00783     data.neu_sets.clear();
00784     data.diri_sets.clear();
00785 
00786     // Let us get all the vertex entities
00787     ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval );  // recursive
00788 
00789     // Let us check first entities of dimension = 3
00790     data.dimension = 3;
00791     rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00792 
00793     if( data.primary_elems.empty() )
00794     {
00795         // No 3-D elements. Let us check entities of dimension = 2
00796         data.dimension = 2;
00797         rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00798 
00799         if( data.primary_elems.empty() )
00800         {
00801             // No 3-D/2-D elements. Let us check entities of dimension = 1
00802             data.dimension = 1;
00803             rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00804 
00805             if( data.primary_elems.empty() )
00806             {
00807                 // no elements of dimension 1 or 2 or 3; it could happen for point clouds
00808                 data.dimension = 0;
00809             }
00810         }
00811     }
00812 
00813     // check if the current mesh is just a point cloud
00814     data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
00815 
00816 #ifdef MOAB_HAVE_MPI
00817 
00818     if( context.MPI_initialized )
00819     {
00820         ParallelComm* pco = context.pcomms[*pid];
00821 
00822         // filter ghost vertices, from local
00823         rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
00824 
00825         // Store handles for all ghosted entities
00826         data.ghost_vertices = subtract( data.all_verts, data.local_verts );
00827 
00828         // filter ghost elements, from local
00829         rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
00830 
00831         data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
00832     }
00833     else
00834     {
00835         data.local_verts = data.all_verts;
00836         data.owned_elems = data.primary_elems;
00837     }
00838 
00839 #else
00840 
00841     data.local_verts = data.all_verts;
00842     data.owned_elems = data.primary_elems;
00843 
00844 #endif
00845 
00846     // Get the references for some standard internal tags such as material blocks, BCs, etc
00847     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00848                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00849 
00850     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00851                                                       data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00852 
00853     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00854                                                       data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00855 
00856     return moab::MB_SUCCESS;
00857 }
00858 
00859 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
00860                            int* num_visible_vertices,
00861                            int* num_visible_elements,
00862                            int* num_visible_blocks,
00863                            int* num_visible_surfaceBC,
00864                            int* num_visible_vertexBC )
00865 {
00866     ErrorCode rval;
00867     appData& data        = context.appDatas[*pid];
00868     EntityHandle fileSet = data.file_set;
00869 
00870     // the counts returned below include ghost entities;
00871     // this can be called after ghosting as well
00872     if( num_visible_elements )
00873     {
00874         num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
00875         // separate ghost and local/owned primary elements
00876         num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
00877         num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
00878     }
00879     if( num_visible_vertices )
00880     {
00881         num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
00882         num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
00883         // local are those that are not ghosts
00884         num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
00885     }
00886 
00887     if( num_visible_blocks )
00888     {
00889         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00890                                                           data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00891 
00892         num_visible_blocks[2] = data.mat_sets.size();
00893         num_visible_blocks[0] = num_visible_blocks[2];
00894         num_visible_blocks[1] = 0;
00895     }
00896 
00897     if( num_visible_surfaceBC )
00898     {
00899         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00900                                                           data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00901 
00902         num_visible_surfaceBC[2] = 0;
00903         // count how many faces are in each neu set, and how many regions are
00904         // adjacent to them;
00905         int numNeuSets = (int)data.neu_sets.size();
00906 
00907         for( int i = 0; i < numNeuSets; i++ )
00908         {
00909             Range subents;
00910             EntityHandle nset = data.neu_sets[i];
00911             rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
00912 
00913             for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
00914             {
00915                 EntityHandle subent = *it;
00916                 Range adjPrimaryEnts;
00917                 rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
00918 
00919                 num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
00920             }
00921         }
00922 
00923         num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
00924         num_visible_surfaceBC[1] = 0;
00925     }
00926 
00927     if( num_visible_vertexBC )
00928     {
00929         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00930                                                           data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00931 
00932         num_visible_vertexBC[2] = 0;
00933         int numDiriSets         = (int)data.diri_sets.size();
00934 
00935         for( int i = 0; i < numDiriSets; i++ )
00936         {
00937             Range verts;
00938             EntityHandle diset = data.diri_sets[i];
00939             rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
00940 
00941             num_visible_vertexBC[2] += (int)verts.size();
00942         }
00943 
00944         num_visible_vertexBC[0] = num_visible_vertexBC[2];
00945         num_visible_vertexBC[1] = 0;
00946     }
00947 
00948     return moab::MB_SUCCESS;
00949 }
00950 
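/* Query sketch: each counter argument of iMOAB_GetMeshInfo is an int[3] filled as
 * [0] = owned/local, [1] = ghost, [2] = total visible (owned + ghost).
 *
 *   int nverts[3], nelems[3], nblocks[3], nsbc[3], nvbc[3];
 *   iMOAB_GetMeshInfo( pid, nverts, nelems, nblocks, nsbc, nvbc );
 */
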
00951 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
00952 {
00953     IMOAB_CHECKPOINTER( vertices_length, 2 );
00954     IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
00955 
00956     const Range& verts = context.appDatas[*pid].all_verts;
00957     // check for problems with array length
00958     IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
00959 
00960     // global id tag is context.globalID_tag
00961     return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
00962 }
00963 
00964 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
00965 {
00966     assert( vertices_length && *vertices_length );
00967     assert( visible_global_rank_ID );
00968 
00969     Range& verts = context.appDatas[*pid].all_verts;
00970     int i        = 0;
00971 #ifdef MOAB_HAVE_MPI
00972     ParallelComm* pco = context.pcomms[*pid];
00973 
00974     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00975     {
00976         ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
00977     }
00978 
00979     if( i != *vertices_length )
00980     {
00981         return moab::MB_FAILURE;
00982     }  // warning array allocation problem
00983 
00984 #else
00985 
00986     /* everything owned by proc 0 */
00987     if( (int)verts.size() != *vertices_length )
00988     {
00989         return moab::MB_FAILURE;
00990     }  // warning array allocation problem
00991 
00992     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00993     {
00994         visible_global_rank_ID[i] = 0;
00995     }  // all vertices are owned by processor 0, as this is serial run
00996 
00997 #endif
00998 
00999     return moab::MB_SUCCESS;
01000 }
01001 
01002 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
01003 {
01004     Range& verts = context.appDatas[*pid].all_verts;
01005 
01006     // interleaved coordinates, so that means deep copy anyway
01007     if( *coords_length != 3 * (int)verts.size() )
01008     {
01009         return moab::MB_FAILURE;
01010     }
01011 
01012     ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
01013 
01014     return moab::MB_SUCCESS;
01015 }
01016 
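/* Coordinates sketch: coordinates are returned interleaved (x, y, z per vertex),
 * so the buffer must hold 3 * (total visible vertices), e.g. nverts[2] from the
 * iMOAB_GetMeshInfo sketch above.
 *
 *   std::vector< double > coords( 3 * nverts[2] );
 *   int clen = (int)coords.size();
 *   iMOAB_GetVisibleVerticesCoordinates( pid, &clen, coords.data() );
 */
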
01017 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
01018 {
01019     // local block ids are counted from 0 to the number of visible blocks;
01020     // the global block id returned here is the material set tag value
01021     Range& matSets = context.appDatas[*pid].mat_sets;
01022 
01023     if( *block_length != (int)matSets.size() )
01024     {
01025         return moab::MB_FAILURE;
01026     }
01027 
01028     // return the material set tag value (gtags[0] is the material set tag)
01029     ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
01030 
01031     // populate map with index
01032     std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
01033     for( unsigned i = 0; i < matSets.size(); i++ )
01034     {
01035         matIdx[global_block_IDs[i]] = i;
01036     }
01037 
01038     return moab::MB_SUCCESS;
01039 }
01040 
01041 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
01042                             iMOAB_GlobalID* global_block_ID,
01043                             int* vertices_per_element,
01044                             int* num_elements_in_block )
01045 {
01046     assert( global_block_ID );
01047 
01048     std::map< int, int >& matMap      = context.appDatas[*pid].matIndex;
01049     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01050 
01051     if( it == matMap.end() )
01052     {
01053         return moab::MB_FAILURE;
01054     }  // error in finding block with id
01055 
01056     int blockIndex          = matMap[*global_block_ID];
01057     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01058     Range blo_elems;
01059     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
01060 
01061     if( MB_SUCCESS != rval || blo_elems.empty() )
01062     {
01063         return moab::MB_FAILURE;
01064     }
01065 
01066     EntityType type = context.MBI->type_from_handle( blo_elems[0] );
01067 
01068     if( !blo_elems.all_of_type( type ) )
01069     {
01070         return moab::MB_FAILURE;
01071     }  // not all of the same type
01072 
01073     const EntityHandle* conn = NULL;
01074     int num_verts            = 0;
01075     rval                     = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
01076 
01077     *vertices_per_element  = num_verts;
01078     *num_elements_in_block = (int)blo_elems.size();
01079 
01080     return moab::MB_SUCCESS;
01081 }
01082 
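/* Block sketch (illustrative block id): iMOAB_GetBlockInfo reports the uniform
 * number of vertices per element and the element count of a block, which together
 * size the connectivity array expected by iMOAB_GetBlockElementConnectivities.
 *
 *   iMOAB_GlobalID blockID = global_block_IDs[0];  // from iMOAB_GetBlockID
 *   int vpe, nelb;
 *   iMOAB_GetBlockInfo( pid, &blockID, &vpe, &nelb );
 *   std::vector< int > conn( vpe * nelb );
 *   int clen = (int)conn.size();
 *   iMOAB_GetBlockElementConnectivities( pid, &blockID, &clen, conn.data() );
 */
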
01083 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
01084                                       int* num_visible_elements,
01085                                       iMOAB_GlobalID* element_global_IDs,
01086                                       int* ranks,
01087                                       iMOAB_GlobalID* block_IDs )
01088 {
01089     appData& data = context.appDatas[*pid];
01090 #ifdef MOAB_HAVE_MPI
01091     ParallelComm* pco = context.pcomms[*pid];
01092 #endif
01093 
01094     ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
01095 
01096     int i = 0;
01097 
01098     for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
01099     {
01100 #ifdef MOAB_HAVE_MPI
01101         rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
01102 
01103 #else
01104         /* everything owned by task 0 */
01105         ranks[i]             = 0;
01106 #endif
01107     }
01108 
01109     for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
01110     {
01111         EntityHandle matMeshSet = *mit;
01112         Range elems;
01113         rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01114 
01115         int valMatTag;
01116         rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
01117 
01118         for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
01119         {
01120             EntityHandle eh = *eit;
01121             int index       = data.primary_elems.index( eh );
01122 
01123             if( -1 == index )
01124             {
01125                 return moab::MB_FAILURE;
01126             }
01127 
01128             if( -1 >= *num_visible_elements )
01129             {
01130                 return moab::MB_FAILURE;
01131             }
01132 
01133             block_IDs[index] = valMatTag;
01134         }
01135     }
01136 
01137     return moab::MB_SUCCESS;
01138 }
01139 
01140 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
01141                                              iMOAB_GlobalID* global_block_ID,
01142                                              int* connectivity_length,
01143                                              int* element_connectivity )
01144 {
01145     assert( global_block_ID );      // ensure global block ID argument is not null
01146     assert( connectivity_length );  // ensure connectivity length argument is not null
01147 
01148     appData& data                     = context.appDatas[*pid];
01149     std::map< int, int >& matMap      = data.matIndex;
01150     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01151 
01152     if( it == matMap.end() )
01153     {
01154         return moab::MB_FAILURE;
01155     }  // error in finding block with id
01156 
01157     int blockIndex          = matMap[*global_block_ID];
01158     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01159     std::vector< EntityHandle > elems;
01160 
01161     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01162 
01163     if( elems.empty() )
01164     {
01165         return moab::MB_FAILURE;
01166     }
01167 
01168     std::vector< EntityHandle > vconnect;
01169     rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
01170 
01171     if( *connectivity_length != (int)vconnect.size() )
01172     {
01173         return moab::MB_FAILURE;
01174     }  // mismatched sizes
01175 
01176     for( int i = 0; i < *connectivity_length; i++ )
01177     {
01178         int inx = data.all_verts.index( vconnect[i] );
01179 
01180         if( -1 == inx )
01181         {
01182             return moab::MB_FAILURE;
01183         }  // error, vertex not in local range
01184 
01185         element_connectivity[i] = inx;
01186     }
01187 
01188     return moab::MB_SUCCESS;
01189 }
01190 
01191 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
01192                                       iMOAB_LocalID* elem_index,
01193                                       int* connectivity_length,
01194                                       int* element_connectivity )
01195 {
01196     assert( elem_index );           // ensure element index argument is not null
01197     assert( connectivity_length );  // ensure connectivity length argument is not null
01198 
01199     appData& data = context.appDatas[*pid];
01200     assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
01201 
01202     int num_nodes;
01203     const EntityHandle* conn;
01204 
01205     EntityHandle eh = data.primary_elems[*elem_index];
01206 
01207     ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
01208 
01209     if( *connectivity_length < num_nodes )
01210     {
01211         return moab::MB_FAILURE;
01212     }  // wrong number of vertices
01213 
01214     for( int i = 0; i < num_nodes; i++ )
01215     {
01216         int index = data.all_verts.index( conn[i] );
01217 
01218         if( -1 == index )
01219         {
01220             return moab::MB_FAILURE;
01221         }
01222 
01223         element_connectivity[i] = index;
01224     }
01225 
01226     *connectivity_length = num_nodes;
01227 
01228     return moab::MB_SUCCESS;
01229 }
01230 
01231 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
01232                                    iMOAB_GlobalID* global_block_ID,
01233                                    int* num_elements_in_block,
01234                                    int* element_ownership )
01235 {
01236     assert( global_block_ID );        // ensure global block ID argument is not null
01237     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01238 
01239     std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
01240 
01241     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01242 
01243     if( it == matMap.end() )
01244     {
01245         return moab::MB_FAILURE;
01246     }  // error in finding block with id
01247 
01248     int blockIndex          = matMap[*global_block_ID];
01249     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01250     Range elems;
01251 
01252     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01253 
01254     if( elems.empty() )
01255     {
01256         return moab::MB_FAILURE;
01257     }
01258 
01259     if( *num_elements_in_block != (int)elems.size() )
01260     {
01261         return moab::MB_FAILURE;
01262     }  // bad memory allocation
01263 
01264     int i = 0;
01265 #ifdef MOAB_HAVE_MPI
01266     ParallelComm* pco = context.pcomms[*pid];
01267 #endif
01268 
01269     for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
01270     {
01271 #ifdef MOAB_HAVE_MPI
01272         rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
01273 #else
01274         element_ownership[i] = 0; /* owned by 0 */
01275 #endif
01276     }
01277 
01278     return moab::MB_SUCCESS;
01279 }
01280 
01281 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
01282                             iMOAB_GlobalID* global_block_ID,
01283                             int* num_elements_in_block,
01284                             iMOAB_GlobalID* global_element_ID,
01285                             iMOAB_LocalID* local_element_ID )
01286 {
01287     assert( global_block_ID );        // ensure global block ID argument is not null
01288     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01289 
01290     appData& data                = context.appDatas[*pid];
01291     std::map< int, int >& matMap = data.matIndex;
01292 
01293     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01294 
01295     if( it == matMap.end() )
01296     {
01297         return moab::MB_FAILURE;
01298     }  // error in finding block with id
01299 
01300     int blockIndex          = matMap[*global_block_ID];
01301     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01302     Range elems;
01303     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01304 
01305     if( elems.empty() )
01306     {
01307         return moab::MB_FAILURE;
01308     }
01309 
01310     if( *num_elements_in_block != (int)elems.size() )
01311     {
01312         return moab::MB_FAILURE;
01313     }  // bad memory allocation
01314 
01315     rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
01316 
01317     // check that elems are among primary_elems in data
01318     for( int i = 0; i < *num_elements_in_block; i++ )
01319     {
01320         local_element_ID[i] = data.primary_elems.index( elems[i] );
01321 
01322         if( -1 == local_element_ID[i] )
01323         {
01324             return moab::MB_FAILURE;
01325         }  // error, not in local primary elements
01326     }
01327 
01328     return moab::MB_SUCCESS;
01329 }
01330 
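/* Element-id sketch: for a block with nelb elements (see iMOAB_GetBlockInfo),
 * this returns both the global ids and the local indices of its elements, where a
 * local index is the position within the application's visible primary elements.
 *
 *   std::vector< iMOAB_GlobalID > gids( nelb );
 *   std::vector< iMOAB_LocalID >  lids( nelb );
 *   iMOAB_GetElementID( pid, &blockID, &nelb, gids.data(), lids.data() );
 */
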
01331 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
01332                                      int* surface_BC_length,
01333                                      iMOAB_LocalID* local_element_ID,
01334                                      int* reference_surface_ID,
01335                                      int* boundary_condition_value )
01336 {
01337     // we have to fill BC data for Neumann sets
01338     ErrorCode rval;
01339 
01340     // it was counted above, in GetMeshInfo
01341     appData& data  = context.appDatas[*pid];
01342     int numNeuSets = (int)data.neu_sets.size();
01343 
01344     int index = 0;  // index [0, surface_BC_length) for the arrays returned
01345 
01346     for( int i = 0; i < numNeuSets; i++ )
01347     {
01348         Range subents;
01349         EntityHandle nset = data.neu_sets[i];
01350         rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
01351 
01352         int neuVal;
01353         rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
01354 
01355         for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
01356         {
01357             EntityHandle subent = *it;
01358             Range adjPrimaryEnts;
01359             rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
01360 
01361             // get global id of the primary ents, and side number of the quad/subentity
01362             // this is moab ordering
01363             for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
01364             {
01365                 EntityHandle primaryEnt = *pit;
01366                 // get global id
01367                 /*int globalID;
01368                 rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
01369                 if (MB_SUCCESS!=rval)
01370                   return moab::MB_FAILURE;
01371                 global_element_ID[index] = globalID;*/
01372 
01373                 // get local element id
01374                 local_element_ID[index] = data.primary_elems.index( primaryEnt );
01375 
01376                 if( -1 == local_element_ID[index] )
01377                 {
01378                     return moab::MB_FAILURE;
01379                 }  // did not find the element locally
01380 
01381                 int side_number, sense, offset;
01382                 rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
01383 
01384                 reference_surface_ID[index]     = side_number + 1;  // moab is from 0 to 5, it needs 1 to 6
01385                 boundary_condition_value[index] = neuVal;
01386                 index++;
01387             }
01388         }
01389     }
01390 
01391     if( index != *surface_BC_length )
01392     {
01393         return moab::MB_FAILURE;
01394     }  // error in array allocations
01395 
01396     return moab::MB_SUCCESS;
01397 }
01398 
01399 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
01400                                     int* vertex_BC_length,
01401                                     iMOAB_LocalID* local_vertex_ID,
01402                                     int* boundary_condition_value )
01403 {
01404     ErrorCode rval;
01405 
01406     // it was counted above, in GetMeshInfo
01407     appData& data   = context.appDatas[*pid];
01408     int numDiriSets = (int)data.diri_sets.size();
01409     int index       = 0;  // index [0, *vertex_BC_length) for the arrays returned
01410 
01411     for( int i = 0; i < numDiriSets; i++ )
01412     {
01413         Range verts;
01414         EntityHandle diset = data.diri_sets[i];
01415         rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
01416 
01417         int diriVal;
01418         rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
01419 
01420         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01421         {
01422             EntityHandle vt = *vit;
01423             /*int vgid;
01424             rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
01425             if (MB_SUCCESS!=rval)
01426               return moab::MB_FAILURE;
01427             global_vertext_ID[index] = vgid;*/
01428             local_vertex_ID[index] = data.all_verts.index( vt );
01429 
01430             if( -1 == local_vertex_ID[index] )
01431             {
01432                 return moab::MB_FAILURE;
01433             }  // vertex was not found
01434 
01435             boundary_condition_value[index] = diriVal;
01436             index++;
01437         }
01438     }
01439 
01440     if( *vertex_BC_length != index )
01441     {
01442         return moab::MB_FAILURE;
01443     }  // array allocation issue
01444 
01445     return moab::MB_SUCCESS;
01446 }
01447 
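/* Vertex-BC sketch: the array length is the total reported by iMOAB_GetMeshInfo
 * in num_visible_vertexBC[2] (nvbc[2] in the sketch above); the surface-BC variant
 * is sized analogously from num_visible_surfaceBC[2].
 *
 *   int vbclen = nvbc[2];
 *   std::vector< iMOAB_LocalID > bcVerts( vbclen );
 *   std::vector< int >           bcVals( vbclen );
 *   iMOAB_GetPointerToVertexBC( pid, &vbclen, bcVerts.data(), bcVals.data() );
 */
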
01448 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
01449                                 const iMOAB_String tag_storage_name,
01450                                 int* tag_type,
01451                                 int* components_per_entity,
01452                                 int* tag_index )
01453 {
01454     // see if the tag already exists and, if yes, check the type and length
01455     if( *tag_type < 0 || *tag_type > 5 )
01456     {
01457         return moab::MB_FAILURE;
01458     }  // we have 6 types of tags supported so far
01459 
01460     DataType tagDataType;
01461     TagType tagType;
01462     void* defaultVal        = NULL;
01463     int* defInt             = new int[*components_per_entity];
01464     double* defDouble       = new double[*components_per_entity];
01465     EntityHandle* defHandle = new EntityHandle[*components_per_entity];
01466 
01467     for( int i = 0; i < *components_per_entity; i++ )
01468     {
01469         defInt[i]    = 0;
01470         defDouble[i] = -1e+10;
01471         defHandle[i] = (EntityHandle)0;
01472     }
01473 
01474     switch( *tag_type )
01475     {
01476         case 0:
01477             tagDataType = MB_TYPE_INTEGER;
01478             tagType     = MB_TAG_DENSE;
01479             defaultVal  = defInt;
01480             break;
01481 
01482         case 1:
01483             tagDataType = MB_TYPE_DOUBLE;
01484             tagType     = MB_TAG_DENSE;
01485             defaultVal  = defDouble;
01486             break;
01487 
01488         case 2:
01489             tagDataType = MB_TYPE_HANDLE;
01490             tagType     = MB_TAG_DENSE;
01491             defaultVal  = defHandle;
01492             break;
01493 
01494         case 3:
01495             tagDataType = MB_TYPE_INTEGER;
01496             tagType     = MB_TAG_SPARSE;
01497             defaultVal  = defInt;
01498             break;
01499 
01500         case 4:
01501             tagDataType = MB_TYPE_DOUBLE;
01502             tagType     = MB_TAG_SPARSE;
01503             defaultVal  = defDouble;
01504             break;
01505 
01506         case 5:
01507             tagDataType = MB_TYPE_HANDLE;
01508             tagType     = MB_TAG_SPARSE;
01509             defaultVal  = defHandle;
01510             break;
01511 
01512         default: {
01513             delete[] defInt;
01514             delete[] defDouble;
01515             delete[] defHandle;
01516             return moab::MB_FAILURE;
01517         }  // error
01518     }
01519 
01520     Tag tagHandle;
01521     // split storage names if separated list
01522 
01523     std::string tag_name( tag_storage_name );
01524 
01525     //  first separate the names of the tags
01526     // we assume that there are separators ":" between the tag names
01527     std::vector< std::string > tagNames;
01528     std::string separator( ":" );
01529     split_tag_names( tag_name, separator, tagNames );
01530 
01531     ErrorCode rval = moab::MB_SUCCESS;  // assume success already :)
01532     appData& data  = context.appDatas[*pid];
01533 
01534     for( size_t i = 0; i < tagNames.size(); i++ )
01535     {
01536         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
01537                                             tagType, defaultVal );
01538 
01539         if( MB_TAG_NOT_FOUND == rval )
01540         {
01541             rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
01542                                                 tagType | MB_TAG_CREAT, defaultVal );
01543         }
01544 
01545         if( MB_ALREADY_ALLOCATED == rval )
01546         {
01547             std::map< std::string, Tag >& mTags        = data.tagMap;
01548             std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i] );
01549 
01550             if( mit == mTags.end() )
01551             {
01552                 // add it to the map
01553                 mTags[tagNames[i]] = tagHandle;
01554                 // push it to the list of tags, too
01555                 *tag_index = (int)data.tagList.size();
01556                 data.tagList.push_back( tagHandle );
01557             }
01558             rval = MB_SUCCESS;
01559         }
01560         else if( MB_SUCCESS == rval )
01561         {
01562             data.tagMap[tagNames[i]] = tagHandle;
01563             *tag_index               = (int)data.tagList.size();
01564             data.tagList.push_back( tagHandle );
01565         }
01566         else
01567         {
01568             rval = moab::MB_FAILURE;  // some tags were not created
01569         }
01570     }
01571     // we don't need default values anymore, avoid leaks
01572     delete[] defInt;
01573     delete[] defDouble;
01574     delete[] defHandle;
01575     return rval;
01576 }
01577 
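// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): defining tag storage from the caller side.
// A minimal sketch, assuming `pid` refers to an application registered earlier (for example
// through iMOAB_RegisterApplication); the tag names below are hypothetical.
#if 0
static void example_define_tags( iMOAB_AppID pid )
{
    // one dense, double-valued tag with 1 component per entity (tag_type 1 in the switch above)
    char single_name[]  = "a2oTbot";  // hypothetical field name
    int tag_type        = 1;
    int num_components  = 1;
    int tag_index       = -1;
    ErrCode ierr = iMOAB_DefineTagStorage( pid, single_name, &tag_type, &num_components, &tag_index );

    // several tags can be defined in one call with a ":"-separated list; they all get the same
    // type and component count, and tag_index reports the index of the last tag defined
    char multi_name[] = "u_velocity:v_velocity";  // hypothetical field names
    ierr = iMOAB_DefineTagStorage( pid, multi_name, &tag_type, &num_components, &tag_index );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
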
01578 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
01579                                 const iMOAB_String tag_storage_name,
01580                                 int* num_tag_storage_length,
01581                                 int* ent_type,
01582                                 int* tag_storage_data )
01583 {
01584     std::string tag_name( tag_storage_name );
01585 
01586     // Get the application data
01587     appData& data = context.appDatas[*pid];
01588 
01589     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01590     {
01591         return moab::MB_FAILURE;
01592     }  // tag not defined
01593 
01594     Tag tag = data.tagMap[tag_name];
01595 
01596     int tagLength  = 0;
01597     ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01598 
01599     DataType dtype;
01600     rval = context.MBI->tag_get_data_type( tag, dtype );
01601 
01602     if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
01603     {
01604         return moab::MB_FAILURE;
01605     }
01606 
01607     // set it on a subset of entities, based on type and length
01608     Range* ents_to_set;
01609 
01610     if( *ent_type == 0 )  // vertices
01611     {
01612         ents_to_set = &data.all_verts;
01613     }
01614     else  // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
01615     {
01616         ents_to_set = &data.primary_elems;
01617     }
01618 
01619     int nents_to_be_set = *num_tag_storage_length / tagLength;
01620 
01621     if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
01622     {
01623         return moab::MB_FAILURE;
01624     }  // too many entities to be set, or too few
01625 
01626     // restrict the range; everything is contiguous; or not?
01627 
01628     // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
01629     // 1 ) );
01630     rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
01631 
01632     return moab::MB_SUCCESS;  // no error
01633 }
01634 
01635 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
01636                                 const iMOAB_String tag_storage_name,
01637                                 int* num_tag_storage_length,
01638                                 int* ent_type,
01639                                 int* tag_storage_data )
01640 {
01641     ErrorCode rval;
01642     std::string tag_name( tag_storage_name );
01643 
01644     appData& data = context.appDatas[*pid];
01645 
01646     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01647     {
01648         return moab::MB_FAILURE;
01649     }  // tag not defined
01650 
01651     Tag tag = data.tagMap[tag_name];
01652 
01653     int tagLength = 0;
01654     rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01655 
01656     DataType dtype;
01657     rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01658 
01659     if( dtype != MB_TYPE_INTEGER )
01660     {
01661         return moab::MB_FAILURE;
01662     }
01663 
01664     // set it on a subset of entities, based on type and length
01665     Range* ents_to_get;
01666 
01667     if( *ent_type == 0 )  // vertices
01668     {
01669         ents_to_get = &data.all_verts;
01670     }
01671     else  // if (*ent_type == 1)
01672     {
01673         ents_to_get = &data.primary_elems;
01674     }
01675 
01676     int nents_to_get = *num_tag_storage_length / tagLength;
01677 
01678     if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
01679     {
01680         return moab::MB_FAILURE;
01681     }  // too many entities to get, or too few
01682 
01683     // restrict the range; everything is contiguous; or not?
01684     // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
01685     // ) );
01686 
01687     rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
01688 
01689     return moab::MB_SUCCESS;  // no error
01690 }
01691 
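// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): setting and reading back an integer tag.
// A sketch, assuming `pid` was registered earlier and that an integer tag named "CELL_FLAG"
// (hypothetical) was defined with iMOAB_DefineTagStorage, with 1 component per entity.
#if 0
static void example_int_tag_round_trip( iMOAB_AppID pid, int num_local_cells )
{
    char tag_name[]    = "CELL_FLAG";
    int ent_type       = 1;                // 0 = vertices, 1 = primary elements
    int storage_length = num_local_cells;  // number of entities * components per entity
    std::vector< int > flags( num_local_cells, 0 );
    ErrCode ierr = iMOAB_SetIntTagStorage( pid, tag_name, &storage_length, &ent_type, &flags[0] );

    // read the values back into a separate buffer
    std::vector< int > check( num_local_cells, -1 );
    ierr = iMOAB_GetIntTagStorage( pid, tag_name, &storage_length, &ent_type, &check[0] );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
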
01692 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
01693                                    const iMOAB_String tag_storage_names,
01694                                    int* num_tag_storage_length,
01695                                    int* ent_type,
01696                                    double* tag_storage_data )
01697 {
01698     ErrorCode rval;
01699     std::string tag_names( tag_storage_names );
01700     // same pattern as the integer tag setter, except that multiple ":"-separated tag names are allowed
01701     std::vector< std::string > tagNames;
01702     std::vector< Tag > tagHandles;
01703     std::string separator( ":" );
01704     split_tag_names( tag_names, separator, tagNames );
01705 
01706     appData& data      = context.appDatas[*pid];
01707     Range* ents_to_set = NULL;
01708 
01709     if( *ent_type == 0 )  // vertices
01710     {
01711         ents_to_set = &data.all_verts;
01712     }
01713     else if( *ent_type == 1 )
01714     {
01715         ents_to_set = &data.primary_elems;
01716     }
01717 
01718     int nents_to_be_set = (int)( *ents_to_set ).size();
01719     int position        = 0;
01720 
01721     for( size_t i = 0; i < tagNames.size(); i++ )
01722     {
01723         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
01724         {
01725             return moab::MB_FAILURE;
01726         }  // some tag not defined yet in the app
01727 
01728         Tag tag = data.tagMap[tagNames[i]];
01729 
01730         int tagLength = 0;
01731         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01732 
01733         DataType dtype;
01734         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01735 
01736         if( dtype != MB_TYPE_DOUBLE )
01737         {
01738             return moab::MB_FAILURE;
01739         }
01740 
01741         // set it on the subset of entities, based on type and length
01742         if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
01743             return moab::MB_FAILURE;  // too many entity values to be set
01744 
01745         rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
01746         // increment position to next tag
01747         position = position + tagLength * nents_to_be_set;
01748     }
01749     return moab::MB_SUCCESS;  // no error
01750 }
01751 
01752 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
01753                                    const iMOAB_String tag_storage_names,
01754                                    int* num_tag_storage_length,
01755                                    int* ent_type,
01756                                    double* tag_storage_data )
01757 {
01758     ErrorCode rval;
01759     // same pattern as iMOAB_GetIntTagStorage, except for the tag data type check
01760     std::string tag_names( tag_storage_names );
01761     // multiple tag names may be passed, separated by ":"
01762     std::vector< std::string > tagNames;
01763     std::vector< Tag > tagHandles;
01764     std::string separator( ":" );
01765     split_tag_names( tag_names, separator, tagNames );
01766 
01767     appData& data = context.appDatas[*pid];
01768 
01769     // set it on a subset of entities, based on type and length
01770     Range* ents_to_get = NULL;
01771 
01772     if( *ent_type == 0 )  // vertices
01773     {
01774         ents_to_get = &data.all_verts;
01775     }
01776     else if( *ent_type == 1 )
01777     {
01778         ents_to_get = &data.primary_elems;
01779     }
01780     int nents_to_get = (int)ents_to_get->size();
01781     int position     = 0;
01782     for( size_t i = 0; i < tagNames.size(); i++ )
01783     {
01784         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
01785         {
01786             return moab::MB_FAILURE;
01787         }  // tag not defined
01788 
01789         Tag tag = data.tagMap[tagNames[i]];
01790 
01791         int tagLength = 0;
01792         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01793 
01794         DataType dtype;
01795         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01796 
01797         if( dtype != MB_TYPE_DOUBLE )
01798         {
01799             return moab::MB_FAILURE;
01800         }
01801 
01802         if( position + nents_to_get * tagLength > *num_tag_storage_length )
01803             return moab::MB_FAILURE;  // too many entity values to get
01804 
01805         rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
01806         position = position + nents_to_get * tagLength;
01807     }
01808 
01809     return moab::MB_SUCCESS;  // no error
01810 }
01811 
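// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): setting several double tags in one call.
// A sketch under the same assumptions as above; the two hypothetical vertex tags are laid out
// back to back in the buffer, each occupying (local vertex count) * (components per entity) values.
#if 0
static void example_double_tags( iMOAB_AppID pid, int num_local_verts )
{
    char names[]       = "u_velocity:v_velocity";  // hypothetical, ":"-separated, 1 component each
    int ent_type       = 0;                        // set on vertices
    int storage_length = 2 * num_local_verts;      // sum over tags of (entities * tag length)
    std::vector< double > values( storage_length, 0.0 );
    ErrCode ierr = iMOAB_SetDoubleTagStorage( pid, names, &storage_length, &ent_type, &values[0] );
    // read the same layout back
    ierr = iMOAB_GetDoubleTagStorage( pid, names, &storage_length, &ent_type, &values[0] );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
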
01812 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
01813 {
01814 #ifdef MOAB_HAVE_MPI
01815     appData& data = context.appDatas[*pid];
01816     Range ent_exchange;
01817     std::vector< Tag > tags;
01818 
01819     for( int i = 0; i < *num_tag; i++ )
01820     {
01821         if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
01822         {
01823             return moab::MB_FAILURE;
01824         }  // error in tag index
01825 
01826         tags.push_back( data.tagList[tag_indices[i]] );
01827     }
01828 
01829     if( *ent_type == 0 )
01830     {
01831         ent_exchange = data.all_verts;
01832     }
01833     else if( *ent_type == 1 )
01834     {
01835         ent_exchange = data.primary_elems;
01836     }
01837     else
01838     {
01839         return moab::MB_FAILURE;
01840     }  // unexpected type
01841 
01842     ParallelComm* pco = context.pcomms[*pid];
01843 
01844     ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
01845 
01846 #else
01847     /* do nothing if serial */
01848     // reference the arguments only to silence unused-parameter warnings;
01849     // there is nothing to synchronize in a serial build
01850     int k = *pid + *num_tag + *tag_indices + *ent_type;
01851     k++;
01852 #endif
01853 
01854     return moab::MB_SUCCESS;
01855 }
01856 
01857 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
01858 {
01859 #ifdef MOAB_HAVE_MPI
01860     appData& data = context.appDatas[*pid];
01861     Range ent_exchange;
01862 
01863     if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
01864     {
01865         return moab::MB_FAILURE;
01866     }  // error in tag index
01867 
01868     Tag tagh = data.tagList[*tag_index];
01869 
01870     if( *ent_type == 0 )
01871     {
01872         ent_exchange = data.all_verts;
01873     }
01874     else if( *ent_type == 1 )
01875     {
01876         ent_exchange = data.primary_elems;
01877     }
01878     else
01879     {
01880         return moab::MB_FAILURE;
01881     }  // unexpected type
01882 
01883     ParallelComm* pco = context.pcomms[*pid];
01884     // we could do different MPI_Op; do not bother now, we will call from fortran
01885     ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
01886 
01887 #else
01888     /* do nothing if serial */
01889     // reference the arguments only to silence unused-parameter warnings;
01890     // there is nothing to reduce in a serial build
01891     int k = *pid + *tag_index + *ent_type;
01892     k++;
01893 #endif
01894     return moab::MB_SUCCESS;
01895 }
01896 
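// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): exchanging and reducing tag values over shared entities.
// A sketch for parallel runs, assuming the tag index below was returned by iMOAB_DefineTagStorage;
// both calls are effectively no-ops when MOAB is built without MPI.
#if 0
static void example_sync_tags( iMOAB_AppID pid, int density_tag_index )
{
    int ent_type       = 1;  // exchange on primary elements
    int num_tags       = 1;
    int tag_indices[1] = { density_tag_index };
    ErrCode ierr = iMOAB_SynchronizeTags( pid, &num_tags, tag_indices, &ent_type );

    // take the maximum over shared entities for the same tag
    ierr = iMOAB_ReduceTagsMax( pid, &density_tag_index, &ent_type );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
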
01897 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
01898                                    iMOAB_LocalID* local_index,
01899                                    int* num_adjacent_elements,
01900                                    iMOAB_LocalID* adjacent_element_IDs )
01901 {
01902     ErrorCode rval;
01903 
01904     // one neighbor for each subentity of dimension-1
01905     MeshTopoUtil mtu( context.MBI );
01906     appData& data   = context.appDatas[*pid];
01907     EntityHandle eh = data.primary_elems[*local_index];
01908     Range adjs;
01909     rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
01910 
01911     if( *num_adjacent_elements < (int)adjs.size() )
01912     {
01913         return moab::MB_FAILURE;
01914     }  // not dimensioned correctly
01915 
01916     *num_adjacent_elements = (int)adjs.size();
01917 
01918     for( int i = 0; i < *num_adjacent_elements; i++ )
01919     {
01920         adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
01921     }
01922 
01923     return moab::MB_SUCCESS;
01924 }
01925 
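// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): querying the face neighbors of one cell.
// A sketch; the caller passes the capacity of adjacent_element_IDs in num_adjacent_elements, and
// on return it holds the actual number of bridge-adjacent cells. The bound of 10 is an assumption.
#if 0
static void example_neighbors( iMOAB_AppID pid, iMOAB_LocalID cell )
{
    const int max_neighbors = 10;      // assumed upper bound for this sketch
    iMOAB_LocalID neighbors[max_neighbors];
    int num_adjacent = max_neighbors;  // in: capacity, out: actual count
    ErrCode ierr = iMOAB_GetNeighborElements( pid, &cell, &num_adjacent, neighbors );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
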
01926 #if 0
01927 
01928 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
01929 {
01930     return moab::MB_SUCCESS;
01931 }
01932 
01933 #endif
01934 
01935 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
01936 {
01937     ErrorCode rval;
01938     appData& data = context.appDatas[*pid];
01939 
01940     if( !data.local_verts.empty() )  // we should have no vertices in the app
01941     {
01942         return moab::MB_FAILURE;
01943     }
01944 
01945     int nverts = *coords_len / *dim;
01946 
01947     rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
01948 
01949     rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
01950 
01951     // also add the vertices to the all_verts range
01952     data.all_verts.merge( data.local_verts );
01953     return moab::MB_SUCCESS;
01954 }
01955 
01956 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
01957                               int* num_elem,
01958                               int* type,
01959                               int* num_nodes_per_element,
01960                               int* connectivity,
01961                               int* block_ID )
01962 {
01963     // Create elements
01964     appData& data = context.appDatas[*pid];
01965 
01966     ReadUtilIface* read_iface;
01967     ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
01968 
01969     EntityType mbtype = ( EntityType )( *type );
01970     EntityHandle actual_start_handle;
01971     EntityHandle* array = NULL;
01972     rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
01973 
01974     // fill up with actual connectivity from input; assume the vertices are in order, and start
01975     // vertex is the first in the current data vertex range
01976     EntityHandle firstVertex = data.local_verts[0];
01977 
01978     for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
01979     {
01980         array[j] = connectivity[j] + firstVertex - 1;
01981     }  // assumes 1-based connectivity (typically coming from Fortran)
01982 
01983     Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
01984 
01985     rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
01986 
01987     data.primary_elems.merge( new_elems );
01988 
01989     // add to adjacency
01990     rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
01991     // organize all new elements in a block with the given block ID; if the block set does
01992     // not exist yet, create a new mesh set
01993     Range sets;
01994     int set_no            = *block_ID;
01995     const void* setno_ptr = &set_no;
01996     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
01997                                                       sets );
01998     EntityHandle block_set;
01999 
02000     if( MB_FAILURE == rval || sets.empty() )
02001     {
02002         // create a new set, with this block ID
02003         rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
02004 
02005         rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
02006 
02007         // add the material set to file set
02008         rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
02009     }
02010     else
02011     {
02012         block_set = sets[0];
02013     }  // first set is the one we want
02014 
02015     // add the new elements to the block set
02016     rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
02017 
02018     return moab::MB_SUCCESS;
02019 }
02020 
02021 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
02022 {
02023     appData& data            = context.appDatas[*pid];
02024     data.num_global_vertices = *num_global_verts;
02025     data.num_global_elements = *num_global_elems;
02026     return moab::MB_SUCCESS;
02027 }
02028 
02029 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
02030 {
02031     appData& data = context.appDatas[*pid];
02032     if( NULL != num_global_verts )
02033     {
02034         *num_global_verts = data.num_global_vertices;
02035     }
02036     if( NULL != num_global_elems )
02037     {
02038         *num_global_elems = data.num_global_elements;
02039     }
02040 
02041     return moab::MB_SUCCESS;
02042 }
02043 
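// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): building a small mesh through iMOAB.
// A sketch that creates a single quad on an application that has no mesh yet; connectivity is
// 1-based, as assumed by iMOAB_CreateElements, and the global counts / block id are illustrative.
#if 0
static void example_create_mesh( iMOAB_AppID pid )
{
    double coords[] = { 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 0. };
    int coords_len  = 12;
    int dim         = 3;
    ErrCode ierr = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );

    int conn[]    = { 1, 2, 3, 4 };   // 1-based vertex indices
    int num_elem  = 1;
    int elem_type = (int)MBQUAD;      // cast back to EntityType inside iMOAB_CreateElements
    int nodes_per = 4;
    int block_id  = 100;              // hypothetical material/block id
    ierr = iMOAB_CreateElements( pid, &num_elem, &elem_type, &nodes_per, conn, &block_id );

    int nverts_global = 4, nelems_global = 1;  // serial example
    ierr = iMOAB_SetGlobalInfo( pid, &nverts_global, &nelems_global );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
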
02044 #ifdef MOAB_HAVE_MPI
02045 
02046 // this makes sense only for parallel runs
02047 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
02048 {
02049     appData& data     = context.appDatas[*pid];
02050     ParallelComm* pco = context.pcomms[*pid];
02051     EntityHandle cset = data.file_set;
02052     int dum_id        = 0;
02053     ErrorCode rval;
02054     if( data.primary_elems.empty() )
02055     {
02056         // skip the actual resolve; assume the vertices are already distributed,
02057         // so there is no need to share them
02058     }
02059     else
02060     {
02061         // create an integer tag used for resolving; it may need to become a 64-bit tag in the
02062         // future (for more than 2 billion vertices)
02063 
02064         Tag stag;
02065         rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
02066                                             &dum_id );MB_CHK_ERR( rval );
02067 
02068         if( *num_verts > (int)data.local_verts.size() )
02069         {
02070             return moab::MB_FAILURE;
02071         }  // more markers were passed than there are local vertices
02072 
02073         rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval );  // assumes integer tag
02074 
02075         rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
02076 
02077         rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
02078     }
02079     // provide partition tag equal to rank
02080     Tag part_tag;
02081     dum_id = -1;
02082     rval   = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02083                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02084 
02085     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02086     {
02087         std::cout << " can't get par part tag.\n";
02088         return moab::MB_FAILURE;
02089     }
02090 
02091     int rank = pco->rank();
02092     rval     = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
02093 
02094     return moab::MB_SUCCESS;
02095 }
02096 
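// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): resolving shared vertices across ranks.
// A sketch for the parallel case; the marker array is typically the global vertex id, one entry
// per locally created vertex, so that matching markers on different ranks become shared entities.
#if 0
static void example_resolve_shared( iMOAB_AppID pid, std::vector< int >& vertex_global_ids )
{
    int num_verts = (int)vertex_global_ids.size();
    ErrCode ierr  = iMOAB_ResolveSharedEntities( pid, &num_verts, &vertex_global_ids[0] );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
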
02097 // this assumes that this was not called before
02098 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
02099 {
02100     ErrorCode rval;
02101 
02102     // verify that a valid number of ghost layers was requested; if not, exit quickly
02103     if( *num_ghost_layers <= 0 )
02104     {
02105         return moab::MB_SUCCESS;
02106     }  // nothing to do
02107 
02108     appData& data     = context.appDatas[*pid];
02109     ParallelComm* pco = context.pcomms[*pid];
02110 
02111     int addl_ents = 0;  // maybe we should be passing this too; most of the time we do not need
02112                         // additional ents. The ghost exchange below is a collective call.
02113     rval =
02114         pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, *num_ghost_layers, addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
02115 
02116     // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
02117     return iMOAB_UpdateMeshInfo( pid );
02118 }
02119 
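// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): requesting one layer of ghost cells.
// A sketch; ghost_dim is the dimension of the entities to ghost, bridge_dim the dimension used to
// define adjacency (0 = vertex-bridged neighbors), and the operation is collective.
#if 0
static void example_ghosting( iMOAB_AppID pid )
{
    int ghost_dim  = 2;
    int bridge_dim = 0;
    int num_layers = 1;
    ErrCode ierr = iMOAB_DetermineGhostEntities( pid, &ghost_dim, &num_layers, &bridge_dim );
    (void)ierr;
}
#endif
// ------------------------------------------------------------------------------------------------
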
02120 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
02121 {
02122     assert( join != nullptr );
02123     assert( receivingGroup != nullptr );
02124     assert( rcompid != nullptr );
02125 
02126     ErrorCode rval;
02127     int ierr;
02128     appData& data     = context.appDatas[*pid];
02129     ParallelComm* pco = context.pcomms[*pid];
02130 
02131     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02132     MPI_Group recvGroup =
02133         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
02134     MPI_Comm sender = pco->comm();  // the sender comm is obtained from parallel comm in moab; no need to pass it along
02135     // first determine the processes in each group; get the sender group from the sender communicator
02136     MPI_Group senderGroup;
02137     ierr = MPI_Comm_group( sender, &senderGroup );
02138     if( ierr != 0 ) return moab::MB_FAILURE;
02139 
02140     // instantiate the par comm graph
02141     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02142     // int coid2)
02143     ParCommGraph* cgraph =
02144         new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
02145     // we should search if we have another pcomm with the same comp ids in the list already
02146     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02147     context.appDatas[*pid].pgraph[*rcompid] = cgraph;
02148 
02149     int sender_rank = -1;
02150     MPI_Comm_rank( sender, &sender_rank );
02151 
02152     // decide how to distribute elements to each processor
02153     // now, get the entities on local processor, and pack them into a buffer for various processors
02154     // we will do trivial partition: first get the total number of elements from "sender"
02155     std::vector< int > number_elems_per_part;
02156     // how do we distribute local elements to the receiving tasks?
02157     // trivial partition: first compute the total number of elements that need to be sent
02158     Range owned = context.appDatas[*pid].owned_elems;
02159     if( owned.size() == 0 )
02160     {
02161         // then it must be vertices that we want to send
02162         owned = context.appDatas[*pid].local_verts;
02163         // we should have some vertices here
02164     }
02165 
02166     if( *method == 0 )  // trivial partitioning, old method
02167     {
02168         int local_owned_elem = (int)owned.size();
02169         int size             = pco->size();
02170         int rank             = pco->rank();
02171         number_elems_per_part.resize( size );  //
02172         number_elems_per_part[rank] = local_owned_elem;
02173 #if( MPI_VERSION >= 2 )
02174         // Use "in place" option
02175         ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
02176 #else
02177         {
02178             std::vector< int > all_tmp( size );
02179             ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
02180             number_elems_per_part = all_tmp;
02181         }
02182 #endif
02183 
02184         if( ierr != 0 )
02185         {
02186             return moab::MB_FAILURE;
02187         }
02188 
02189         // every sender computes the trivial partition, it is cheap, and we need to send it anyway
02190         // to each sender
02191         rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
02192 
02193         rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
02194     }
02195     else  // *method != 0, so it is either graph or geometric, parallel
02196     {
02197         // owned are the primary elements on this app
02198         rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
02199 
02200         // basically, send the graph to the receiver side, with unblocking send
02201         rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
02202     }
02203     // pco is needed to pack, not for communication
02204     rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
02205 
02206     // mark for deletion
02207     MPI_Group_free( &senderGroup );
02208     return moab::MB_SUCCESS;
02209 }
02210 
02211 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
02212 {
02213     assert( join != nullptr );
02214     assert( sendingGroup != nullptr );
02215     assert( scompid != nullptr );
02216 
02217     ErrorCode rval;
02218     appData& data          = context.appDatas[*pid];
02219     ParallelComm* pco      = context.pcomms[*pid];
02220     MPI_Comm receive       = pco->comm();
02221     EntityHandle local_set = data.file_set;
02222 
02223     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02224     MPI_Group sendGroup =
02225         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
02226 
02227     // first determine the processes in each group; get the receiver group from the
02228     // receiver communicator
02229     MPI_Group receiverGroup;
02230     int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
02231 
02232     // instantiate the par comm graph
02233     ParCommGraph* cgraph =
02234         new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
02235     // TODO we should search if we have another pcomm with the same comp ids in the list already
02236     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02237     context.appDatas[*pid].pgraph[*scompid] = cgraph;
02238 
02239     int receiver_rank = -1;
02240     MPI_Comm_rank( receive, &receiver_rank );
02241 
02242     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
02243     // knows what data to expect
02244     std::vector< int > pack_array;
02245     rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
02246 
02247     // find the senders that correspond to the current receiver
02248     int current_receiver = cgraph->receiver( receiver_rank );
02249 
02250     std::vector< int > senders_local;
02251     size_t n = 0;
02252 
02253     while( n < pack_array.size() )
02254     {
02255         if( current_receiver == pack_array[n] )
02256         {
02257             for( int j = 0; j < pack_array[n + 1]; j++ )
02258             {
02259                 senders_local.push_back( pack_array[n + 2 + j] );
02260             }
02261 
02262             break;
02263         }
02264 
02265         n = n + 2 + pack_array[n + 1];
02266     }
02267 
02268 #ifdef VERBOSE
02269     std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
02270               << senders_local.size() << " tasks: ";
02271 
02272     for( int k = 0; k < (int)senders_local.size(); k++ )
02273     {
02274         std::cout << " " << senders_local[k];
02275     }
02276 
02277     std::cout << "\n";
02278 #endif
02279 
02280     if( senders_local.empty() )
02281     {
02282         std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
02283     }
02284     rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
02285 
02286     // after we are done, we could merge vertices that come from different senders, but
02287     // have the same global id
02288     Tag idtag;
02289     rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
02290 
02291     //   data.point_cloud = false;
02292     Range local_ents;
02293     rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
02294 
02295     // do not do merge if point cloud
02296     if( !local_ents.all_of_type( MBVERTEX ) )
02297     {
02298         if( (int)senders_local.size() >= 2 )  // need to remove duplicate vertices
02299         // that might come from different senders
02300         {
02301 
02302             Range local_verts = local_ents.subset_by_type( MBVERTEX );
02303             Range local_elems = subtract( local_ents, local_verts );
02304 
02305             // remove from local set the vertices
02306             rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
02307 
02308 #ifdef VERBOSE
02309             std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
02310 #endif
02311             MergeMesh mm( context.MBI );
02312 
02313             rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
02314 
02315             Range new_verts;  // local elems are local entities without vertices
02316             rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
02317 
02318 #ifdef VERBOSE
02319             std::cout << "after merging: new verts: " << new_verts.size() << "\n";
02320 #endif
02321             rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
02322         }
02323     }
02324     else
02325         data.point_cloud = true;
02326 
02327     if( !data.point_cloud )
02328     {
02329         // still need to resolve shared entities (in this case, vertices )
02330         rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
02331     }
02332     else
02333     {
02334         // if partition tag exists, set it to current rank; just to make it visible in VisIt
02335         Tag densePartTag;
02336         rval = context.MBI->tag_get_handle( "partition", densePartTag );
02337         if( NULL != densePartTag && MB_SUCCESS == rval )
02338         {
02339             Range local_verts;
02340             rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
02341             std::vector< int > vals;
02342             int rank = pco->rank();
02343             vals.resize( local_verts.size(), rank );
02344             rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
02345         }
02346     }
02347     // set the parallel partition tag
02348     Tag part_tag;
02349     int dum_id = -1;
02350     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02351                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02352 
02353     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02354     {
02355         std::cout << " can't get par part tag.\n";
02356         return moab::MB_FAILURE;
02357     }
02358 
02359     int rank = pco->rank();
02360     rval     = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
02361 
02362     // populate the mesh with current data info
02363     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
02364 
02365     // mark for deletion
02366     MPI_Group_free( &receiverGroup );
02367 
02368     return moab::MB_SUCCESS;
02369 }
02370 
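// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): migrating a mesh between two groups of tasks.
// A sketch of the send/receive pairing over a joint communicator; the component ids and the
// partitioning method (0 = trivial) are illustrative. Sender tasks call iMOAB_SendMesh and
// receiver tasks call iMOAB_ReceiveMesh, each passing the peer group and peer component id.
#if 0
static void example_migrate_mesh( iMOAB_AppID pid_send, iMOAB_AppID pid_recv, MPI_Comm join,
                                  MPI_Group senders, MPI_Group receivers, int is_sender )
{
    int send_comp = 5, recv_comp = 6;  // hypothetical component ids
    int method    = 0;                 // trivial partition
    if( is_sender )
        iMOAB_SendMesh( pid_send, &join, &receivers, &recv_comp, &method );
    else
        iMOAB_ReceiveMesh( pid_recv, &join, &senders, &send_comp );
}
#endif
// ------------------------------------------------------------------------------------------------
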
02371 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02372 {
02373     appData& data                               = context.appDatas[*pid];
02374     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02375     if( mt == data.pgraph.end() )
02376     {
02377         return moab::MB_FAILURE;
02378     }
02379     ParCommGraph* cgraph = mt->second;
02380     ParallelComm* pco    = context.pcomms[*pid];
02381     Range owned          = data.owned_elems;
02382     ErrorCode rval;
02383     EntityHandle cover_set;
02384 
02385     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02386     if( data.point_cloud )
02387     {
02388         owned = data.local_verts;
02389     }
02390 
02391     // another possibility is for the par comm graph to have been computed by iMOAB_ComputeCommGraph,
02392     // e.g. after the atm-ocn intersection, from the phys grid (case from imoab_phatm_ocn_coupler.cpp);
02393     // in that case, get the cover set from the intx remapper
02394 #ifdef MOAB_HAVE_TEMPESTREMAP
02395     if( data.tempestData.remapper != NULL )  // this is the case this is part of intx;;
02396     {
02397         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02398         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02399         // should still have only quads ?
02400     }
02401 #else
02402     // now, in case we are sending from intx between ocn and atm, we involve coverage set
02403     // how do I know if this receiver already participated in an intersection driven by coupler?
02404     // also, what if this was the "source" mesh in intx?
02405     // in that case, the elements might have been instantiated locally in the coverage set;
02406     // the "owned" range can be different, because the elements are now in the tempestRemap coverage set
02407     cover_set = cgraph->get_cover_set();  // this will be non null only for intx app ?
02408 
02409     if( 0 != cover_set )
02410     {
02411         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02412     }
02413 #endif
02414 
02415     std::string tag_name( tag_storage_name );
02416 
02417     // basically, we assume everything is defined already on the tag,
02418     //   and we can get the tags just by its name
02419     // we assume that there are separators ":" between the tag names
02420     std::vector< std::string > tagNames;
02421     std::vector< Tag > tagHandles;
02422     std::string separator( ":" );
02423     split_tag_names( tag_name, separator, tagNames );
02424     for( size_t i = 0; i < tagNames.size(); i++ )
02425     {
02426         Tag tagHandle;
02427         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02428         if( MB_SUCCESS != rval || NULL == tagHandle )
02429         {
02430             return moab::MB_FAILURE;
02431         }
02432         tagHandles.push_back( tagHandle );
02433     }
02434 
02435     // pco is needed to pack, and for moab instance, not for communication!
02436     // still use nonblocking communication, over the joint comm
02437     rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02438     // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
02439 
02440     return moab::MB_SUCCESS;
02441 }
02442 
02443 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02444 {
02445     appData& data = context.appDatas[*pid];
02446 
02447     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02448     if( mt == data.pgraph.end() )
02449     {
02450         return moab::MB_FAILURE;
02451     }
02452     ParCommGraph* cgraph = mt->second;
02453 
02454     MPI_Comm global   = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02455     ParallelComm* pco = context.pcomms[*pid];
02456     Range owned       = data.owned_elems;
02457 
02458     // how do I know if this receiver already participated in an intersection driven by coupler?
02459     // also, what if this was the "source" mesh in intx?
02460     // in that case, the elements might have been instantiated locally in the coverage set;
02461     // the "owned" range can be different, because the elements are now in the tempestRemap coverage set
02462     EntityHandle cover_set = cgraph->get_cover_set();
02463     ErrorCode rval;
02464     if( 0 != cover_set )
02465     {
02466         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02467     }
02468     if( data.point_cloud )
02469     {
02470         owned = data.local_verts;
02471     }
02472     // another possibility is for the par comm graph to have been computed by iMOAB_ComputeCommGraph,
02473     // e.g. after the atm-ocn intersection, from the phys grid (case from imoab_phatm_ocn_coupler.cpp);
02474     // in that case, get the cover set from the intx remapper
02475 #ifdef MOAB_HAVE_TEMPESTREMAP
02476     if( data.tempestData.remapper != NULL )  // this is the case this is part of intx;;
02477     {
02478         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02479         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02480         // should still have only quads ?
02481     }
02482 #endif
02483     /*
02484      * data_intx.remapper exists only on the intersection application, though;
02485      * how do we get to it from here (we know the pid that receives, and the comm graph used to
02486      * migrate the mesh)?
02487      */
02488 
02489     std::string tag_name( tag_storage_name );
02490 
02491     // we assume that there are separators ":" between the tag names
02492     std::vector< std::string > tagNames;
02493     std::vector< Tag > tagHandles;
02494     std::string separator( ":" );
02495     split_tag_names( tag_name, separator, tagNames );
02496     for( size_t i = 0; i < tagNames.size(); i++ )
02497     {
02498         Tag tagHandle;
02499         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02500         if( MB_SUCCESS != rval || NULL == tagHandle )
02501         {
02502             return moab::MB_FAILURE;
02503         }
02504         tagHandles.push_back( tagHandle );
02505     }
02506 
02507 #ifdef VERBOSE
02508     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
02509               << " and file set = " << ( data.file_set ) << "\n";
02510 #endif
02511     // pco is needed to pack, and for moab instance, not for communication!
02512     // still use nonblocking communication
02513     rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02514 
02515 #ifdef VERBOSE
02516     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
02517 #endif
02518 
02519     return moab::MB_SUCCESS;
02520 }
02521 
02522 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
02523 {
02524     // need first to find the pgraph that holds the information we need
02525     // this will be called on sender side only
02526     appData& data                               = context.appDatas[*pid];
02527     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02528     if( mt == data.pgraph.end() ) return moab::MB_FAILURE;  // error
02529 
02530     mt->second->release_send_buffers();
02531     return moab::MB_SUCCESS;
02532 }
02533 
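// ------------------------------------------------------------------------------------------------
// Usage sketch (kept out of the build): migrating tag values along an existing comm graph.
// A sketch; context_id is the id of the peer component used when the graph was created (here by
// iMOAB_SendMesh / iMOAB_ReceiveMesh or iMOAB_ComputeCommGraph), and the sender releases its
// nonblocking buffers only after the receive side is known to have completed.
#if 0
static void example_migrate_tag( iMOAB_AppID pid, MPI_Comm join, int peer_comp, int is_sender )
{
    char tag_name[] = "a2oTbot";  // hypothetical tag, defined on both sides
    if( is_sender )
    {
        iMOAB_SendElementTag( pid, tag_name, &join, &peer_comp );
        // ... after the receivers have completed their receives ...
        iMOAB_FreeSenderBuffers( pid, &peer_comp );
    }
    else
    {
        iMOAB_ReceiveElementTag( pid, tag_name, &join, &peer_comp );
    }
}
#endif
// ------------------------------------------------------------------------------------------------
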
02534 /**
02535 \brief compute a comm graph between 2 moab apps, based on ID matching
02536 <B>Operations:</B> Collective
02537 */
02538 //#define VERBOSE
02539 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
02540                                 iMOAB_AppID pid2,
02541                                 MPI_Comm* join,
02542                                 MPI_Group* group1,
02543                                 MPI_Group* group2,
02544                                 int* type1,
02545                                 int* type2,
02546                                 int* comp1,
02547                                 int* comp2 )
02548 {
02549     assert( join );
02550     assert( group1 );
02551     assert( group2 );
02552     ErrorCode rval = MB_SUCCESS;
02553     int localRank = 0, numProcs = 1;
02554 
02555     appData& sData = context.appDatas[*pid1];
02556     appData& tData = context.appDatas[*pid2];
02557 
02558     MPI_Comm global =
02559         ( ( sData.is_fortran || tData.is_fortran ) ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02560     MPI_Group srcGroup = ( sData.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
02561     MPI_Group tgtGroup = ( tData.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
02562 
02563     MPI_Comm_rank( global, &localRank );
02564     MPI_Comm_size( global, &numProcs );
02565     // instantiate the par comm graph
02566     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02567     // int coid2)
02568     ParCommGraph* cgraph = NULL;
02569     if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
02570     ParCommGraph* cgraph_rev = NULL;
02571     if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
02572     // we should search if we have another pcomm with the same comp ids in the list already
02573     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02574     if( *pid1 >= 0 ) sData.pgraph[*comp2] = cgraph;      // the context will be the other comp
02575     if( *pid2 >= 0 ) tData.pgraph[*comp1] = cgraph_rev;  // from 2 to 1
02576     // each model has a list of global ids that will need to be sent by gs to rendezvous the other
02577     // model on the joint comm
02578     TupleList TLcomp1;
02579     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02580     TupleList TLcomp2;
02581     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02582     // will push_back a new tuple, if needed
02583 
02584     TLcomp1.enableWriteAccess();
02585 
02586     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
02587     Tag gdsTag;
02588     // find the values on first cell
02589     int lenTagType1 = 1;
02590     if( 1 == *type1 || 1 == *type2 )
02591     {
02592         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
02593         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
02594     }
02595     Tag tagType2 = context.MBI->globalId_tag();
02596 
02597     std::vector< int > valuesComp1;
02598     // populate first tuple
02599     if( *pid1 >= 0 )
02600     {
02601         appData& data1     = context.appDatas[*pid1];
02602         EntityHandle fset1 = data1.file_set;
02603         // in case of tempest remap, get the coverage set
02604 #ifdef MOAB_HAVE_TEMPESTREMAP
02605         if( data1.tempestData.remapper != NULL )  // this is the case this is part of intx;;
02606         {
02607             fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02608             // should still have only quads ?
02609         }
02610 #endif
02611         Range ents_of_interest;
02612         if( *type1 == 1 )
02613         {
02614             assert( gdsTag );
02615             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
02616             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
02617             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
02618         }
02619         else if( *type1 == 2 )
02620         {
02621             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
02622             valuesComp1.resize( ents_of_interest.size() );
02623             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
02624         }
02625         else if( *type1 == 3 )  // for FV meshes, just get the global id of cell
02626         {
02627             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
02628             valuesComp1.resize( ents_of_interest.size() );
02629             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
02630         }
02631         else
02632         {
02633             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
02634         }
02635         // now fill the tuple list with info and markers
02636         // because we will send only the ids, order and compress the list
02637         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
02638         TLcomp1.resize( uniq.size() );
02639         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02640         {
02641             // to proc, marker, element local index, index in el
02642             int marker               = *sit;
02643             int to_proc              = marker % numProcs;
02644             int n                    = TLcomp1.get_n();
02645             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
02646             TLcomp1.vi_wr[2 * n + 1] = marker;
02647             TLcomp1.inc_n();
02648         }
02649     }
02650 
02651     ProcConfig pc( global );  // proc config does the crystal router
02652     pc.crystal_router()->gs_transfer( 1, TLcomp1,
02653                                       0 );  // communication towards joint tasks, with markers
02654     // sort by value (key 1)
02655 #ifdef VERBOSE
02656     std::stringstream ff1;
02657     ff1 << "TLcomp1_" << localRank << ".txt";
02658     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02659 #endif
02660     moab::TupleList::buffer sort_buffer;
02661     sort_buffer.buffer_init( TLcomp1.get_n() );
02662     TLcomp1.sort( 1, &sort_buffer );
02663     sort_buffer.reset();
02664 #ifdef VERBOSE
02665     // after sorting
02666     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02667 #endif
02668     // do the same, for the other component, number2, with type2
02669     // start copy
02670     TLcomp2.enableWriteAccess();
02671     // populate second tuple
02672     std::vector< int > valuesComp2;
02673     if( *pid2 >= 0 )
02674     {
02675         appData& data2     = context.appDatas[*pid2];
02676         EntityHandle fset2 = data2.file_set;
02677         // in case of tempest remap, get the coverage set
02678 #ifdef MOAB_HAVE_TEMPESTREMAP
02679         if( data2.tempestData.remapper != NULL )  // this is the case this is part of intx;;
02680         {
02681             fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02682             // should still have only quads ?
02683         }
02684 #endif
02685 
02686         Range ents_of_interest;
02687         if( *type2 == 1 )
02688         {
02689             assert( gdsTag );
02690             rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
02691             valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
02692             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
02693         }
02694         else if( *type2 == 2 )
02695         {
02696             rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
02697             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02698             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
02699         }
02700         else if( *type2 == 3 )
02701         {
02702             rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
02703             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02704             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
02705         }
02706         else
02707         {
02708             MB_CHK_ERR( MB_FAILURE );  // we know only type 1, 2 or 3
02709         }
02710         // now fill the tuple list with info and markers
02711         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
02712         TLcomp2.resize( uniq.size() );
02713         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02714         {
02715             // to proc, marker, element local index, index in el
02716             int marker               = *sit;
02717             int to_proc              = marker % numProcs;
02718             int n                    = TLcomp2.get_n();
02719             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
02720             TLcomp2.vi_wr[2 * n + 1] = marker;
02721             TLcomp2.inc_n();
02722         }
02723     }
02724     pc.crystal_router()->gs_transfer( 1, TLcomp2,
02725                                       0 );  // communication towards joint tasks, with markers
02726     // sort by value (key 1)
02727 #ifdef VERBOSE
02728     std::stringstream ff2;
02729     ff2 << "TLcomp2_" << localRank << ".txt";
02730     TLcomp2.print_to_file( ff2.str().c_str() );
02731 #endif
02732     sort_buffer.buffer_reserve( TLcomp2.get_n() );
02733     TLcomp2.sort( 1, &sort_buffer );
02734     sort_buffer.reset();
02735     // end copy
02736 #ifdef VERBOSE
02737     TLcomp2.print_to_file( ff2.str().c_str() );
02738 #endif
02739     // need to send back the info, from the rendezvous point, for each of the values
02740     /* so go over each value, on local process in joint communicator
02741 
02742     now have to send back the info needed for communication;
02743      loop in in sync over both TLComp1 and TLComp2, in local process;
02744       So, build new tuple lists, to send synchronous communication
02745       populate them at the same time, based on marker, that is indexed
02746     */
02747 
02748     TupleList TLBackToComp1;
02749     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
02750     TLBackToComp1.enableWriteAccess();
02751 
02752     TupleList TLBackToComp2;
02753     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
02754     TLBackToComp2.enableWriteAccess();
02755 
02756     int n1 = TLcomp1.get_n();
02757     int n2 = TLcomp2.get_n();
02758 
02759     int indexInTLComp1 = 0;
02760     int indexInTLComp2 = 0;  // advance both, according to the marker
02761     if( n1 > 0 && n2 > 0 )
02762     {
02763 
02764         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
02765         {
02766             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
02767             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
02768             if( currentValue1 < currentValue2 )
02769             {
02770                 // we have a big problem; basically, we are saying that
02771                 // dof currentValue is on one model and not on the other
02772                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
02773                 indexInTLComp1++;
02774                 continue;
02775             }
02776             if( currentValue1 > currentValue2 )
02777             {
02778                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
02779                 indexInTLComp2++;
02780                 continue;
02781             }
02782             int size1 = 1;
02783             int size2 = 1;
02784             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
02785                 size1++;
02786             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
02787                 size2++;
02788             // must be found in both lists, find the start and end indices
02789             for( int i1 = 0; i1 < size1; i1++ )
02790             {
02791                 for( int i2 = 0; i2 < size2; i2++ )
02792                 {
02793                     // send the info back to components
02794                     int n = TLBackToComp1.get_n();
02795                     TLBackToComp1.reserve();
02796                     TLBackToComp1.vi_wr[3 * n] =
02797                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc the marker
02798                                                                      // came from, with info from comp2
02799                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
02800                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
02801                     n                              = TLBackToComp2.get_n();
02802                     TLBackToComp2.reserve();
02803                     TLBackToComp2.vi_wr[3 * n] =
02804                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
02805                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
02806                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
02807                     // what if there are repeated markers in TLcomp2? increase just index2
02808                 }
02809             }
02810             indexInTLComp1 += size1;
02811             indexInTLComp2 += size2;
02812         }
02813     }
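    // at this point, each matched marker has produced, on this rendezvous task, one tuple per
    // (sender on comp1, sender on comp2) pair; route the tuples back to the originating tasks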
02814     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about
02815     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
02816 
02817     if( *pid1 >= 0 )
02818     {
02819         // we are on original comp 1 tasks
02820         // before ordering
02821         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
02822         // processors it communicates with
02823 #ifdef VERBOSE
02824         std::stringstream f1;
02825         f1 << "TLBack1_" << localRank << ".txt";
02826         TLBackToComp1.print_to_file( f1.str().c_str() );
02827 #endif
02828         sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
02829         TLBackToComp1.sort( 1, &sort_buffer );
02830         sort_buffer.reset();
02831 #ifdef VERBOSE
02832         TLBackToComp1.print_to_file( f1.str().c_str() );
02833 #endif
02834         // so we are now on pid1; we now know where each marker has to go
02835         // add a new method to ParCommGraph, to set up the involved_IDs_map
02836         cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
02837     }
02838     if( *pid2 >= 0 )
02839     {
02840         // we are on original comp 2 tasks
02841         // before ordering
02842         // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
02843         // processors it communicates with
02844 #ifdef VERBOSE
02845         std::stringstream f2;
02846         f2 << "TLBack2_" << localRank << ".txt";
02847         TLBackToComp2.print_to_file( f2.str().c_str() );
02848 #endif
02849         sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
02850         TLBackToComp2.sort( 2, &sort_buffer );
02851         sort_buffer.reset();
02852 #ifdef VERBOSE
02853         TLBackToComp2.print_to_file( f2.str().c_str() );
02854 #endif
02855         cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
02856         //
02857     }
02858     return moab::MB_SUCCESS;
02859 }
02860 
02861 //#undef VERBOSE
02862 
02863 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
02864 {
02865     appData& data     = context.appDatas[*pid];
02866     ParallelComm* pco = context.pcomms[*pid];
02867     // collapse vertices and transform cells into triangles/quads/polys
02868     // tags we care about: area, frac, global id
02869     std::vector< Tag > tagsList;
02870     Tag tag;
02871     ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
02872     if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;  // fatal error, abort
02873     tagsList.push_back( tag );
02874     rval = context.MBI->tag_get_handle( "area", tag );
02875     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
02876     rval = context.MBI->tag_get_handle( "frac", tag );
02877     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
02878     double tol = 1.0e-9;
02879     rval       = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
02880 
02881     // clean material sets of cells that were deleted
02882     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
02883                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
02884 
02885     if( !data.mat_sets.empty() )
02886     {
02887         EntityHandle matSet = data.mat_sets[0];
02888         Range elems;
02889         rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
02890         rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
02891         // put back only cells from data.file_set
02892         elems.clear();
02893         rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
02894         rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
02895     }
02896     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
02897 
02898     ParallelMergeMesh pmm( pco, tol );
02899     rval = pmm.merge( data.file_set,
02900                       /* do not do local merge*/ false,
02901                       /*  2d cells*/ 2 );MB_CHK_ERR( rval );
02902 
02903     // assign global ids only for vertices, cells have them fine
02904     rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
02905 
02906     // set the partition tag on the file set
02907     Tag part_tag;
02908     int dum_id = -1;
02909     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02910                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02911 
02912     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02913     {
02914         std::cout << " can't get par part tag.\n";
02915         return moab::MB_FAILURE;
02916     }
02917 
02918     int rank = pco->rank();
02919     rval     = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
02920 
02921     return moab::MB_SUCCESS;
02922 }
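// Illustrative usage sketch (not part of the original source; assumes an application was
// registered earlier with iMOAB_RegisterApplication and its mesh already loaded/received;
// the handle name 'pid' is hypothetical):
//
//     ErrCode rc = iMOAB_MergeVertices( pid );   // collapse duplicated vertices at part boundaries
//     if( rc != moab::MB_SUCCESS ) { /* handle the error */ }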
02923 
02924 #ifdef MOAB_HAVE_TEMPESTREMAP
02925 
02926 // this call must be collective on the joint communicator
02927 //  intersection tasks on the coupler need to send to the component tasks the list of
02928 // source element ids that are relevant, i.e. those that intersected some of the target
02929 // elements in the intersection mesh
02930 //  (the target elements themselves are not needed here)
02931 ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
02932                              iMOAB_AppID pid_src,
02933                              iMOAB_AppID pid_migr,
02934                              iMOAB_AppID pid_intx,
02935                              int* src_id,
02936                              int* migr_id,
02937                              int* context_id )
02938 {
02939     // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
02940     // exchange
02941     ErrorCode rval;
02942     std::vector< int > srcSenders;
02943     std::vector< int > receivers;
02944     ParCommGraph* sendGraph = NULL;
02945     int ierr;
02946     int default_context_id  = -1;
02947     bool is_fortran_context = false;
02948 
02949     // First, find the original graph between PE sets
02950     // And based on this one, we will build the newly modified one for coverage
02951     if( *pid_src >= 0 )
02952     {
02953         default_context_id = *migr_id;  // the other one
02954         assert( context.appDatas[*pid_src].global_id == *src_id );
02955         is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
02956         sendGraph          = context.appDatas[*pid_src].pgraph[default_context_id];  // maybe check if it does not exist
02957 
02958         // report the sender and receiver tasks in the joint comm
02959         srcSenders = sendGraph->senders();
02960         receivers  = sendGraph->receivers();
02961 #ifdef VERBOSE
02962         std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
02963 #endif
02964     }
02965 
02966     ParCommGraph* recvGraph = NULL;  // will be non null on receiver tasks (intx tasks)
02967     if( *pid_migr >= 0 )
02968     {
02969         is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
02970         // find the original one
02971         default_context_id = *src_id;
02972         assert( context.appDatas[*pid_migr].global_id == *migr_id );
02973         recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
02974         // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
02975         srcSenders = recvGraph->senders();
02976         receivers  = recvGraph->receivers();
02977 #ifdef VERBOSE
02978         std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
02979 #endif
02980     }
02981 
02982     if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
02983 
02984     // loop over pid_intx elements, to see what original processors in joint comm have sent the
02985     // coverage mesh;
02986     // If we are on intx tasks, send coverage info towards original component tasks,
02987     // about needed cells
02988     TupleList TLcovIDs;
02989     TLcovIDs.initialize( 2, 0, 0, 0, 0 );  // to proc, GLOBAL ID; estimate about 100 IDs to be sent
02990     // will push_back a new tuple, if needed
02991     TLcovIDs.enableWriteAccess();
02992     // the crystal router will send the needed cell ids back to the original source, on the component tasks
02993     // if we are on intx tasks, loop over all intx elements and collect, per sender, the source cell ids
02994 
02995     MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02996     int currentRankInJointComm = -1;
02997     ierr                       = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
02998 
02999     // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
03000     // need to send information towards component tasks
03001     if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
03002         receivers.end() )  // we are on receivers tasks, we can request intx info
03003     {
03004         // find the pcomm for the intx pid
03005         if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
03006 
03007         appData& dataIntx = context.appDatas[*pid_intx];
03008         Tag parentTag, orgSendProcTag;
03009 
03010         rval = context.MBI->tag_get_handle( "SourceParent", parentTag );MB_CHK_ERR( rval );                        // global id of the blue, source element
03011         if( !parentTag ) return moab::MB_FAILURE;  // fatal error, abort
03012 
03013         rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );MB_CHK_ERR( rval );
03014         if( !orgSendProcTag ) return moab::MB_FAILURE;  // fatal error, abort
03015 
03016         // find the file set, red parents for intx cells, and put them in tuples
03017         EntityHandle intxSet = dataIntx.file_set;
03018         Range cells;
03019         // get all entities from the set, and look at their RedParent
03020         rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );MB_CHK_ERR( rval );
03021 
03022         std::map< int, std::set< int > > idsFromProcs;  // send that info back to enhance parCommGraph cache
03023         for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
03024         {
03025             EntityHandle intx_cell = *it;
03026             int gidCell, origProc;  // look at receivers
03027 
03028             rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );MB_CHK_ERR( rval );
03029             rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );MB_CHK_ERR( rval );
03030             // we have augmented the overlap set with ghost cells ; in that case, the
03031             // orig_sending_processor is not set so it will be -1;
03032             if( origProc < 0 ) continue;
03033 
03034             std::set< int >& setInts = idsFromProcs[origProc];
03035             setInts.insert( gidCell );
03036         }
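        // idsFromProcs now maps each original sending rank to the set of source-cell global
        // ids that this intersection task needs for its coverage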
03037 
03038         // if we have no intx cells, it means we are on point clouds; quick fix just use all cells
03039         // from coverage set
03040         if( cells.empty() )
03041         {
03042             // get coverage set
03043             assert( *pid_intx >= 0 );
03044             appData& dataIntx      = context.appDatas[*pid_intx];
03045             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03046 
03047             // get all cells from coverage set
03048             Tag gidTag;
03049             rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
03050             rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );MB_CHK_ERR( rval );
03051             // look at their orig_sending_processor
03052             for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
03053             {
03054                 EntityHandle covCell = *it;
03055                 int gidCell, origProc;  // look at the original sending processor
03056 
03057                 rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );MB_CHK_ERR( rval );
03058                 rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );MB_CHK_ERR( rval );
03059                 // we have augmented the overlap set with ghost cells ; in that case, the
03060                 // orig_sending_processor is not set so it will be -1;
03061                 if( origProc < 0 )  // it should not be < 0 here, but be safe
03062                     continue;
03063                 std::set< int >& setInts = idsFromProcs[origProc];
03064                 setInts.insert( gidCell );
03065             }
03066         }
03067 
03068 #ifdef VERBOSE
03069         std::ofstream dbfile;
03070         std::stringstream outf;
03071         outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
03072         dbfile.open( outf.str().c_str() );
03073         dbfile << "Writing this to a file.\n";
03074 
03075         dbfile << " map size:" << idsFromProcs.size()
03076                << std::endl;  // on the receiver side, these show how much data to receive
03077         // from the sender (how many ids, and how much tag data later; we need to size up the
03078         // receiver buffer); arrange the ids in tuples, using map iterators to send them
03079         for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
03080         {
03081             std::set< int >& setIds = mt->second;
03082             dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
03083             int counter = 0;
03084             for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
03085             {
03086                 int valueID = *st;
03087                 dbfile << " " << valueID;
03088                 counter++;
03089                 if( counter % 10 == 0 ) dbfile << "\n";
03090             }
03091             dbfile << "\n";
03092         }
03093         dbfile.close();
03094 #endif
03095         if( NULL != recvGraph )
03096         {
03097             ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph );  // just copy
03098             recvGraph1->set_context_id( *context_id );
03099             recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
03100             // this par comm graph will need to use the coverage set
03101             // so we are for sure on intx pes (the receiver is the coupler mesh)
03102             assert( *pid_intx >= 0 );
03103             appData& dataIntx      = context.appDatas[*pid_intx];
03104             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03105             recvGraph1->set_cover_set( cover_set );
03106             context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1;
03107         }
03108         for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
03109         {
03110             int procToSendTo       = mit->first;
03111             std::set< int >& idSet = mit->second;
03112             for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
03113             {
03114                 int n = TLcovIDs.get_n();
03115                 TLcovIDs.reserve();
03116                 TLcovIDs.vi_wr[2 * n]     = procToSendTo;  // send to processor
03117                 TLcovIDs.vi_wr[2 * n + 1] = *sit;          // global id of the needed source cell
03118             }
03119         }
03120     }
03121 
03122     ProcConfig pc( global );  // proc config does the crystal router
03123     pc.crystal_router()->gs_transfer( 1, TLcovIDs,
03124                                       0 );  // communication towards component tasks, telling each sender which
03125     // ids are needed by each receiver task
03126 
03127     // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
03128     if( NULL != sendGraph )
03129     {
03130         // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
03131         // receiver task
03132         ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph );  // just copy
03133         sendGraph1->set_context_id( *context_id );
03134         context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
03135         rval                                           = sendGraph1->settle_send_graph( TLcovIDs );MB_CHK_ERR( rval );
03136     }
03137     return moab::MB_SUCCESS;  // success
03138 }
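// note: after iMOAB_CoverageGraph returns, both sides hold a ParCommGraph keyed by *context_id:
// the source component stores it in appDatas[*pid_src].pgraph, and the coupler (migrated mesh)
// side stores the coverage-aware copy in appDatas[*pid_migr].pgraph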
03139 
03140 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
03141 {
03142     assert( prefix && strlen( prefix ) );
03143 
03144     ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
03145     std::string prefix_str( prefix );
03146 
03147     if( NULL != cgraph )
03148         cgraph->dump_comm_information( prefix_str, *is_sender );
03149     else
03150     {
03151         std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
03152                   << " context: " << *context_id << "\n";
03153     }
03154     return moab::MB_SUCCESS;
03155 }
03156 
03157 #endif  // #ifdef MOAB_HAVE_TEMPESTREMAP
03158 
03159 #endif  // #ifdef MOAB_HAVE_MPI
03160 
03161 #ifdef MOAB_HAVE_TEMPESTREMAP
03162 
03163 #ifdef MOAB_HAVE_NETCDF
03164 ErrCode iMOAB_LoadMappingWeightsFromFile(
03165     iMOAB_AppID pid_intersection,
03166     iMOAB_AppID pid_cpl,
03167     int* col_or_row,
03168     int* type,
03169     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03170     const iMOAB_String remap_weights_filename )
03171 {
03172     ErrorCode rval;
03173     bool row_based_partition = true;
03174     if( *col_or_row == 1 ) row_based_partition = false;  // do a column based partition;
03175 
03176     // get the local degrees of freedom, from the pid_cpl and type of mesh
03177 
03178     // Get the source and target data and pcomm objects
03179     appData& data_intx       = context.appDatas[*pid_intersection];
03180     TempestMapAppData& tdata = data_intx.tempestData;
03181 
03182     // Get the handle to the remapper object
03183     if( tdata.remapper == NULL )
03184     {
03185         // Now allocate and initialize the remapper object
03186 #ifdef MOAB_HAVE_MPI
03187         ParallelComm* pco = context.pcomms[*pid_intersection];
03188         tdata.remapper    = new moab::TempestRemapper( context.MBI, pco );
03189 #else
03190         tdata.remapper = new moab::TempestRemapper( context.MBI );
03191 #endif
03192         tdata.remapper->meshValidate     = true;
03193         tdata.remapper->constructEdgeMap = true;
03194         // Do not create new filesets; Use the sets from our respective applications
03195         tdata.remapper->initialize( false );
03196         tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03197     }
03198 
03199     // Setup loading of weights onto TempestOnlineMap
03200     // Set the context for the remapping weights computation
03201     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
03202 
03203     // Now allocate and initialize the remapper object
03204     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03205     assert( weightMap != NULL );
03206 
03207     if( *pid_cpl >= 0 )  // it means we are looking for how to distribute the degrees of freedom, new map reader
03208     {
03209         appData& data1     = context.appDatas[*pid_cpl];
03210         EntityHandle fset1 = data1.file_set;  // this is source or target, depending on direction
03211 
03212         // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03213         Tag gdsTag;
03214 
03215         // find the values on first cell
03216         int lenTagType1 = 1;
03217         if( *type == 1 )
03218         {
03219             rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03220             rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03221         }
03222         Tag tagType2 = context.MBI->globalId_tag();
03223 
03224         std::vector< int > dofValues;
03225 
03226         // populate first tuple
03227         Range
03228             ents_of_interest;  // will be filled with entities on coupler, from which we will get the DOFs, based on type
03229         int ndofPerEl = 1;
03230 
03231         if( *type == 1 )
03232         {
03233             assert( gdsTag );
03234             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03235             dofValues.resize( ents_of_interest.size() * lenTagType1 );
03236             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );
03237             ndofPerEl = lenTagType1;
03238         }
03239         else if( *type == 2 )
03240         {
03241             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03242             dofValues.resize( ents_of_interest.size() );
03243             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );  // just global ids
03244         }
03245         else if( *type == 3 )  // for FV meshes, just get the global id of cell
03246         {
03247             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03248             dofValues.resize( ents_of_interest.size() );
03249             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );  // just global ids
03250         }
03251         else
03252         {
03253             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
03254         }
03255         // pass ordered dofs, and unique
03256         std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
03257 
03258         std::sort( orderDofs.begin(), orderDofs.end() );
03259         orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() );  // remove duplicates
03260 
03261         rval = weightMap->ReadParallelMap( remap_weights_filename, orderDofs, row_based_partition );MB_CHK_ERR( rval );
03262 
03263         // if we are on target mesh (row based partition)
03264         if( row_based_partition )
03265         {
03266             tdata.pid_dest = pid_cpl;
03267             tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset1, ents_of_interest );
03268             weightMap->SetDestinationNDofsPerElement( ndofPerEl );
03269             weightMap->set_row_dc_dofs( dofValues );  // will set row_dtoc_dofmap
03270         }
03271         else
03272         {
03273             tdata.pid_src = pid_cpl;
03274             tdata.remapper->SetMeshSet( Remapper::SourceMesh, fset1, ents_of_interest );
03275             weightMap->SetSourceNDofsPerElement( ndofPerEl );
03276             weightMap->set_col_dc_dofs( dofValues );  // will set col_dtoc_dofmap
03277         }
03278     }
03279     else  // old reader, trivial distribution by row
03280     {
03281         std::vector< int > tmp_owned_ids;  // this will do a trivial row distribution
03282         rval = weightMap->ReadParallelMap( remap_weights_filename, tmp_owned_ids, row_based_partition );MB_CHK_ERR( rval );
03283     }
03284 
03285     return moab::MB_SUCCESS;
03286 }
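// Illustrative usage sketch (not part of the original source; the application ids and the
// file name are hypothetical):
//
//     int col_or_row = 0;  // anything but 1: row-based (target) partition of the map
//     int type       = 3;  // FV mesh, use the GLOBAL_ID of cells as DOFs
//     ErrCode rc = iMOAB_LoadMappingWeightsFromFile( pid_map, pid_cpl, &col_or_row, &type,
//                                                    "scalar", "map_src_to_tgt.nc" );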
03287 
03288 ErrCode iMOAB_WriteMappingWeightsToFile(
03289     iMOAB_AppID pid_intersection,
03290     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03291     const iMOAB_String remap_weights_filename )
03292 {
03293     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03294     assert( remap_weights_filename && strlen( remap_weights_filename ) );
03295 
03296     ErrorCode rval;
03297 
03298     // Get the source and target data and pcomm objects
03299     appData& data_intx       = context.appDatas[*pid_intersection];
03300     TempestMapAppData& tdata = data_intx.tempestData;
03301 
03302     // Get the handle to the remapper object
03303     assert( tdata.remapper != NULL );
03304 
03305     // Now get online weights object and ensure it is valid
03306     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03307     assert( weightMap != NULL );
03308 
03309     std::string filename = std::string( remap_weights_filename );
03310 
03311     // Write the map file to disk in parallel using either HDF5 or SCRIP interface
03312     rval = weightMap->WriteParallelMap( filename );MB_CHK_ERR( rval );
03313 
03314     return moab::MB_SUCCESS;
03315 }
03316 
03317 #ifdef MOAB_HAVE_MPI
03318 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
03319                               iMOAB_AppID pid2,
03320                               iMOAB_AppID pid3,
03321                               MPI_Comm* jointcomm,
03322                               MPI_Group* groupA,
03323                               MPI_Group* groupB,
03324                               int* type,
03325                               int* comp1,
03326                               int* comp2,
03327                               int* direction )
03328 {
03329     assert( jointcomm );
03330     assert( groupA );
03331     assert( groupB );
03332 
03333     MPI_Comm joint_communicator =
03334         ( context.appDatas[*pid1].is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) )
03335                                              : *jointcomm );
03336     MPI_Group group_first =
03337         ( context.appDatas[*pid1].is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
03338     MPI_Group group_second =
03339         ( context.appDatas[*pid1].is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
03340 
03341     ErrorCode rval = MB_SUCCESS;
03342     int localRank = 0, numProcs = 1;
03343 
03344     // Get the local rank and number of processes in the joint communicator
03345     MPI_Comm_rank( joint_communicator, &localRank );
03346     MPI_Comm_size( joint_communicator, &numProcs );
03347 
03348     // Next instantiate the par comm graph
03349     // here we need to look at direction
03350     // this direction is good for atm source -> ocn target coupler example
03351     ParCommGraph* cgraph = NULL;
03352     if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
03353 
03354     ParCommGraph* cgraph_rev = NULL;
03355     if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
03356 
03357     // we should search if we have another pcomm with the same comp ids in the list already
03358     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
03359     if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;      // the context will be the other comp
03360     if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev;  // from 2 to 1
03361 
03362     // each model has a list of global ids that will need to be sent by gs to rendezvous the other
03363     // model on the joint_communicator
03364     TupleList TLcomp1;
03365     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03366     TupleList TLcomp2;
03367     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03368 
03369     // will push_back a new tuple, if needed
03370     TLcomp1.enableWriteAccess();
03371 
03372     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03373     Tag gdsTag;
03374 
03375     // find the values on first cell
03376     int lenTagType1 = 1;
03377     if( *type == 1 )
03378     {
03379         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03380         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03381     }
03382     Tag tagType2 = context.MBI->globalId_tag();
03383 
03384     std::vector< int > valuesComp1;
03385 
03386     // populate first tuple
03387     Range ents_of_interest;  // will be filled with entities on pid1, that need to be distributed,
03388                              // rearranged in split_ranges map
03389     if( *pid1 >= 0 )
03390     {
03391         appData& data1     = context.appDatas[*pid1];
03392         EntityHandle fset1 = data1.file_set;
03393 
03394         if( *type == 1 )
03395         {
03396             assert( gdsTag );
03397             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03398             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
03399             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
03400         }
03401         else if( *type == 2 )
03402         {
03403             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03404             valuesComp1.resize( ents_of_interest.size() );
03405             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03406         }
03407         else if( *type == 3 )  // for FV meshes, just get the global id of cell
03408         {
03409             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03410             valuesComp1.resize( ents_of_interest.size() );
03411             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03412         }
03413         else
03414         {
03415             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
03416         }
03417         // now fill the tuple list with info and markers
03418         // because we will send only the ids, order and compress the list
03419         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
03420         TLcomp1.resize( uniq.size() );
03421         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03422         {
03423             // to proc, marker, element local index, index in el
03424             int marker               = *sit;
03425             int to_proc              = marker % numProcs;
03426             int n                    = TLcomp1.get_n();
03427             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
03428             TLcomp1.vi_wr[2 * n + 1] = marker;
03429             TLcomp1.inc_n();
03430         }
03431     }
03432 
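    // rendezvous routing: each marker (global id / DOF value) goes to joint rank marker % numProcs,
    // so identical markers from both components meet on the same task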
03433     ProcConfig pc( joint_communicator );  // proc config does the crystal router
03434     pc.crystal_router()->gs_transfer( 1, TLcomp1,
03435                                       0 );  // communication towards joint tasks, with markers
03436     // sort by value (key 1)
03437 #ifdef VERBOSE
03438     std::stringstream ff1;
03439     ff1 << "TLcomp1_" << localRank << ".txt";
03440     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03441 #endif
03442     moab::TupleList::buffer sort_buffer;
03443     sort_buffer.buffer_init( TLcomp1.get_n() );
03444     TLcomp1.sort( 1, &sort_buffer );
03445     sort_buffer.reset();
03446 #ifdef VERBOSE
03447     // after sorting
03448     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03449 #endif
03450     // do the same, for the other component, number2, with type2
03451     // start copy
03452     TLcomp2.enableWriteAccess();
03453 
03454     moab::TempestOnlineMap* weightMap = NULL;  // declare it outside, but it is meaningful only when *pid2 >= 0
03455     // (i.e., we are on the coupler PEs, where the read map and coupler procs exist)
03456     // populate the second tuple with ids from the read map: we need row_gdofmap and col_gdofmap
03457     std::vector< int > valuesComp2;
03458     if( *pid2 >= 0 )  // we are now on coupler, map side
03459     {
03460         appData& data2           = context.appDatas[*pid2];
03461         TempestMapAppData& tdata = data2.tempestData;
03462         // should be only one map, read from file
03463         assert( tdata.weightMaps.size() == 1 );
03464         // maybe we need to check it is the map we expect
03465         weightMap = tdata.weightMaps.begin()->second;
03466         // std::vector<int> ids_of_interest;
03467         // do a deep copy of the ids of interest: row ids or col ids, target or source direction
03468         if( *direction == 1 )
03469         {
03470             // we are interested in col ids, source
03471             // new method from moab::TempestOnlineMap
03472             rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
03473         }
03474         else if( *direction == 2 )
03475         {
03476             // we are interested in row ids, for target ids
03477             rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
03478         }
03479         //
03480 
03481         // now fill the tuple list with info and markers
03482         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
03483         TLcomp2.resize( uniq.size() );
03484         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03485         {
03486             // to proc, marker, element local index, index in el
03487             int marker               = *sit;
03488             int to_proc              = marker % numProcs;
03489             int n                    = TLcomp2.get_n();
03490             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
03491             TLcomp2.vi_wr[2 * n + 1] = marker;
03492             TLcomp2.inc_n();
03493         }
03494     }
03495     pc.crystal_router()->gs_transfer( 1, TLcomp2,
03496                                       0 );  // communication towards joint tasks, with markers
03497     // sort by value (key 1)
03498     // in the rendezvous approach, markers meet at the rendezvous processor, rank marker % nprocs
03499 #ifdef VERBOSE
03500     std::stringstream ff2;
03501     ff2 << "TLcomp2_" << localRank << ".txt";
03502     TLcomp2.print_to_file( ff2.str().c_str() );
03503 #endif
03504     sort_buffer.buffer_reserve( TLcomp2.get_n() );
03505     TLcomp2.sort( 1, &sort_buffer );
03506     sort_buffer.reset();
03507     // end copy
03508 #ifdef VERBOSE
03509     TLcomp2.print_to_file( ff2.str().c_str() );
03510 #endif
03511     // need to send back the info, from the rendezvous point, for each of the values
03512     /* go over each value on the local process in the joint communicator:
03513 
03514     we now have to send back the info needed for communication;
03515     loop in sync over both TLcomp1 and TLcomp2 on the local process;
03516     build new tuple lists to send back, populating them at the same time,
03517     keyed by the marker
03518     */
03519 
03520     TupleList TLBackToComp1;
03521     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
03522     TLBackToComp1.enableWriteAccess();
03523 
03524     TupleList TLBackToComp2;
03525     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
03526     TLBackToComp2.enableWriteAccess();
03527 
03528     int n1 = TLcomp1.get_n();
03529     int n2 = TLcomp2.get_n();
03530 
03531     int indexInTLComp1 = 0;
03532     int indexInTLComp2 = 0;  // advance both, according to the marker
03533     if( n1 > 0 && n2 > 0 )
03534     {
03535 
03536         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
03537         {
03538             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
03539             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
03540             if( currentValue1 < currentValue2 )
03541             {
03542                 // we have a big problem; basically, we are saying that
03543                 // dof currentValue is on one model and not on the other
03544                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
03545                 indexInTLComp1++;
03546                 continue;
03547             }
03548             if( currentValue1 > currentValue2 )
03549             {
03550                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
03551                 indexInTLComp2++;
03552                 continue;
03553             }
03554             int size1 = 1;
03555             int size2 = 1;
03556             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
03557                 size1++;
03558             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
03559                 size2++;
03560             // must be found in both lists, find the start and end indices
03561             for( int i1 = 0; i1 < size1; i1++ )
03562             {
03563                 for( int i2 = 0; i2 < size2; i2++ )
03564                 {
03565                     // send the info back to components
03566                     int n = TLBackToComp1.get_n();
03567                     TLBackToComp1.reserve();
03568                     TLBackToComp1.vi_wr[3 * n] =
03569                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
03570                                                                      // came from, info from comp2
03571                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03572                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
03573                     n                              = TLBackToComp2.get_n();
03574                     TLBackToComp2.reserve();
03575                     TLBackToComp2.vi_wr[3 * n] =
03576                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
03577                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03578                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
03579                     // what if there are repeated markers in TLcomp2? increase just index2
03580                 }
03581             }
03582             indexInTLComp1 += size1;
03583             indexInTLComp2 += size2;
03584         }
03585     }
03586     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about
03587     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
03588 
03589     TupleList TLv;  // vertices
03590     TupleList TLc;  // cells if needed (not type 2)
03591 
03592     if( *pid1 >= 0 )
03593     {
03594         // we are on original comp 1 tasks
03595         // before ordering
03596         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
03597         // processors it communicates with
03598 #ifdef VERBOSE
03599         std::stringstream f1;
03600         f1 << "TLBack1_" << localRank << ".txt";
03601         TLBackToComp1.print_to_file( f1.str().c_str() );
03602 #endif
03603         // so we are now on pid1; we now know where each marker has to go
03604         // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map
03605         rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
03606         // we can just send vertices and elements, with crystal routers;
03607         // on the receiving end, make sure they are not duplicated, by looking at the global id
03608         // if *type is 1, also send global_dofs tag in the element tuple
03609         rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
03610     }
03611     pc.crystal_router()->gs_transfer( 1, TLv, 0 );  // communication towards coupler tasks, with mesh vertices
03612     if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );  // those are cells
03613 
03614     if( *pid3 >= 0 )  // will receive the mesh, on coupler pes!
03615     {
03616         appData& data3     = context.appDatas[*pid3];
03617         EntityHandle fset3 = data3.file_set;
03618         Range primary_ents3;                 // vertices for type 2, cells of dim 2 for type 1 or 3
03619         std::vector< int > values_entities;  // will be the size of primary_ents3 * lenTagType1
03620         rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
03621                                                   values_entities );MB_CHK_ERR( rval );
03622         iMOAB_UpdateMeshInfo( pid3 );
03623         int ndofPerEl = 1;
03624         if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
03625         // because we are on the coupler, we know that the read map pid2 exists
03626         assert( *pid2 >= 0 );
03627         appData& dataIntx        = context.appDatas[*pid2];
03628         TempestMapAppData& tdata = dataIntx.tempestData;
03629 
03630         // if we are on source coverage, direction 1, we can set covering mesh, covering cells
03631         if( 1 == *direction )
03632         {
03633             tdata.pid_src = pid3;
03634             tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, primary_ents3 );
03635             weightMap->SetSourceNDofsPerElement( ndofPerEl );
03636             weightMap->set_col_dc_dofs( values_entities );  // will set col_dtoc_dofmap
03637         }
03638         // if we are on target, we can set the target cells
03639         else
03640         {
03641             tdata.pid_dest = pid3;
03642             tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, primary_ents3 );
03643             weightMap->SetDestinationNDofsPerElement( ndofPerEl );
03644             weightMap->set_row_dc_dofs( values_entities );  // will set row_dtoc_dofmap
03645         }
03646     }
03647 
03648     return moab::MB_SUCCESS;
03649 }
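// Summary of the migration above: (1) both sides push their markers to rendezvous ranks
// (marker % numProcs); (2) matched markers are routed back so the component side can settle
// split_ranges; (3) vertices (TLv) and, for cell-based types, cells (TLc) are pushed through
// the crystal router; (4) the coupler side rebuilds the mesh and wires the source/target DOFs
// into the already-read TempestOnlineMap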
03650 
03651 #endif  // #ifdef MOAB_HAVE_MPI
03652 
03653 #endif  // #ifdef MOAB_HAVE_NETCDF
03654 
03655 #define USE_API
03656 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
03657 {
03658     ErrorCode rval;
03659     moab::CartVect pos;
03660 
03661     Range& verts                   = context.appDatas[*pid].all_verts;
03662     moab::EntityHandle firstVertex = ( verts[0] );
03663 
03664     // coordinate data
03665     rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
03666 
03667     // compute the distance from origin
03668     // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
03669     *radius = pos.length();
03670     return moab::MB_SUCCESS;
03671 }
03672 
03673 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
03674 {
03675     ErrorCode rval;
03676     ErrCode ierr;
03677     bool validate = true;
03678 
03679     double radius_source = 1.0;
03680     double radius_target = 1.0;
03681     const double epsrel  = ReferenceTolerance;  // ReferenceTolerance is defined in Defines.h in the tempestremap source
03682     const double boxeps  = 1.e-6;
03683 
03684     // Get the source and target data and pcomm objects
03685     appData& data_src  = context.appDatas[*pid_src];
03686     appData& data_tgt  = context.appDatas[*pid_tgt];
03687     appData& data_intx = context.appDatas[*pid_intx];
03688 #ifdef MOAB_HAVE_MPI
03689     ParallelComm* pco_intx = context.pcomms[*pid_intx];
03690 #endif
03691 
03692     // Mesh intersection has already been computed; Return early.
03693     TempestMapAppData& tdata = data_intx.tempestData;
03694     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
03695 
03696     bool is_parallel = false, is_root = true;
03697     int rank = 0;
03698 #ifdef MOAB_HAVE_MPI
03699     if( pco_intx )
03700     {
03701         rank        = pco_intx->rank();
03702         is_parallel = ( pco_intx->size() > 1 );
03703         is_root     = ( rank == 0 );
03704         rval        = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
03705     }
03706 #endif
03707 
03708     moab::DebugOutput outputFormatter( std::cout, rank, 0 );
03709     outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
03710 
03711     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
03712     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
03713 
03714     // Rescale the radius of both to compute the intersection
03715     ComputeSphereRadius( pid_src, &radius_source );
03716     ComputeSphereRadius( pid_tgt, &radius_target );
03717 #ifdef VERBOSE
03718     if( is_root )
03719         outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
03720                                 radius_target );
03721 #endif
03722 
03723     /* Let us make sure that the radii match for the source and target meshes. If not, rescale now and
03724      * unscale later. */
03725     double defaultradius = 1.0;
03726     if( fabs( radius_source - radius_target ) > 1e-10 )
03727     { /* the radii are different */
03728         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
03729         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
03730     }
03731 
03732     // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
03733 #ifdef MOAB_HAVE_TEMPESTREMAP
03734     IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
03735 #else
03736     IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
03737 #endif
03738 
03739     // print verbosely about the problem setting
03740     bool use_kdtree_search = false;
03741     double srctgt_areas[2], srctgt_areas_glb[2];
03742     {
03743         moab::Range rintxverts, rintxelems;
03744         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
03745         rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
03746         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
03747         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );MB_CHK_ERR( rval );
03748         srctgt_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
03749 #ifdef VERBOSE
03750         if( is_root )
03751             outputFormatter.printf( 0, "The red set contains %d vertices and %d elements \n", rintxverts.size(),
03752                                     rintxelems.size() );
03753 #endif
03754 
03755         moab::Range bintxverts, bintxelems;
03756         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
03757         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
03758         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
03759         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );MB_CHK_ERR( rval );
03760         srctgt_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
03761 #ifdef VERBOSE
03762         if( is_root )
03763             outputFormatter.printf( 0, "The blue set contains %d vertices and %d elements \n", bintxverts.size(),
03764                                     bintxelems.size() );
03765 #endif
03766 #ifdef MOAB_HAVE_MPI
03767         MPI_Allreduce( &srctgt_areas[0], &srctgt_areas_glb[0], 2, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
03768 #else
03769         srctgt_areas_glb[0] = srctgt_areas[0];
03770         srctgt_areas_glb[1] = srctgt_areas[1];
03771 #endif
03772         use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
03773     }
03774 
03775     data_intx.dimension = data_tgt.dimension;
03776     // set the context for the source and destination applications
03777     tdata.pid_src  = pid_src;
03778     tdata.pid_dest = pid_tgt;
03779 
03780     // Now allocate and initialize the remapper object
03781 #ifdef MOAB_HAVE_MPI
03782     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
03783 #else
03784     tdata.remapper = new moab::TempestRemapper( context.MBI );
03785 #endif
03786     tdata.remapper->meshValidate     = true;
03787     tdata.remapper->constructEdgeMap = true;
03788 
03789     // Do not create new filesets; Use the sets from our respective applications
03790     tdata.remapper->initialize( false );
03791     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
03792     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
03793     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03794 
03795     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
03796     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
03797 
03798     // First, compute the covering source set.
03799     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
03800 
03801     // Next, compute intersections with MOAB.
03802     rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );MB_CHK_ERR( rval );
03803 
03804     // Mapping computation done
03805     if( validate )
03806     {
03807         double local_area,
03808             global_areas[3];  // Array for Initial area, and through Method 1 and Method 2
03809         local_area = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
03810 
03811         global_areas[0] = srctgt_areas_glb[0];
03812         global_areas[1] = srctgt_areas_glb[1];
03813 
03814 #ifdef MOAB_HAVE_MPI
03815         if( is_parallel )
03816         {
03817             MPI_Reduce( &local_area, &global_areas[2], 1, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
03818         }
03819         else
03820         {
03821             global_areas[2] = local_area;
03822         }
03823 #else
03824         global_areas[2] = local_area;
03825 #endif
03826         if( is_root )
03827         {
03828             outputFormatter.printf( 0,
03829                                     "initial area: source mesh = %12.14f, target mesh = %12.14f, "
03830                                     "overlap mesh = %12.14f\n",
03831                                     global_areas[0], global_areas[1], global_areas[2] );
03832             outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
03833                                     fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
03834                                     fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
03835         }
03836     }
03837 
03838     return moab::MB_SUCCESS;
03839 }
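// Illustrative call order (a sketch, not part of the original source): register the source,
// target and intersection applications, load or migrate the source/target meshes, call
// iMOAB_ComputeMeshIntersectionOnSphere( pid_src, pid_tgt, pid_intx ), then compute the
// projection weights with iMOAB_ComputeScalarProjectionWeights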
03840 
03841 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
03842 {
03843     ErrorCode rval;
03844     ErrCode ierr;
03845 
03846     double radius_source = 1.0;
03847     double radius_target = 1.0;
03848     const double epsrel  = ReferenceTolerance;  // ReferenceTolerance is defined in Defines.h in the tempestremap source
03849     const double boxeps  = 1.e-8;
03850 
03851     // Get the source and target data and pcomm objects
03852     appData& data_src  = context.appDatas[*pid_src];
03853     appData& data_tgt  = context.appDatas[*pid_tgt];
03854     appData& data_intx = context.appDatas[*pid_intx];
03855 #ifdef MOAB_HAVE_MPI
03856     ParallelComm* pco_intx = context.pcomms[*pid_intx];
03857 #endif
03858 
03859     // Mesh intersection has already been computed; Return early.
03860     TempestMapAppData& tdata = data_intx.tempestData;
03861     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
03862 
03863 #ifdef MOAB_HAVE_MPI
03864     if( pco_intx )
03865     {
03866         rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
03867     }
03868 #endif
03869 
03870     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
03871     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
03872 
03873     // Rescale the radius of both to compute the intersection
03874     ComputeSphereRadius( pid_src, &radius_source );
03875     ComputeSphereRadius( pid_tgt, &radius_target );
03876 
03877     IntxAreaUtils areaAdaptor;
03878     // print verbosely about the problem setting
03879     {
03880         moab::Range rintxverts, rintxelems;
03881         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
03882         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
03883         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
03884         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
03885 #ifdef VERBOSE
03886         std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
03887                   << " elements \n";
03888 #endif
03889 
03890         moab::Range bintxverts, bintxelems;
03891         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
03892         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
03893         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
03894         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
03895 #ifdef VERBOSE
03896         std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
03897                   << " elements \n";
03898 #endif
03899     }
03900 
03901     data_intx.dimension = data_tgt.dimension;
03902     // set the context for the source and destination applications
03904     tdata.pid_src         = pid_src;
03905     tdata.pid_dest        = pid_tgt;
03906     data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
03907     assert( data_intx.point_cloud == true );
03908 
03909     // Now allocate and initialize the remapper object
03910 #ifdef MOAB_HAVE_MPI
03911     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
03912 #else
03913     tdata.remapper = new moab::TempestRemapper( context.MBI );
03914 #endif
03915     tdata.remapper->meshValidate     = true;
03916     tdata.remapper->constructEdgeMap = true;
03917 
03918     // Do not create new filesets; use the sets from our respective applications
03919     tdata.remapper->initialize( false );
03920     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
03921     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
03922     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03923 
03924     /* Make sure that the radii of the source and target meshes match. If not, rescale both
03925      * to the unit sphere now and unscale later. */
03926     if( fabs( radius_source - radius_target ) > 1e-10 )
03927     { /* the radii are different */
03928         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
03929         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
03930     }
03931 
03932     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
03933     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
03934 
03935     // First, compute the covering source set.
03936     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
03937 
03938 #ifdef MOAB_HAVE_MPI
03939     /* VSM: This context should be set on the data_src but it would overwrite the source
03940        covering set context in case it is coupled to another APP as well.
03941        This needs a redesign. */
03942     // data_intx.covering_set = tdata.remapper->GetCoveringSet();
03943     // data_src.covering_set = tdata.remapper->GetCoveringSet();
03944 #endif
03945 
03946     // Now compute the global-to-local ID maps needed by the remapper and the weight generation
03947     rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
03948 
03949     return moab::MB_SUCCESS;
03950 }
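/* Typical call sequence (an illustrative sketch, not part of iMOAB.cpp): the intersection /
 * coverage setup above is executed once per source-target mesh pair; the routines defined
 * below then generate the projection weights once and apply them repeatedly. The pid_cpl
 * handle and the "scalar" weight identifier are hypothetical placeholders.
 *
 *   // 1. intersection / coverage setup on pid_src, pid_tgt, pid_cpl   (the routine above)
 *   // 2. iMOAB_ComputeScalarProjectionWeights( pid_cpl, "scalar", ... );              // once
 *   // 3. iMOAB_ApplyScalarProjectionWeights( pid_cpl, "scalar", src_tags, tgt_tags ); // each coupling step
 */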
03951 
03952 ErrCode iMOAB_ComputeScalarProjectionWeights(
03953     iMOAB_AppID pid_intx,
03954     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03955     const iMOAB_String disc_method_source,
03956     int* disc_order_source,
03957     const iMOAB_String disc_method_target,
03958     int* disc_order_target,
03959     int* fNoBubble,
03960     int* fMonotoneTypeID,
03961     int* fVolumetric,
03962     int* fInverseDistanceMap,
03963     int* fNoConservation,
03964     int* fValidate,
03965     const iMOAB_String source_solution_tag_dof_name,
03966     const iMOAB_String target_solution_tag_dof_name )
03967 {
03968     moab::ErrorCode rval;
03969 
03970     assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
03971     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03972     assert( disc_method_source && strlen( disc_method_source ) );
03973     assert( disc_method_target && strlen( disc_method_target ) );
03974     assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
03975     assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
03976 
03977     // Get the source and target data and pcomm objects
03978     appData& data_intx       = context.appDatas[*pid_intx];
03979     TempestMapAppData& tdata = data_intx.tempestData;
03980 
03981     // Setup computation of weights
03982     // Set the context for the remapping weights computation
03983     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
03984 
03985     // Call to generate the remap weights with the tempest meshes
03986     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03987     assert( weightMap != NULL );
03988 
03989     GenerateOfflineMapAlgorithmOptions mapOptions;
03990     mapOptions.nPin           = *disc_order_source;
03991     mapOptions.nPout          = *disc_order_target;
03992     mapOptions.fSourceConcave = false;
03993     mapOptions.fTargetConcave = false;
03994 
03995     mapOptions.strMethod = "";
03996     if( fMonotoneTypeID )
03997     {
03998         switch( *fMonotoneTypeID )
03999         {
04000             case 0:
04001                 mapOptions.fMonotone = false;
04002                 break;
04003             case 3:
04004                 mapOptions.strMethod += "mono3;";
04005                 break;
04006             case 2:
04007                 mapOptions.strMethod += "mono2;";
04008                 break;
04009             default:
04010                 mapOptions.fMonotone = true;
04011         }
04012     }
04013     else
04014         mapOptions.fMonotone = false;
04015 
04016     mapOptions.fNoBubble       = ( fNoBubble ? *fNoBubble : false );
04017     mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
04018     mapOptions.fNoCorrectAreas = false;
04019     mapOptions.fNoCheck        = !( fValidate ? *fValidate : true );
04020     if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
04021     if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
04022 
04023     // Now let us compute the local-global mapping and store it in the context
04024     // We need this mapping when computing matvec products and to do reductions in parallel
04025     // Additionally, the call below will also compute weights with TempestRemap
04026     rval = weightMap->GenerateRemappingWeights(
04027         std::string( disc_method_source ),            // const std::string& strInputType
04028         std::string( disc_method_target ),            // const std::string& strOutputType
04029         mapOptions,                                   // GenerateOfflineMapAlgorithmOptions& mapOptions
04030         std::string( source_solution_tag_dof_name ),  // const std::string& srcDofTagName = "GLOBAL_ID"
04031         std::string( target_solution_tag_dof_name )   // const std::string& tgtDofTagName = "GLOBAL_ID"
04032     );MB_CHK_ERR( rval );
04033 
04034     return moab::MB_SUCCESS;
04035 }
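/* A minimal usage sketch (illustrative, not part of iMOAB.cpp): generating first-order
 * finite-volume weights with validation enabled and GLOBAL_ID used for the DoF numbering on
 * both sides. The coupler application id cplAppID, the "scalar" identifier, and the "fv"
 * discretization string (a TempestRemap convention) are assumptions made for this example.
 *
 *   int cplAppID = 5;                        // id of the registered intersection/coupler app (hypothetical)
 *   iMOAB_AppID pid_cpl = &cplAppID;
 *   int orderS = 1, orderT = 1;              // disc_order_source / disc_order_target
 *   int noBubble = 1, monotone = 0, volumetric = 0, invDist = 0, noConserve = 0, validate = 1;
 *   ErrCode ierr = iMOAB_ComputeScalarProjectionWeights( pid_cpl, "scalar",
 *                                                        "fv", &orderS, "fv", &orderT,
 *                                                        &noBubble, &monotone, &volumetric, &invDist,
 *                                                        &noConserve, &validate,
 *                                                        "GLOBAL_ID", "GLOBAL_ID" );
 *   // on success the weights are cached under the "scalar" identifier for later application
 */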
04036 
04037 ErrCode iMOAB_ApplyScalarProjectionWeights(
04038     iMOAB_AppID pid_intersection,
04039     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
04040     const iMOAB_String source_solution_tag_name,
04041     const iMOAB_String target_solution_tag_name )
04042 {
04043     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
04044     assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
04045     assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
04046 
04047     moab::ErrorCode rval;
04048 
04049     // Get the source and target data and pcomm objects
04050     appData& data_intx       = context.appDatas[*pid_intersection];
04051     TempestMapAppData& tdata = data_intx.tempestData;
04052 
04053     // Now allocate and initialize the remapper object
04054     moab::TempestRemapper* remapper   = tdata.remapper;
04055     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
04056 
04057     // we assume that there are separators ":" between the tag names
04058     std::vector< std::string > srcNames;
04059     std::vector< std::string > tgtNames;
04060     std::vector< Tag > srcTagHandles;
04061     std::vector< Tag > tgtTagHandles;
04062     std::string separator( ":" );
04063     std::string src_name( source_solution_tag_name );
04064     std::string tgt_name( target_solution_tag_name );
04065     split_tag_names( src_name, separator, srcNames );
04066     split_tag_names( tgt_name, separator, tgtNames );
04067     if( srcNames.size() != tgtNames.size() )
04068     {
04069         std::cout << " error: mismatched number of source and target tag names \n";
04070         return moab::MB_FAILURE;
04071     }
04072 
04073     for( size_t i = 0; i < srcNames.size(); i++ )
04074     {
04075         Tag tagHandle;
04076         rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
04077         if( MB_SUCCESS != rval || NULL == tagHandle )
04078         {
04079             return moab::MB_FAILURE;
04080         }
04081         srcTagHandles.push_back( tagHandle );
04082         rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
04083         if( MB_SUCCESS != rval || NULL == tagHandle )
04084         {
04085             return moab::MB_FAILURE;
04086         }
04087         tgtTagHandles.push_back( tagHandle );
04088     }
04089 
04090     std::vector< double > solSTagVals;
04091     std::vector< double > solTTagVals;
04092 
04093     moab::Range sents, tents;
04094     if( data_intx.point_cloud )
04095     {
04096         appData& data_src = context.appDatas[*tdata.pid_src];
04097         appData& data_tgt = context.appDatas[*tdata.pid_dest];
04098         if( data_src.point_cloud )
04099         {
04100             moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
04101             solSTagVals.resize( covSrcEnts.size(), -1.0 );
04102             sents = covSrcEnts;
04103         }
04104         else
04105         {
04106             moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
04107             solSTagVals.resize( covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() *
04108                                     weightMap->GetSourceNDofsPerElement(),
04109                                 -1.0 );
04110             sents = covSrcEnts;
04111         }
04112         if( data_tgt.point_cloud )
04113         {
04114             moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
04115             solTTagVals.resize( tgtEnts.size(), -1.0 );
04116             tents = tgtEnts;
04117         }
04118         else
04119         {
04120             moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
04121             solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
04122                                     weightMap->GetDestinationNDofsPerElement(),
04123                                 -1.0 );
04124             tents = tgtEnts;
04125         }
04126     }
04127     else
04128     {
04129         moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
04130         moab::Range& tgtEnts    = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
04131         solSTagVals.resize(
04132             covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
04133         solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
04134                                 weightMap->GetDestinationNDofsPerElement(),
04135                             -1.0 );
04136 
04137         sents = covSrcEnts;
04138         tents = tgtEnts;
04139     }
04140 
04141     for( size_t i = 0; i < srcTagHandles.size(); i++ )
04142     {
04143         // The tag data is np*np*n_el_src
04144         Tag ssolnTag = srcTagHandles[i];
04145         Tag tsolnTag = tgtTagHandles[i];
04146         rval         = context.MBI->tag_get_data( ssolnTag, sents, &solSTagVals[0] );MB_CHK_ERR( rval );
04147 
04148         // Compute the application of weights on the source solution data and store it in the
04149         // destination solution vector. Optionally, the transpose application of the weight
04150         // matrix can be performed by setting the third argument to true.
04151         rval = weightMap->ApplyWeights( solSTagVals, solTTagVals, false );MB_CHK_ERR( rval );
04152 
04153         // The tag data is np*np*n_el_dest
04154         rval = context.MBI->tag_set_data( tsolnTag, tents, &solTTagVals[0] );MB_CHK_ERR( rval );
04155     }
04156 
04157 // #define VERBOSE
04158 #ifdef VERBOSE
04159     ParallelComm* pco_intx = context.pcomms[*pid_intersection];
04160 
04161     int ivar = 0;
04162     {
04163         Tag ssolnTag = srcTagHandles[ivar];
04164         std::stringstream sstr;
04165         sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04166         std::ofstream output_file( sstr.str().c_str() );
04167         for( unsigned i = 0; i < sents.size(); ++i )
04168         {
04169             EntityHandle elem = sents[i];
04170             std::vector< double > locsolSTagVals( 16 );
04171             rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
04172             output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
04173             for( unsigned j = 0; j < 16; ++j )
04174                 output_file << locsolSTagVals[j] << " ";
04175         }
04176         output_file.flush();  // required here
04177         output_file.close();
04178     }
04179     {
04180         std::stringstream sstr;
04181         sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04182         EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
04183                                  context.appDatas[*tdata.pid_dest].file_set };
04184         rval                 = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04185     }
04186     {
04187         std::stringstream sstr;
04188         sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04189         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04190         EntityHandle covering_set = remapper->GetCoveringSet();
04191         EntityHandle sets[2]      = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
04192         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04193     }
04194     {
04195         std::stringstream sstr;
04196         sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04197         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04198         EntityHandle covering_set = remapper->GetCoveringSet();
04199         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
04200     }
04201     {
04202         std::stringstream sstr;
04203         sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04204         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04205         EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
04206         rval                    = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
04207     }
04208     {
04209         std::stringstream sstr;
04210         sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04211         std::ofstream output_file( sstr.str().c_str() );
04212         for( unsigned i = 0; i < solSTagVals.size(); ++i )
04213             output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
04214                         << solSTagVals[i] << "\n";
04215         output_file.flush();  // required here
04216         output_file.close();
04217     }
04218 #endif
04219     // #undef VERBOSE
04220 
04221     return moab::MB_SUCCESS;
04222 }
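/* A minimal usage sketch (illustrative, not part of iMOAB.cpp): projecting two fields in one
 * call. The ":"-separated lists must contain the same number of entries, and every name must
 * refer to an existing MOAB tag on the covering (source) and target meshes, respectively. The
 * tag names "T_src", "u_src", "T_tgt", "u_tgt" and the "scalar" identifier are hypothetical.
 *
 *   ErrCode ierr = iMOAB_ApplyScalarProjectionWeights( pid_cpl, "scalar",
 *                                                      "T_src:u_src", "T_tgt:u_tgt" );
 *   // the "scalar" weights must have been generated beforehand by
 *   // iMOAB_ComputeScalarProjectionWeights on the same intersection application
 */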
04223 
04224 #endif  // MOAB_HAVE_TEMPESTREMAP
04225 
04226 #ifdef __cplusplus
04227 }
04228 #endif