00001 /** \file iMOAB.cpp
00002  */
00003 
00004 #include "moab/MOABConfig.h"
00005 #include "moab/Core.hpp"
00006 #include "AEntityFactory.hpp"
00007 
00008 #ifdef MOAB_HAVE_MPI
00009 #include "moab_mpi.h"
00010 #include "moab/ParallelComm.hpp"
00011 #include "moab/ParCommGraph.hpp"
00012 #include "moab/ParallelMergeMesh.hpp"
00013 #include "moab/IntxMesh/IntxUtils.hpp"
00014 #endif
00015 #include "DebugOutput.hpp"
00016 #include "moab/iMOAB.h"
00017 
00018 /* this is needed because of direct access to hdf5/mhdf */
00019 #ifdef MOAB_HAVE_HDF5
00020 #include "mhdf.h"
00021 #include <H5Tpublic.h>
00022 #endif
00023 
00024 #include "moab/CartVect.hpp"
00025 #include "MBTagConventions.hpp"
00026 #include "moab/MeshTopoUtil.hpp"
00027 #include "moab/ReadUtilIface.hpp"
00028 #include "moab/MergeMesh.hpp"
00029 
00030 #ifdef MOAB_HAVE_TEMPESTREMAP
00031 #include "STLStringHelper.h"
00032 #include "moab/IntxMesh/IntxUtils.hpp"
00033 
00034 #include "moab/Remapping/TempestRemapper.hpp"
00035 #include "moab/Remapping/TempestOnlineMap.hpp"
00036 #endif
00037 
00038 // C++ includes
00039 #include <cassert>
00040 #include <sstream>
00041 #include <iostream>
00042 
00043 using namespace moab;
00044 
00045 // #define VERBOSE
00046 
00047 // global variables; should they be organized in a structure, for easier reference?
00048 // or should they remain global, as they are now?
00049 
00050 #ifdef __cplusplus
00051 extern "C" {
00052 #endif
00053 
00054 #ifdef MOAB_HAVE_TEMPESTREMAP
00055 struct TempestMapAppData
00056 {
00057     moab::TempestRemapper* remapper;
00058     std::map< std::string, moab::TempestOnlineMap* > weightMaps;
00059     iMOAB_AppID pid_src;
00060     iMOAB_AppID pid_dest;
00061 };
00062 #endif
00063 
00064 struct appData
00065 {
00066     EntityHandle file_set;
00067     int global_id;  // external component id, unique for application
00068     std::string name;
00069     Range all_verts;
00070     Range local_verts;  // may include vertices shared at the interface but not owned
00071     // these vertices would be all_verts if no ghosting were required
00072     Range ghost_vertices;  // locally ghosted from other processors
00073     Range primary_elems;
00074     Range owned_elems;
00075     Range ghost_elems;
00076     int dimension;             // 2 or 3, dimension of primary elements (redundant?)
00077     long num_global_elements;  // union of all elements in primary_elems; either from hdf5
00078                                // reading or from a reduce
00079     long num_global_vertices;  // union of all nodes, after sharing is resolved; it could be
00080                                // determined from hdf5 reading
00081     Range mat_sets;
00082     std::map< int, int > matIndex;  // map from global block id to index in mat_sets
00083     Range neu_sets;
00084     Range diri_sets;
00085     std::map< std::string, Tag > tagMap;
00086     std::vector< Tag > tagList;
00087     bool point_cloud;
00088     bool is_fortran;
00089 
00090 #ifdef MOAB_HAVE_MPI
00091     // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
00092     // application
00093     std::map< int, ParCommGraph* > pgraph;  // map from context id to the ParCommGraph*
00094 #endif
00095 
00096 #ifdef MOAB_HAVE_TEMPESTREMAP
00097     TempestMapAppData tempestData;
00098 #endif
00099 };
00100 
00101 struct GlobalContext
00102 {
00103     // are there reasons to have multiple moab inits? Is ref count needed?
00104     Interface* MBI;
00105     // we should also have the default tags stored, initialized
00106     Tag material_tag, neumann_tag, dirichlet_tag,
00107         globalID_tag;  // material, neumann, dirichlet,  globalID
00108     int refCountMB;
00109     int iArgc;
00110     iMOAB_String* iArgv;
00111     int unused_pid;
00112 
00113     std::map< std::string, int > appIdMap;  // from app string (uppercase) to app id
00114     std::map< int, int > appIdCompMap;      // from component id to app id
00115 
00116 #ifdef MOAB_HAVE_MPI
00117     std::vector< ParallelComm* > pcomms;  // created in order of applications, one moab::ParallelComm for each
00118 
00119 #ifdef MOAB_HAVE_ZOLTAN
00120     // this data structure exists only on the root PE of the coupler
00121     // it will store the buffer with the RCB cuts from which the Zoltan zz structure can be de-serialized,
00122     // to be used in the partitioning
00123     //
00124     std::vector< char > uniqueZoltanBuffer;
00125 #endif
00126 
00127 #endif
00128 
00129     std::vector< appData > appDatas;  // the same order as pcomms
00130     int globalrank, worldprocs;
00131     bool MPI_initialized;
00132 
00133     GlobalContext()
00134     {
00135         MBI        = 0;
00136         refCountMB = 0;
00137         unused_pid = 0;
00138     }
00139 };
00140 
00141 static struct GlobalContext context;
00142 
00143 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
00144 {
00145     if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
00146 
00147     context.iArgc = argc;
00148     context.iArgv = argv;  // shallow copy
00149 
00150     if( 0 == context.refCountMB )
00151     {
00152         context.MBI = new( std::nothrow ) moab::Core;
00153         // retrieve the default tags
00154         const char* const shared_set_tag_names[] = {MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME,
00155                                                     GLOBAL_ID_TAG_NAME};
00156         // material blocks, surface BC (Neumann), vertex BC (Dirichlet), and global id
00157         Tag gtags[4];
00158 
00159         for( int i = 0; i < 4; i++ )
00160         {
00161 
00162             ErrorCode rval =
00163                 context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
00164         }
00165 
00166         context.material_tag  = gtags[0];
00167         context.neumann_tag   = gtags[1];
00168         context.dirichlet_tag = gtags[2];
00169         context.globalID_tag  = gtags[3];
00170     }
00171 
00172     context.MPI_initialized = false;
00173 #ifdef MOAB_HAVE_MPI
00174     int flagInit;
00175     MPI_Initialized( &flagInit );
00176 
00177     if( flagInit && !context.MPI_initialized )
00178     {
00179         MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
00180         MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
00181         context.MPI_initialized = true;
00182     }
00183 #endif
00184 
00185     context.refCountMB++;
00186     return moab::MB_SUCCESS;
00187 }
00188 
00189 ErrCode iMOAB_InitializeFortran()
00190 {
00191     return iMOAB_Initialize( 0, 0 );
00192 }
00193 
00194 ErrCode iMOAB_Finalize()
00195 {
00196     context.refCountMB--;
00197 
00198     if( 0 == context.refCountMB )
00199     {
00200         delete context.MBI;
00201     }
00202 
00203     return MB_SUCCESS;
00204 }
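
// Illustrative usage sketch (not part of iMOAB itself): the reference-counted
// lifecycle of the library. Each iMOAB_Initialize must be matched by an
// iMOAB_Finalize; the MOAB Core instance is deleted only when the reference
// count drops back to zero. The component id (10) and the application name
// below are made-up examples, inside a main()-like MPI driver.
//
//   MPI_Init( &argc, &argv );
//   iMOAB_Initialize( argc, argv );
//   MPI_Comm comm  = MPI_COMM_WORLD;
//   int compid     = 10;
//   int pid        = -1;
//   char appname[] = "OCEAN";
//   iMOAB_RegisterApplication( appname, &comm, &compid, &pid );
//   // ... load and query the mesh through the iMOAB calls below ...
//   iMOAB_DeregisterApplication( &pid );
//   iMOAB_Finalize();
//   MPI_Finalize();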
00205 
00206 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
00207 #ifdef MOAB_HAVE_MPI
00208                                    MPI_Comm* comm,
00209 #endif
00210                                    int* compid,
00211                                    iMOAB_AppID pid )
00212 {
00213     IMOAB_CHECKPOINTER( app_name, 1 );
00214 #ifdef MOAB_HAVE_MPI
00215     IMOAB_CHECKPOINTER( comm, 2 );
00216     IMOAB_CHECKPOINTER( compid, 3 );
00217 #else
00218     IMOAB_CHECKPOINTER( compid, 2 );
00219 #endif
00220 
00221     // will create a parallel comm for this application too, so there will be a
00222     // mapping from *pid to file set and to parallel comm instances
00223     std::string name( app_name );
00224 
00225     if( context.appIdMap.find( name ) != context.appIdMap.end() )
00226     {
00227         std::cout << " application " << name << " already registered \n";
00228         return moab::MB_FAILURE;
00229     }
00230 
00231     *pid                   = context.unused_pid++;
00232     context.appIdMap[name] = *pid;
00233     int rankHere           = 0;
00234 #ifdef MOAB_HAVE_MPI
00235     MPI_Comm_rank( *comm, &rankHere );
00236 #endif
00237     if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
00238     if( *compid <= 0 )
00239     {
00240         std::cout << " convention for external application is to have its id positive \n";
00241         return moab::MB_FAILURE;
00242     }
00243 
00244     if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
00245     {
00246         std::cout << " external application with comp id " << *compid << " is already registered\n";
00247         return moab::MB_FAILURE;
00248     }
00249 
00250     context.appIdCompMap[*compid] = *pid;
00251 
00252     // now create ParallelComm and a file set for this application
00253 #ifdef MOAB_HAVE_MPI
00254     if( *comm )
00255     {
00256         ParallelComm* pco = new ParallelComm( context.MBI, *comm );
00257 
00258 #ifndef NDEBUG
00259         int index = pco->get_id();  // it could be useful to get app id from pcomm instance ...
00260         assert( index == *pid );
00261         // here, we assert that the pid is the same as the id of the ParallelComm instance
00262         // useful for writing in parallel
00263 #endif
00264         context.pcomms.push_back( pco );
00265     }
00266     else
00267     {
00268         context.pcomms.push_back( 0 );
00269     }
00270 #endif
00271 
00272     // create now the file set that will be used for loading the model in
00273     EntityHandle file_set;
00274     ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
00275 
00276     appData app_data;
00277     app_data.file_set  = file_set;
00278     app_data.global_id = *compid;  // will be used mostly for par comm graph
00279     app_data.name      = name;     // save the name of application
00280 
00281 #ifdef MOAB_HAVE_TEMPESTREMAP
00282     app_data.tempestData.remapper = NULL;  // Only allocate as needed
00283 #endif
00284 
00285     app_data.point_cloud = false;
00286     app_data.is_fortran  = false;
00287 
00288     context.appDatas.push_back(
00289         app_data );  // context.appDatas[*pid].file_set will be the file set of interest
00290     return moab::MB_SUCCESS;
00291 }
00292 
00293 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
00294 #ifdef MOAB_HAVE_MPI
00295                                           int* comm,
00296 #endif
00297                                           int* compid,
00298                                           iMOAB_AppID pid )
00299 {
00300     IMOAB_CHECKPOINTER( app_name, 1 );
00301 #ifdef MOAB_HAVE_MPI
00302     IMOAB_CHECKPOINTER( comm, 2 );
00303     IMOAB_CHECKPOINTER( compid, 3 );
00304 #else
00305     IMOAB_CHECKPOINTER( compid, 2 );
00306 #endif
00307 
00308     ErrCode err;
00309     assert( app_name != nullptr );
00310     std::string name( app_name );
00311 
00312 #ifdef MOAB_HAVE_MPI
00313     MPI_Comm ccomm;
00314     if( comm )
00315     {
00316         // convert from Fortran communicator to a C communicator
00317         // see transfer of handles
00318         // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
00319         ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
00320     }
00321 #endif
00322 
00323     // now call C style registration function:
00324     err = iMOAB_RegisterApplication( app_name,
00325 #ifdef MOAB_HAVE_MPI
00326                                      &ccomm,
00327 #endif
00328                                      compid, pid );
00329 
00330     // Now that we have created the application context, store that
00331     // the application being registered is from a Fortran context
00332     context.appDatas[*pid].is_fortran = true;
00333 
00334     return err;
00335 }
00336 
00337 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
00338 {
00339     // the file set and parallel comm are stored in vectors indexed by *pid
00340     // assume we did not delete anything yet
00341     // *pid will not be reused if we register another application
00342     appData& data = context.appDatas[*pid];
00343     int rankHere  = 0;
00344 #ifdef MOAB_HAVE_MPI
00345     ParallelComm* pco = context.pcomms[*pid];
00346     rankHere          = pco->rank();
00347 #endif
00348     if( !rankHere )
00349         std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
00350                   << " is de-registered now \n";
00351 
00352     EntityHandle fileSet = data.file_set;
00353     // get all entities part of the file set
00354     Range fileents;
00355     ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
00356 
00357     fileents.insert( fileSet );
00358 
00359     rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval );  // append all mesh sets
00360 
00361 #ifdef MOAB_HAVE_TEMPESTREMAP
00362     if( data.tempestData.remapper ) delete data.tempestData.remapper;
00363     if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
00364 #endif
00365 
00366 #ifdef MOAB_HAVE_MPI
00367 
00368     // we could get the pco also with
00369     // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
00370 
00371     std::map< int, ParCommGraph* >& pargs = data.pgraph;
00372 
00373     // free the parallel comm graphs associated with this app
00374     for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
00375     {
00376         ParCommGraph* pgr = mt->second;
00377         delete pgr;
00378         pgr = NULL;
00379     }
00380     if( pco ) delete pco;
00381 #endif
00382 
00383     // delete first all except vertices
00384     Range vertices = fileents.subset_by_type( MBVERTEX );
00385     Range noverts  = subtract( fileents, vertices );
00386 
00387     rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
00388     // now retrieve connected elements that still exist (maybe in other sets, pids?)
00389     Range adj_ents_left;
00390     rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00391     rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00392     rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00393 
00394     if( !adj_ents_left.empty() )
00395     {
00396         Range conn_verts;
00397         rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
00398         vertices = subtract( vertices, conn_verts );
00399     }
00400 
00401     rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
00402 
00403     std::map< std::string, int >::iterator mit;
00404 
00405     for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
00406     {
00407         int pidx = mit->second;
00408 
00409         if( *pid == pidx )
00410         {
00411             break;
00412         }
00413     }
00414 
00415     context.appIdMap.erase( mit );
00416     std::map< int, int >::iterator mit1;
00417 
00418     for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
00419     {
00420         int pidx = mit1->second;
00421 
00422         if( *pid == pidx )
00423         {
00424             break;
00425         }
00426     }
00427 
00428     context.appIdCompMap.erase( mit1 );
00429 
00430     context.unused_pid--;  // applications must be de-registered in reverse order of registration; TODO relax this
00431     context.appDatas.pop_back();
00432 #ifdef MOAB_HAVE_MPI
00433     context.pcomms.pop_back();
00434 #endif
00435     return moab::MB_SUCCESS;
00436 }
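
// Illustrative sketch, implied by the unused_pid decrement and the pop_back
// calls above: applications should be de-registered in reverse order of
// registration. The names and component ids below are made-up examples.
//
//   int pidA = -1, pidB = -1;
//   iMOAB_RegisterApplication( nameA, &comm, &compA, &pidA );  // receives pid 0
//   iMOAB_RegisterApplication( nameB, &comm, &compB, &pidB );  // receives pid 1
//   iMOAB_DeregisterApplication( &pidB );  // last registered, first released
//   iMOAB_DeregisterApplication( &pidA );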
00437 
00438 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
00439 {
00440     // release any Fortran specific allocations here before we pass it on
00441     context.appDatas[*pid].is_fortran = false;
00442 
00443     // release all datastructure allocations
00444     return iMOAB_DeregisterApplication( pid );
00445 }
00446 
00447 // Utility function
00448 static void split_tag_names( std::string input_names,
00449                              std::string& separator,
00450                              std::vector< std::string >& list_tag_names )
00451 {
00452     size_t pos = 0;
00453     std::string token;
00454     while( ( pos = input_names.find( separator ) ) != std::string::npos )
00455     {
00456         token = input_names.substr( 0, pos );
00457         if( !token.empty() ) list_tag_names.push_back( token );
00458         // std::cout << token << std::endl;
00459         input_names.erase( 0, pos + separator.length() );
00460     }
00461     if( !input_names.empty() )
00462     {
00463         // handle any leftover token, i.e. when the string does not end with the separator
00464         list_tag_names.push_back( input_names );
00465     }
00466     return;
00467 }
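
// Illustrative sketch: splitting a separator-delimited list of tag names with
// the helper above; the tag names and the ":" separator are made-up examples.
//
//   std::vector< std::string > names;
//   std::string sep( ":" );
//   split_tag_names( "Temperature:Salinity:Density", sep, names );
//   // names now holds { "Temperature", "Salinity", "Density" }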
00468 
00469 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
00470                               int* num_global_vertices,
00471                               int* num_global_elements,
00472                               int* num_dimension,
00473                               int* num_parts )
00474 {
00475     IMOAB_CHECKPOINTER( filename, 1 );
00476     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00477 
00478 #ifdef MOAB_HAVE_HDF5
00479     std::string filen( filename );
00480 
00481     int edges   = 0;
00482     int faces   = 0;
00483     int regions = 0;
00484     if( num_global_vertices ) *num_global_vertices = 0;
00485     if( num_global_elements ) *num_global_elements = 0;
00486     if( num_dimension ) *num_dimension = 0;
00487     if( num_parts ) *num_parts = 0;
00488 
00489     mhdf_FileHandle file;
00490     mhdf_Status status;
00491     unsigned long max_id;
00492     struct mhdf_FileDesc* data;
00493 
00494     file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
00495 
00496     if( mhdf_isError( &status ) )
00497     {
00498         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00499         return moab::MB_FAILURE;
00500     }
00501 
00502     data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
00503                                 1 );  // will use extra set info; will get parallel partition tag info too!
00504 
00505     if( mhdf_isError( &status ) )
00506     {
00507         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00508         return moab::MB_FAILURE;
00509     }
00510 
00511     if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
00512     if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
00513 
00514     for( int i = 0; i < data->num_elem_desc; i++ )
00515     {
00516         struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
00517         struct mhdf_EntDesc* ent_d    = &( el_desc->desc );
00518 
00519         if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
00520         {
00521             edges += ent_d->count;
00522         }
00523 
00524         if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
00525         {
00526             faces += ent_d->count;
00527         }
00528 
00529         if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
00530         {
00531             faces += ent_d->count;
00532         }
00533 
00534         if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
00535         {
00536             faces += ent_d->count;
00537         }
00538 
00539         if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
00540         {
00541             regions += ent_d->count;
00542         }
00543 
00544         if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
00545         {
00546             regions += ent_d->count;
00547         }
00548 
00549         if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
00550         {
00551             regions += ent_d->count;
00552         }
00553 
00554         if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
00555         {
00556             regions += ent_d->count;
00557         }
00558 
00559         if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
00560         {
00561             regions += ent_d->count;
00562         }
00563 
00564         if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
00565         {
00566             regions += ent_d->count;
00567         }
00568 
00569         if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
00570         {
00571             regions += ent_d->count;
00572         }
00573     }
00574 
00575     if( num_parts ) *num_parts = data->numEntSets[0];
00576 
00577     // is this required?
00578     if( edges > 0 )
00579     {
00580         if( num_dimension ) *num_dimension = 1;  // I don't think it will ever return 1
00581         if( num_global_elements ) *num_global_elements = edges;
00582     }
00583 
00584     if( faces > 0 )
00585     {
00586         if( num_dimension ) *num_dimension = 2;
00587         if( num_global_elements ) *num_global_elements = faces;
00588     }
00589 
00590     if( regions > 0 )
00591     {
00592         if( num_dimension ) *num_dimension = 3;
00593         if( num_global_elements ) *num_global_elements = regions;
00594     }
00595 
00596     mhdf_closeFile( file, &status );
00597 
00598     free( data );
00599 
00600 #else
00601     std::cout << filename
00602               << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
00603                  "formats other than an h5m file.\n";
00604     if( num_global_vertices ) *num_global_vertices = 0;
00605     if( num_global_elements ) *num_global_elements = 0;
00606     if( num_dimension ) *num_dimension = 0;
00607     if( num_parts ) *num_parts = 0;
00608 #endif
00609 
00610     return moab::MB_SUCCESS;
00611 }
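
// Illustrative sketch: querying the header of an h5m file before deciding how
// to load it; the file name "ocean.h5m" is a made-up example.
//
//   int nverts = 0, nelems = 0, ndim = 0, nparts = 0;
//   char meshfile[] = "ocean.h5m";
//   iMOAB_ReadHeaderInfo( meshfile, &nverts, &nelems, &ndim, &nparts );
//   // ndim is the dimension of the highest-dimensional elements in the file;
//   // nparts is the number of parallel partitions stored in the file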
00612 
00613 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
00614                         const iMOAB_String filename,
00615                         const iMOAB_String read_options,
00616                         int* num_ghost_layers )
00617 {
00618     IMOAB_CHECKPOINTER( filename, 2 );
00619     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00620 
00621     // make sure we use the file set and pcomm associated with the *pid
00622     std::ostringstream newopts;
00623     if( read_options ) newopts << read_options;
00624 
00625 #ifdef MOAB_HAVE_MPI
00626 
00627     if( context.MPI_initialized )
00628     {
00629         if( context.worldprocs > 1 )
00630         {
00631             std::string opts( ( read_options ? read_options : "" ) );
00632             std::string pcid( "PARALLEL_COMM=" );
00633             std::size_t found = opts.find( pcid );
00634 
00635             if( found != std::string::npos )
00636             {
00637                 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00638                 return moab::MB_FAILURE;
00639             }
00640 
00641             // apply the PARALLEL_COMM option only for h5m and nc files; it does not work for .g
00642             // files (used in test_remapping)
00643             std::string filen( filename );
00644             std::string::size_type idx = filen.rfind( '.' );
00645 
00646             if( idx != std::string::npos )
00647             {
00648                 std::string extension = filen.substr( idx + 1 );
00649                 if( ( extension == std::string( "h5m" ) ) || ( extension == std::string( "nc" ) ) )
00650                     newopts << ";;PARALLEL_COMM=" << *pid;
00651             }
00652 
00653             if( *num_ghost_layers >= 1 )
00654             {
00655                 // if we want ghosts, we also request additional entities (the trailing .3),
00656                 // because the additional entities can be edges or faces that are part of the Neumann sets
00657                 std::string pcid2( "PARALLEL_GHOSTS=" );
00658                 std::size_t found2 = opts.find( pcid2 );
00659 
00660                 if( found2 != std::string::npos )
00661                 {
00662                     std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
00663                                  "number of layers \n";
00664                 }
00665                 else
00666                 {
00667                     // dimension of primary entities is 3 here, but it could be 2 for climate
00668                     // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
00669                     // example:  ";PARALLEL_GHOSTS=2.0.1"
00670                     newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
00671                 }
00672             }
00673         }
00674     }
00675 #else
00676     IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
00677 #endif
00678 
00679     // Now let us actually load the MOAB file with the appropriate read options
00680     ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
00681 
00682 #ifdef VERBOSE
00683     // some debugging stuff
00684     std::ostringstream outfile;
00685 #ifdef MOAB_HAVE_MPI
00686     int rank   = context.pcomms[*pid]->rank();
00687     int nprocs = context.pcomms[*pid]->size();
00688     outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
00689 #else
00690     outfile << "TaskMesh_n1.0.h5m";
00691 #endif
00692     // the mesh contains ghosts too, but they are not part of mat/neumann set
00693     // write in serial the file, to see what tags are missing
00694     rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval );  // everything on current task, written in serial
00695 #endif
00696 
00697     // Update mesh information
00698     return iMOAB_UpdateMeshInfo( pid );
00699 }
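
// Illustrative sketch: loading a partitioned h5m file in parallel with one
// layer of ghost cells; pid comes from iMOAB_RegisterApplication and the file
// name is a made-up example. For a 3-D mesh the ghost option assembled above
// becomes ";PARALLEL_GHOSTS=3.0.1.3".
//
//   int nghosts     = 1;
//   char readopts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
//   char meshfile[] = "ocean.h5m";
//   iMOAB_LoadMesh( &pid, meshfile, readopts, &nghosts );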
00700 
00701 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
00702 {
00703     IMOAB_CHECKPOINTER( filename, 2 );
00704     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00705 
00706     appData& data        = context.appDatas[*pid];
00707     EntityHandle fileSet = data.file_set;
00708 
00709     std::ostringstream newopts;
00710 #ifdef MOAB_HAVE_MPI
00711     std::string write_opts( ( write_options ? write_options : "" ) );
00712     std::string pcid( "PARALLEL_COMM=" );
00713     std::size_t found = write_opts.find( pcid );
00714 
00715     if( found != std::string::npos )
00716     {
00717         std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00718         return moab::MB_FAILURE;
00719     }
00720 
00721     // if writing in parallel, add the PARALLEL_COMM option, to be sure which ParallelComm instance is used
00722     std::string pw( "PARALLEL=WRITE_PART" );
00723     found = write_opts.find( pw );
00724 
00725     if( found != std::string::npos )
00726     {
00727         newopts << "PARALLEL_COMM=" << *pid << ";";
00728     }
00729 
00730 #endif
00731 
00732     if( write_options ) newopts << write_options;
00733 
00734     std::vector< Tag > copyTagList = data.tagList;
00735     // append Global ID and Parallel Partition
00736     std::string gid_name_tag( "GLOBAL_ID" );
00737 
00738     // export global id tag, we need it always
00739     if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
00740     {
00741         Tag gid = context.MBI->globalId_tag();
00742         copyTagList.push_back( gid );
00743     }
00744     // also export the PARALLEL_PARTITION tag
00745     std::string pp_name_tag( "PARALLEL_PARTITION" );
00746 
00747     // write parallel part tag too, if it exists
00748     if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
00749     {
00750         Tag ptag = nullptr;  // initialize, in case the tag does not exist
00751         context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
00752         if( ptag ) copyTagList.push_back( ptag );
00753     }
00754 
00755     // Now let us actually write the file to disk with appropriate options
00756     ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, &copyTagList[0],
00757                                               (int)copyTagList.size() );MB_CHK_ERR( rval );
00758 
00759     return moab::MB_SUCCESS;
00760 }
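
// Illustrative sketch: writing the application mesh in parallel. The
// PARALLEL_COMM option is appended internally when PARALLEL=WRITE_PART is
// detected, so the caller passes only the write mode; the file name is a
// made-up example.
//
//   char outfile[]   = "ocean_out.h5m";
//   char writeopts[] = "PARALLEL=WRITE_PART";
//   iMOAB_WriteMesh( &pid, outfile, writeopts );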
00761 
00762 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, iMOAB_String prefix )
00763 {
00764     IMOAB_CHECKPOINTER( prefix, 2 );
00765     IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
00766 
00767     std::ostringstream file_name;
00768     int rank = 0, size = 1;
00769 #ifdef MOAB_HAVE_MPI
00770     rank = context.pcomms[*pid]->rank();
00771     size = context.pcomms[*pid]->size();
00772 #endif
00773     file_name << prefix << "_" << size << "_" << rank << ".h5m";
00774     // Now let us actually write the file to disk with appropriate options
00775     ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
00776 
00777     return moab::MB_SUCCESS;
00778 }
00779 
00780 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
00781 {
00782     // this will include ghost elements info
00783     appData& data        = context.appDatas[*pid];
00784     EntityHandle fileSet = data.file_set;
00785 
00786     // first clear all data ranges; this can be called after ghosting
00787     data.all_verts.clear();
00788     data.primary_elems.clear();
00789     data.local_verts.clear();
00790     data.ghost_vertices.clear();
00791     data.owned_elems.clear();
00792     data.ghost_elems.clear();
00793     data.mat_sets.clear();
00794     data.neu_sets.clear();
00795     data.diri_sets.clear();
00796 
00797     // Let us get all the vertex entities
00798     ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval );  // recursive
00799 
00800     // Let us check first entities of dimension = 3
00801     data.dimension = 3;
00802     rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00803 
00804     if( data.primary_elems.empty() )
00805     {
00806         // No 3-D elements. Let us check entities of dimension = 2
00807         data.dimension = 2;
00808         rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00809 
00810         if( data.primary_elems.empty() )
00811         {
00812             // No 3-D or 2-D elements. Let us check entities of dimension = 1
00813             data.dimension = 1;
00814             rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00815 
00816             if( data.primary_elems.empty() )
00817             {
00818                 // no elements of dimension 1 or 2 or 3; it could happen for point clouds
00819                 data.dimension = 0;
00820                 if( data.all_verts.size() == 0 ) data.dimension = -1;  // nothing on this task
00821             }
00822         }
00823     }
00824 
00825     // check if the current mesh is just a point cloud
00826     data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
00827 
00828 #ifdef MOAB_HAVE_MPI
00829 
00830     if( context.MPI_initialized )
00831     {
00832         ParallelComm* pco = context.pcomms[*pid];
00833 
00834         // filter out ghost vertices; what remains are the local vertices
00835         rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
00836 
00837         // Store handles for all ghosted entities
00838         data.ghost_vertices = subtract( data.all_verts, data.local_verts );
00839 
00840         // filter out ghost elements; what remains are the owned elements
00841         rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
00842 
00843         data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
00844     }
00845     else
00846     {
00847         data.local_verts = data.all_verts;
00848         data.owned_elems = data.primary_elems;
00849     }
00850 
00851 #else
00852 
00853     data.local_verts = data.all_verts;
00854     data.owned_elems = data.primary_elems;
00855 
00856 #endif
00857 
00858     // Get the references for some standard internal tags such as material blocks, BCs, etc
00859     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00860                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00861 
00862     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00863                                                       data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00864 
00865     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00866                                                       data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00867 
00868     return moab::MB_SUCCESS;
00869 }
00870 
00871 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
00872                            int* num_visible_vertices,
00873                            int* num_visible_elements,
00874                            int* num_visible_blocks,
00875                            int* num_visible_surfaceBC,
00876                            int* num_visible_vertexBC )
00877 {
00878     ErrorCode rval;
00879     appData& data        = context.appDatas[*pid];
00880     EntityHandle fileSet = data.file_set;
00881 
00882     // this will include ghost elements
00883     // first clear all data ranges; this can be called after ghosting
00884     if( num_visible_elements )
00885     {
00886         num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
00887         // separate ghost and local/owned primary elements
00888         num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
00889         num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
00890     }
00891     if( num_visible_vertices )
00892     {
00893         num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
00894         num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
00895         // local are those that are not ghosts
00896         num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
00897     }
00898 
00899     if( num_visible_blocks )
00900     {
00901         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00902                                                           data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00903 
00904         num_visible_blocks[2] = data.mat_sets.size();
00905         num_visible_blocks[0] = num_visible_blocks[2];
00906         num_visible_blocks[1] = 0;
00907     }
00908 
00909     if( num_visible_surfaceBC )
00910     {
00911         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00912                                                           data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00913 
00914         num_visible_surfaceBC[2] = 0;
00915         // count how many faces are in each neu set, and how many regions are
00916         // adjacent to them;
00917         int numNeuSets = (int)data.neu_sets.size();
00918 
00919         for( int i = 0; i < numNeuSets; i++ )
00920         {
00921             Range subents;
00922             EntityHandle nset = data.neu_sets[i];
00923             rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
00924 
00925             for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
00926             {
00927                 EntityHandle subent = *it;
00928                 Range adjPrimaryEnts;
00929                 rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
00930 
00931                 num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
00932             }
00933         }
00934 
00935         num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
00936         num_visible_surfaceBC[1] = 0;
00937     }
00938 
00939     if( num_visible_vertexBC )
00940     {
00941         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00942                                                           data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00943 
00944         num_visible_vertexBC[2] = 0;
00945         int numDiriSets         = (int)data.diri_sets.size();
00946 
00947         for( int i = 0; i < numDiriSets; i++ )
00948         {
00949             Range verts;
00950             EntityHandle diset = data.diri_sets[i];
00951             rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
00952 
00953             num_visible_vertexBC[2] += (int)verts.size();
00954         }
00955 
00956         num_visible_vertexBC[0] = num_visible_vertexBC[2];
00957         num_visible_vertexBC[1] = 0;
00958     }
00959 
00960     return moab::MB_SUCCESS;
00961 }
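
// Illustrative sketch: each output array of iMOAB_GetMeshInfo has three
// entries, { owned/local, ghost, total visible }, as filled above.
//
//   int nverts[3], nelems[3], nblocks[3], nsbc[3], ndbc[3];
//   iMOAB_GetMeshInfo( &pid, nverts, nelems, nblocks, nsbc, ndbc );
//   // nelems[0] owned elements, nelems[1] ghost elements, nelems[2] total;
//   // nverts[2] sizes the vertex-based query arrays used further below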
00962 
00963 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
00964 {
00965     IMOAB_CHECKPOINTER( vertices_length, 2 );
00966     IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
00967 
00968     const Range& verts = context.appDatas[*pid].all_verts;
00969     // check for problems with array length
00970     IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
00971 
00972     // global id tag is context.globalID_tag
00973     return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
00974 }
00975 
00976 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
00977 {
00978     assert( vertices_length && *vertices_length );
00979     assert( visible_global_rank_ID );
00980 
00981     Range& verts = context.appDatas[*pid].all_verts;
00982     int i        = 0;
00983 #ifdef MOAB_HAVE_MPI
00984     ParallelComm* pco = context.pcomms[*pid];
00985 
00986     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00987     {
00988         ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
00989     }
00990 
00991     if( i != *vertices_length )
00992     {
00993         return moab::MB_FAILURE;
00994     }  // warning array allocation problem
00995 
00996 #else
00997 
00998     /* everything owned by proc 0 */
00999     if( (int)verts.size() != *vertices_length )
01000     {
01001         return moab::MB_FAILURE;
01002     }  // warning array allocation problem
01003 
01004     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
01005     {
01006         visible_global_rank_ID[i] = 0;
01007     }  // all vertices are owned by processor 0, as this is serial run
01008 
01009 #endif
01010 
01011     return moab::MB_SUCCESS;
01012 }
01013 
01014 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
01015 {
01016     Range& verts = context.appDatas[*pid].all_verts;
01017 
01018     // interleaved coordinates, so that means deep copy anyway
01019     if( *coords_length != 3 * (int)verts.size() )
01020     {
01021         return moab::MB_FAILURE;
01022     }
01023 
01024     ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
01025 
01026     return moab::MB_SUCCESS;
01027 }
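
// Illustrative sketch: coordinates are returned interleaved (x0,y0,z0,x1,...),
// so the buffer must hold 3 * (number of visible vertices) doubles; nverts[2]
// refers to the total vertex count returned by iMOAB_GetMeshInfo.
//
//   std::vector< double > coords( 3 * nverts[2] );
//   int clen = (int)coords.size();
//   iMOAB_GetVisibleVerticesCoordinates( &pid, &clen, coords.data() );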
01028 
01029 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
01030 {
01031     // local block ids are counted from 0 to the number of visible blocks;
01032     // we actually return the material set tag value as the global block id
01033     Range& matSets = context.appDatas[*pid].mat_sets;
01034 
01035     if( *block_length != (int)matSets.size() )
01036     {
01037         return moab::MB_FAILURE;
01038     }
01039 
01040     // return the material set tag value (context.material_tag) for each block
01041     ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
01042 
01043     // populate map with index
01044     std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
01045     for( unsigned i = 0; i < matSets.size(); i++ )
01046     {
01047         matIdx[global_block_IDs[i]] = i;
01048     }
01049 
01050     return moab::MB_SUCCESS;
01051 }
01052 
01053 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
01054                             iMOAB_GlobalID* global_block_ID,
01055                             int* vertices_per_element,
01056                             int* num_elements_in_block )
01057 {
01058     assert( global_block_ID );
01059 
01060     std::map< int, int >& matMap      = context.appDatas[*pid].matIndex;
01061     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01062 
01063     if( it == matMap.end() )
01064     {
01065         return moab::MB_FAILURE;
01066     }  // error in finding block with id
01067 
01068     int blockIndex          = matMap[*global_block_ID];
01069     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01070     Range blo_elems;
01071     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
01072 
01073     if( MB_SUCCESS != rval || blo_elems.empty() )
01074     {
01075         return moab::MB_FAILURE;
01076     }
01077 
01078     EntityType type = context.MBI->type_from_handle( blo_elems[0] );
01079 
01080     if( !blo_elems.all_of_type( type ) )
01081     {
01082         return moab::MB_FAILURE;
01083     }  // not all of the same type
01084 
01085     const EntityHandle* conn = NULL;
01086     int num_verts            = 0;
01087     rval                     = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
01088 
01089     *vertices_per_element  = num_verts;
01090     *num_elements_in_block = (int)blo_elems.size();
01091 
01092     return moab::MB_SUCCESS;
01093 }
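
// Illustrative sketch: typical block query sequence, sized with nblocks[2]
// from iMOAB_GetMeshInfo; the returned block ids are the material set tag
// values, and the first block is queried here just as an example.
//
//   std::vector< iMOAB_GlobalID > blockIDs( nblocks[2] );
//   int nb = nblocks[2];
//   iMOAB_GetBlockID( &pid, &nb, blockIDs.data() );
//   int vpere = 0, nelem_in_block = 0;
//   iMOAB_GetBlockInfo( &pid, &blockIDs[0], &vpere, &nelem_in_block );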
01094 
01095 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
01096                                       int* num_visible_elements,
01097                                       iMOAB_GlobalID* element_global_IDs,
01098                                       int* ranks,
01099                                       iMOAB_GlobalID* block_IDs )
01100 {
01101     appData& data = context.appDatas[*pid];
01102 #ifdef MOAB_HAVE_MPI
01103     ParallelComm* pco = context.pcomms[*pid];
01104 #endif
01105 
01106     ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
01107 
01108     int i = 0;
01109 
01110     for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
01111     {
01112 #ifdef MOAB_HAVE_MPI
01113         rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
01114 
01115 #else
01116         /* everything owned by task 0 */
01117         ranks[i]             = 0;
01118 #endif
01119     }
01120 
01121     for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
01122     {
01123         EntityHandle matMeshSet = *mit;
01124         Range elems;
01125         rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01126 
01127         int valMatTag;
01128         rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
01129 
01130         for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
01131         {
01132             EntityHandle eh = *eit;
01133             int index       = data.primary_elems.index( eh );
01134 
01135             if( -1 == index )
01136             {
01137                 return moab::MB_FAILURE;
01138             }
01139 
01140             if( index >= *num_visible_elements )  // bounds check for the output arrays
01141             {
01142                 return moab::MB_FAILURE;
01143             }
01144 
01145             block_IDs[index] = valMatTag;
01146         }
01147     }
01148 
01149     return moab::MB_SUCCESS;
01150 }
01151 
01152 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
01153                                              iMOAB_GlobalID* global_block_ID,
01154                                              int* connectivity_length,
01155                                              int* element_connectivity )
01156 {
01157     assert( global_block_ID );      // ensure global block ID argument is not null
01158     assert( connectivity_length );  // ensure connectivity length argument is not null
01159 
01160     appData& data                     = context.appDatas[*pid];
01161     std::map< int, int >& matMap      = data.matIndex;
01162     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01163 
01164     if( it == matMap.end() )
01165     {
01166         return moab::MB_FAILURE;
01167     }  // error in finding block with id
01168 
01169     int blockIndex          = matMap[*global_block_ID];
01170     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01171     std::vector< EntityHandle > elems;
01172 
01173     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01174 
01175     if( elems.empty() )
01176     {
01177         return moab::MB_FAILURE;
01178     }
01179 
01180     std::vector< EntityHandle > vconnect;
01181     rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
01182 
01183     if( *connectivity_length != (int)vconnect.size() )
01184     {
01185         return moab::MB_FAILURE;
01186     }  // mismatched sizes
01187 
01188     for( int i = 0; i < *connectivity_length; i++ )
01189     {
01190         int inx = data.all_verts.index( vconnect[i] );
01191 
01192         if( -1 == inx )
01193         {
01194             return moab::MB_FAILURE;
01195         }  // error, vertex not in local range
01196 
01197         element_connectivity[i] = inx;
01198     }
01199 
01200     return moab::MB_SUCCESS;
01201 }
01202 
01203 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
01204                                       iMOAB_LocalID* elem_index,
01205                                       int* connectivity_length,
01206                                       int* element_connectivity )
01207 {
01208     assert( elem_index );           // ensure element index argument is not null
01209     assert( connectivity_length );  // ensure connectivity length argument is not null
01210 
01211     appData& data = context.appDatas[*pid];
01212     assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
01213 
01214     int num_nodes;
01215     const EntityHandle* conn;
01216 
01217     EntityHandle eh = data.primary_elems[*elem_index];
01218 
01219     ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
01220 
01221     if( *connectivity_length < num_nodes )
01222     {
01223         return moab::MB_FAILURE;
01224     }  // wrong number of vertices
01225 
01226     for( int i = 0; i < num_nodes; i++ )
01227     {
01228         int index = data.all_verts.index( conn[i] );
01229 
01230         if( -1 == index )
01231         {
01232             return moab::MB_FAILURE;
01233         }
01234 
01235         element_connectivity[i] = index;
01236     }
01237 
01238     *connectivity_length = num_nodes;
01239 
01240     return moab::MB_SUCCESS;
01241 }
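
// Illustrative sketch: retrieving the connectivity of one local element as
// indices into the visible vertex range (all_verts); the element index 0 and
// the buffer size are made-up examples.
//
//   iMOAB_LocalID elem = 0;
//   int conn[27];   // large enough for any supported cell type
//   int nconn = 27;
//   iMOAB_GetElementConnectivity( &pid, &elem, &nconn, conn );
//   // on return, nconn holds the actual number of vertices of this element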
01242 
01243 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
01244                                    iMOAB_GlobalID* global_block_ID,
01245                                    int* num_elements_in_block,
01246                                    int* element_ownership )
01247 {
01248     assert( global_block_ID );        // ensure global block ID argument is not null
01249     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01250 
01251     std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
01252 
01253     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01254 
01255     if( it == matMap.end() )
01256     {
01257         return moab::MB_FAILURE;
01258     }  // error in finding block with id
01259 
01260     int blockIndex          = matMap[*global_block_ID];
01261     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01262     Range elems;
01263 
01264     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01265 
01266     if( elems.empty() )
01267     {
01268         return moab::MB_FAILURE;
01269     }
01270 
01271     if( *num_elements_in_block != (int)elems.size() )
01272     {
01273         return moab::MB_FAILURE;
01274     }  // bad memory allocation
01275 
01276     int i = 0;
01277 #ifdef MOAB_HAVE_MPI
01278     ParallelComm* pco = context.pcomms[*pid];
01279 #endif
01280 
01281     for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
01282     {
01283 #ifdef MOAB_HAVE_MPI
01284         rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
01285 #else
01286         element_ownership[i] = 0; /* owned by 0 */
01287 #endif
01288     }
01289 
01290     return moab::MB_SUCCESS;
01291 }
01292 
01293 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
01294                             iMOAB_GlobalID* global_block_ID,
01295                             int* num_elements_in_block,
01296                             iMOAB_GlobalID* global_element_ID,
01297                             iMOAB_LocalID* local_element_ID )
01298 {
01299     assert( global_block_ID );        // ensure global block ID argument is not null
01300     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01301 
01302     appData& data                = context.appDatas[*pid];
01303     std::map< int, int >& matMap = data.matIndex;
01304 
01305     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01306 
01307     if( it == matMap.end() )
01308     {
01309         return moab::MB_FAILURE;
01310     }  // error in finding block with id
01311 
01312     int blockIndex          = matMap[*global_block_ID];
01313     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01314     Range elems;
01315     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01316 
01317     if( elems.empty() )
01318     {
01319         return moab::MB_FAILURE;
01320     }
01321 
01322     if( *num_elements_in_block != (int)elems.size() )
01323     {
01324         return moab::MB_FAILURE;
01325     }  // bad memory allocation
01326 
01327     rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
01328 
01329     // check that elems are among primary_elems in data
01330     for( int i = 0; i < *num_elements_in_block; i++ )
01331     {
01332         local_element_ID[i] = data.primary_elems.index( elems[i] );
01333 
01334         if( -1 == local_element_ID[i] )
01335         {
01336             return moab::MB_FAILURE;
01337         }  // error, not in local primary elements
01338     }
01339 
01340     return moab::MB_SUCCESS;
01341 }
01342 
01343 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
01344                                      int* surface_BC_length,
01345                                      iMOAB_LocalID* local_element_ID,
01346                                      int* reference_surface_ID,
01347                                      int* boundary_condition_value )
01348 {
01349     // we have to fill BC data for the Neumann sets
01350     ErrorCode rval;
01351 
01352     // it was counted above, in GetMeshInfo
01353     appData& data  = context.appDatas[*pid];
01354     int numNeuSets = (int)data.neu_sets.size();
01355 
01356     int index = 0;  // index [0, surface_BC_length) for the arrays returned
01357 
01358     for( int i = 0; i < numNeuSets; i++ )
01359     {
01360         Range subents;
01361         EntityHandle nset = data.neu_sets[i];
01362         rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
01363 
01364         int neuVal;
01365         rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
01366 
01367         for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
01368         {
01369             EntityHandle subent = *it;
01370             Range adjPrimaryEnts;
01371             rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
01372 
01373             // get global id of the primary ents, and side number of the quad/subentity
01374             // this is moab ordering
01375             for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
01376             {
01377                 EntityHandle primaryEnt = *pit;
01378                 // get global id
01379                 /*int globalID;
01380                 rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
01381                 if (MB_SUCCESS!=rval)
01382                   return moab::MB_FAILURE;
01383                 global_element_ID[index] = globalID;*/
01384 
01385                 // get local element id
01386                 local_element_ID[index] = data.primary_elems.index( primaryEnt );
01387 
01388                 if( -1 == local_element_ID[index] )
01389                 {
01390                     return moab::MB_FAILURE;
01391                 }  // did not find the element locally
01392 
01393                 int side_number, sense, offset;
01394                 rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
01395 
01396                 reference_surface_ID[index]     = side_number + 1;  // moab is from 0 to 5, it needs 1 to 6
01397                 boundary_condition_value[index] = neuVal;
01398                 index++;
01399             }
01400         }
01401     }
01402 
01403     if( index != *surface_BC_length )
01404     {
01405         return moab::MB_FAILURE;
01406     }  // error in array allocations
01407 
01408     return moab::MB_SUCCESS;
01409 }
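
// Illustrative sketch: retrieving the Neumann (surface) boundary conditions,
// sized with nsbc[2] from iMOAB_GetMeshInfo; side numbers are returned 1-based.
//
//   std::vector< iMOAB_LocalID > bc_elems( nsbc[2] );
//   std::vector< int > bc_sides( nsbc[2] ), bc_vals( nsbc[2] );
//   iMOAB_GetPointerToSurfaceBC( &pid, &nsbc[2], bc_elems.data(), bc_sides.data(), bc_vals.data() );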
01410 
01411 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
01412                                     int* vertex_BC_length,
01413                                     iMOAB_LocalID* local_vertex_ID,
01414                                     int* boundary_condition_value )
01415 {
01416     ErrorCode rval;
01417 
01418     // it was counted above, in GetMeshInfo
01419     appData& data   = context.appDatas[*pid];
01420     int numDiriSets = (int)data.diri_sets.size();
01421     int index       = 0;  // index [0, *vertex_BC_length) for the arrays returned
01422 
01423     for( int i = 0; i < numDiriSets; i++ )
01424     {
01425         Range verts;
01426         EntityHandle diset = data.diri_sets[i];
01427         rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
01428 
01429         int diriVal;
01430         rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
01431 
01432         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01433         {
01434             EntityHandle vt = *vit;
01435             /*int vgid;
01436             rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
01437             if (MB_SUCCESS!=rval)
01438               return moab::MB_FAILURE;
01439             global_vertext_ID[index] = vgid;*/
01440             local_vertex_ID[index] = data.all_verts.index( vt );
01441 
01442             if( -1 == local_vertex_ID[index] )
01443             {
01444                 return moab::MB_FAILURE;
01445             }  // vertex was not found
01446 
01447             boundary_condition_value[index] = diriVal;
01448             index++;
01449         }
01450     }
01451 
01452     if( *vertex_BC_length != index )
01453     {
01454         return moab::MB_FAILURE;
01455     }  // array allocation issue
01456 
01457     return moab::MB_SUCCESS;
01458 }
01459 
01460 ErrCode iMOAB_DuplicateAppMesh( iMOAB_AppID pid, iMOAB_AppID poid )
01461 {
01462     // the file set and parallel comm are stored in vectors indexed by *pid
01463     appData& data    = context.appDatas[*pid];
01464     appData& dataout = context.appDatas[*poid];
01465     int rankHere     = 0;
01466 #ifdef MOAB_HAVE_MPI
01467     ParallelComm* pco   = context.pcomms[*pid];
01468     ParallelComm* pcout = context.pcomms[*poid];
01469     rankHere            = pco->rank();
01470 #endif
01471 
01472 #ifdef VERBOSE
01473     if( !rankHere )
01474         std::cout << " Mesh for application with ID: " << *pid << " global id: " << data.global_id
01475                   << " name: " << data.name << "\n  is copied in application with ID " << *poid << " name "
01476                   << dataout.name << "\n";
01477 #endif
01478     EntityHandle fileSet    = data.file_set;
01479     EntityHandle outFileSet = dataout.file_set;
01480     // get all entities part of the file set
01481     Range verts;
01482     ErrorCode rval = context.MBI->get_entities_by_dimension( fileSet, 0, verts );MB_CHK_ERR( rval );
01483     Range primaryCells;
01484     rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, primaryCells );MB_CHK_ERR( rval );
01485     Range connVerts;
01486     rval = context.MBI->get_connectivity( primaryCells, connVerts );MB_CHK_ERR( rval );
01487     verts.merge( connVerts );
01488     // first, deep copy vertices
01489     std::vector< double > coords;
01490     coords.resize( verts.size() * 3 );
01491 
01492     rval = context.MBI->get_coords( verts, &coords[0] );MB_CHK_ERR( rval );
01493     std::vector< int > globalIds;
01494     globalIds.resize( verts.size() );
01495     Tag gid = context.MBI->globalId_tag();
01496     rval    = context.MBI->tag_get_data( gid, verts, &globalIds[0] );MB_CHK_ERR( rval );
01497 
01498     Range newVerts;
01499     int nverts = (int)verts.size();
01500     rval       = context.MBI->create_vertices( &coords[0], nverts, newVerts );MB_CHK_ERR( rval );
01501     // set global ID the same
01502     rval = context.MBI->tag_set_data( gid, newVerts, &globalIds[0] );MB_CHK_ERR( rval );
01503     rval = context.MBI->add_entities( outFileSet, newVerts );MB_CHK_ERR( rval );
01504     // create new cells, cell by cell, and add them to the file set, and set the global id too
01505     // use the vertex global id for identification
01506     std::map< EntityHandle, int > oldMap;
01507     std::map< int, EntityHandle > newMap;
01508     int i = 0;
01509     for( auto it = verts.begin(); it != verts.end(); ++it, ++i )
01510     {
01511         EntityHandle oldVert = *it;
01512         EntityHandle newVert = newVerts[i];
01513         int globalId         = globalIds[i];
01514         oldMap[oldVert]      = globalId;
01515         newMap[globalId]     = newVert;
01516     }
01517     Range newCells;
01518     for( auto it = primaryCells.begin(); it != primaryCells.end(); ++it )
01519     {
01520         EntityHandle oldCell = *it;
01521         int nnodes;
01522         const EntityHandle* conn = NULL;
01523         rval                     = context.MBI->get_connectivity( oldCell, conn, nnodes );MB_CHK_ERR( rval );
01524         std::vector< EntityHandle > newConn;
01525         newConn.resize( nnodes );
01526         EntityType type = context.MBI->type_from_handle( oldCell );
01527         for( int i = 0; i < nnodes; i++ )
01528         {
01529             newConn[i] = newMap[oldMap[conn[i]]];
01530         }
01531         EntityHandle newCell;
01532         rval = context.MBI->create_element( type, &newConn[0], nnodes, newCell );MB_CHK_ERR( rval );
01533         rval = context.MBI->add_entities( outFileSet, &newCell, 1 );MB_CHK_ERR( rval );
01534         int globalIdCell = 0;
01535         rval             = context.MBI->tag_get_data( gid, &oldCell, 1, &globalIdCell );MB_CHK_ERR( rval );
01536         rval = context.MBI->tag_set_data( gid, &newCell, 1, &globalIdCell );MB_CHK_ERR( rval );
01537         newCells.insert( newCell );
01538     }
01539     // adjacencies need to be added explicitly
01540     Core* mb                 = (Core*)context.MBI;
01541     AEntityFactory* adj_fact = mb->a_entity_factory();
01542     if( !adj_fact->vert_elem_adjacencies() )
01543         adj_fact->create_vert_elem_adjacencies();
01544     else
01545     {
01546         for( Range::iterator it = newCells.begin(); it != newCells.end(); ++it )
01547         {
01548             EntityHandle eh          = *it;
01549             const EntityHandle* conn = NULL;
01550             int num_nodes            = 0;
01551             rval                     = mb->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
01552             adj_fact->notify_create_entity( eh, conn, num_nodes );
01553         }
01554     }
01555 
01556 #ifdef MOAB_HAVE_MPI
01557     rval = iMOAB_ResolveSharedEntities( poid, &nverts, &globalIds[0] );MB_CHK_ERR( rval );
01558 #endif
01559 
01560     rval = iMOAB_UpdateMeshInfo( poid );MB_CHK_ERR( rval );
01561     return moab::MB_SUCCESS;
01562 }
01563 
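// Illustrative usage sketch (not part of the iMOAB implementation): both application ids are
// assumed to have been registered beforehand; the copy keeps the same global ids, so tag data
// can later be matched between the two applications by global id.
#if 0
static ErrCode example_duplicate_mesh( iMOAB_AppID pidSource, iMOAB_AppID pidCopy )
{
    // deep-copies vertices and cells of pidSource (matched by GLOBAL_ID) into the file set of pidCopy
    return iMOAB_DuplicateAppMesh( pidSource, pidCopy );
}
#endif
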
01564 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
01565                                 const iMOAB_String tag_storage_name,
01566                                 int* tag_type,
01567                                 int* components_per_entity,
01568                                 int* tag_index )
01569 {
01570     // see if the tag is already existing, and if yes, check the type, length
01571     if( *tag_type < DENSE_INTEGER || *tag_type > SPARSE_ENTITYHANDLE )
01572     {
01573         return moab::MB_FAILURE;
01574     }  // we have 6 types of tags supported so far in enum MOAB_TAG_TYPE
01575 
01576     DataType tagDataType;
01577     TagType tagType;
01578     void* defaultVal        = NULL;
01579     int* defInt             = new int[*components_per_entity];
01580     double* defDouble       = new double[*components_per_entity];
01581     EntityHandle* defHandle = new EntityHandle[*components_per_entity];
01582 
01583     for( int i = 0; i < *components_per_entity; i++ )
01584     {
01585         defInt[i]    = 0;
01586         defDouble[i] = -1e+10;
01587         defHandle[i] = (EntityHandle)0;
01588     }
01589 
01590     switch( *tag_type )
01591     {
01592         case DENSE_INTEGER:
01593             tagDataType = MB_TYPE_INTEGER;
01594             tagType     = MB_TAG_DENSE;
01595             defaultVal  = defInt;
01596             break;
01597 
01598         case DENSE_DOUBLE:
01599             tagDataType = MB_TYPE_DOUBLE;
01600             tagType     = MB_TAG_DENSE;
01601             defaultVal  = defDouble;
01602             break;
01603 
01604         case DENSE_ENTITYHANDLE:
01605             tagDataType = MB_TYPE_HANDLE;
01606             tagType     = MB_TAG_DENSE;
01607             defaultVal  = defHandle;
01608             break;
01609 
01610         case SPARSE_INTEGER:
01611             tagDataType = MB_TYPE_INTEGER;
01612             tagType     = MB_TAG_SPARSE;
01613             defaultVal  = defInt;
01614             break;
01615 
01616         case SPARSE_DOUBLE:
01617             tagDataType = MB_TYPE_DOUBLE;
01618             tagType     = MB_TAG_SPARSE;
01619             defaultVal  = defDouble;
01620             break;
01621 
01622         case SPARSE_ENTITYHANDLE:
01623             tagDataType = MB_TYPE_HANDLE;
01624             tagType     = MB_TAG_SPARSE;
01625             defaultVal  = defHandle;
01626             break;
01627 
01628         default: {
01629             delete[] defInt;
01630             delete[] defDouble;
01631             delete[] defHandle;
01632             return moab::MB_FAILURE;
01633         }  // error
01634     }
01635 
01636     Tag tagHandle;
01637     // split storage names if separated list
01638 
01639     std::string tag_name( tag_storage_name );
01640 
01641     //  first separate the names of the tags
01642     // we assume that there are separators ":" between the tag names
01643     std::vector< std::string > tagNames;
01644     std::string separator( ":" );
01645     split_tag_names( tag_name, separator, tagNames );
01646 
01647     ErrorCode rval           = moab::MB_SUCCESS;  // assume success already :)
01648     appData& data            = context.appDatas[*pid];
01649     int already_defined_tags = 0;
01650 
01651     for( size_t i = 0; i < tagNames.size(); i++ )
01652     {
01653         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
01654                                             tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
01655 
01656         if( MB_ALREADY_ALLOCATED == rval )
01657         {
01658             std::map< std::string, Tag >& mTags        = data.tagMap;
01659             std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
01660 
01661             if( mit == mTags.end() )
01662             {
01663                 // add it to the map
01664                 mTags[tagNames[i]] = tagHandle;
01665                 // push it to the list of tags, too
01666                 *tag_index = (int)data.tagList.size();
01667                 data.tagList.push_back( tagHandle );
01668             }
01669             rval = MB_SUCCESS;
01670             already_defined_tags++;
01671         }
01672         else if( MB_SUCCESS == rval )
01673         {
01674             data.tagMap[tagNames[i]] = tagHandle;
01675             *tag_index               = (int)data.tagList.size();
01676             data.tagList.push_back( tagHandle );
01677         }
01678         else
01679         {
01680             rval = moab::MB_FAILURE;  // some tags were not created
01681         }
01682     }
01683     // we don't need default values anymore, avoid leaks
01684     int rankHere = 0;
01685 #ifdef MOAB_HAVE_MPI
01686     ParallelComm* pco = context.pcomms[*pid];
01687     rankHere          = pco->rank();
01688 #endif
01689     if( !rankHere && already_defined_tags )
01690         std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
01691                   << " has " << already_defined_tags << " already defined tags out of " << tagNames.size()
01692                   << " tags \n";
01693     delete[] defInt;
01694     delete[] defDouble;
01695     delete[] defHandle;
01696     return rval;
01697 }
01698 
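// Illustrative usage sketch (not part of the iMOAB implementation): several tags can be defined
// in one call by joining their names with ":"; the tag names below are hypothetical, and
// tag_index receives the index of the last tag pushed to the application's tag list.
#if 0
static ErrCode example_define_tags( iMOAB_AppID pid )
{
    char tagNames[] = "bottom_temperature:bottom_pressure";  // hypothetical names, ":"-separated
    int tagType     = DENSE_DOUBLE;                          // one of the 6 supported tag types
    int nComp       = 1;                                     // components per entity
    int tagIndex    = -1;
    return iMOAB_DefineTagStorage( pid, tagNames, &tagType, &nComp, &tagIndex );
}
#endif
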
01699 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
01700                                 const iMOAB_String tag_storage_name,
01701                                 int* num_tag_storage_length,
01702                                 int* ent_type,
01703                                 int* tag_storage_data )
01704 {
01705     std::string tag_name( tag_storage_name );
01706 
01707     // Get the application data
01708     appData& data = context.appDatas[*pid];
01709 
01710     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01711     {
01712         return moab::MB_FAILURE;
01713     }  // tag not defined
01714 
01715     Tag tag = data.tagMap[tag_name];
01716 
01717     int tagLength  = 0;
01718     ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01719 
01720     DataType dtype;
01721     rval = context.MBI->tag_get_data_type( tag, dtype );
01722 
01723     if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
01724     {
01725         return moab::MB_FAILURE;
01726     }
01727 
01728     // set it on a subset of entities, based on type and length
01729     Range* ents_to_set;
01730     if( *ent_type == TAG_VERTEX )  // vertices
01731     {
01732         ents_to_set = &data.all_verts;
01733     }
01734     else  // if (*ent_type == TAG_ELEMENT) // *ent_type can be TAG_VERTEX or TAG_ELEMENT
01735     {
01736         ents_to_set = &data.primary_elems;
01737     }
01738 
01739     int nents_to_be_set = *num_tag_storage_length / tagLength;
01740 
01741     if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
01742     {
01743         return moab::MB_FAILURE;
01744     }  // too many entities to be set, or too few
01745 
01746     // restrict the range; everything is contiguous; or not?
01747 
01748     // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
01749     // 1 ) );
01750     rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
01751 
01752     return moab::MB_SUCCESS;  // no error
01753 }
01754 
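// Illustrative usage sketch (not part of the iMOAB implementation): setting a previously defined
// integer tag on all vertices; the tag name is hypothetical and the vertex count is assumed to
// be known to the caller (e.g. from iMOAB_GetMeshInfo).
#if 0
static ErrCode example_set_int_tag( iMOAB_AppID pid, int nVerts )
{
    char tagName[] = "vertex_marker";        // hypothetical tag, defined as DENSE_INTEGER, 1 component
    std::vector< int > values( nVerts, 0 );
    int length  = nVerts;                    // total number of values = entities x tag length
    int entType = TAG_VERTEX;
    return iMOAB_SetIntTagStorage( pid, tagName, &length, &entType, &values[0] );
}
#endif
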
01755 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
01756                                 const iMOAB_String tag_storage_name,
01757                                 int* num_tag_storage_length,
01758                                 int* ent_type,
01759                                 int* tag_storage_data )
01760 {
01761     ErrorCode rval;
01762     std::string tag_name( tag_storage_name );
01763 
01764     appData& data = context.appDatas[*pid];
01765 
01766     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01767     {
01768         return moab::MB_FAILURE;
01769     }  // tag not defined
01770 
01771     Tag tag = data.tagMap[tag_name];
01772 
01773     int tagLength = 0;
01774     rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01775 
01776     DataType dtype;
01777     rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01778 
01779     if( dtype != MB_TYPE_INTEGER )
01780     {
01781         return moab::MB_FAILURE;
01782     }
01783 
01784     // set it on a subset of entities, based on type and length
01785     Range* ents_to_get;
01786     if( *ent_type == TAG_VERTEX )  // vertices
01787     {
01788         ents_to_get = &data.all_verts;
01789     }
01790     else  // if (*ent_type == TAG_ELEMENT)
01791     {
01792         ents_to_get = &data.primary_elems;
01793     }
01794 
01795     int nents_to_get = *num_tag_storage_length / tagLength;
01796 
01797     if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
01798     {
01799         return moab::MB_FAILURE;
01800     }  // too many entities to get, or too few
01801 
01802     // restrict the range; everything is contiguous; or not?
01803     // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
01804     // ) );
01805 
01806     rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
01807 
01808     return moab::MB_SUCCESS;  // no error
01809 }
01810 
01811 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
01812                                    const iMOAB_String tag_storage_names,
01813                                    int* num_tag_storage_length,
01814                                    int* ent_type,
01815                                    double* tag_storage_data )
01816 {
01817     ErrorCode rval;
01818     std::string tag_names( tag_storage_names );
01819     // same pattern as for the integer tags; the tag data type is checked below
01820     std::vector< std::string > tagNames;
01821     std::vector< Tag > tagHandles;
01822     std::string separator( ":" );
01823     split_tag_names( tag_names, separator, tagNames );
01824 
01825     appData& data      = context.appDatas[*pid];
01826     Range* ents_to_set = NULL;
01827 
01828     if( *ent_type == TAG_VERTEX )  // vertices
01829     {
01830         ents_to_set = &data.all_verts;
01831     }
01832     else // TAG_ELEMENT
01833     {
01834         ents_to_set = &data.primary_elems;
01835     }
01836 
01837     int nents_to_be_set = (int)( *ents_to_set ).size();
01838     int position        = 0;
01839 
01840     for( size_t i = 0; i < tagNames.size(); i++ )
01841     {
01842         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
01843         {
01844             return moab::MB_FAILURE;
01845         }  // some tag not defined yet in the app
01846 
01847         Tag tag = data.tagMap[tagNames[i]];
01848 
01849         int tagLength = 0;
01850         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01851 
01852         DataType dtype;
01853         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01854 
01855         if( dtype != MB_TYPE_DOUBLE )
01856         {
01857             return moab::MB_FAILURE;
01858         }
01859 
01860         // set it on the subset of entities, based on type and length
01861         if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
01862             return moab::MB_FAILURE;  // too many entity values to be set
01863 
01864         rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
01865         // increment position to next tag
01866         position = position + tagLength * nents_to_be_set;
01867     }
01868     return moab::MB_SUCCESS;  // no error
01869 }
01870 
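// Illustrative usage sketch (not part of the iMOAB implementation): when several tag names are
// passed, tag_storage_data is unrolled tag by tag, i.e. all values of the first tag for every
// entity, followed by all values of the second tag; the tag names below are hypothetical.
#if 0
static ErrCode example_set_two_double_tags( iMOAB_AppID pid, int nCells )
{
    char tagNames[] = "cell_area:cell_fraction";      // hypothetical tags, each DENSE_DOUBLE, 1 component
    std::vector< double > values( 2 * nCells, 0.0 );  // layout: [ area(0..nCells-1) | fraction(0..nCells-1) ]
    int length  = 2 * nCells;
    int entType = TAG_ELEMENT;
    return iMOAB_SetDoubleTagStorage( pid, tagNames, &length, &entType, &values[0] );
}
#endif
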
01871 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid,
01872                                           const iMOAB_String tag_storage_names,
01873                                           int* num_tag_storage_length,
01874                                           int* ent_type,
01875                                           double* tag_storage_data,
01876                                           int* globalIds )
01877 {
01878     ErrorCode rval;
01879     std::string tag_names( tag_storage_names );
01880     // same pattern as for the integer tags; the tag data type is checked below
01881     std::vector< std::string > tagNames;
01882     std::vector< Tag > tagHandles;
01883     std::string separator( ":" );
01884     split_tag_names( tag_names, separator, tagNames );
01885 
01886     appData& data      = context.appDatas[*pid];
01887     Range* ents_to_set = NULL;
01888 
01889     if( *ent_type == TAG_VERTEX )  // vertices
01890     {
01891         ents_to_set = &data.all_verts;
01892     }
01893     else // TAG_ELEMENT
01894     {
01895         ents_to_set = &data.primary_elems;
01896     }
01897 
01898     int nents_to_be_set = (int)( *ents_to_set ).size();
01899 
01900     Tag gidTag = context.MBI->globalId_tag();
01901     std::vector< int > gids;
01902     gids.resize( nents_to_be_set );
01903     rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
01904 
01905     // so we will need to set the tags according to the global id passed;
01906     // so the order in tag_storage_data is the same as the order in globalIds, but the order
01907     // in local range is gids
01908     std::map< int, EntityHandle > eh_by_gid;
01909     int i = 0;
01910     for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
01911     {
01912         eh_by_gid[gids[i]] = *it;
01913     }
01914 
01915     std::vector< int > tagLengths( tagNames.size() );
01916     std::vector< Tag > tagList;
01917     size_t total_tag_len = 0;
01918     for( size_t i = 0; i < tagNames.size(); i++ )
01919     {
01920         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
01921         {
01922             MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
01923         }  // some tag not defined yet in the app
01924 
01925         Tag tag = data.tagMap[tagNames[i]];
01926         tagList.push_back( tag );
01927 
01928         int tagLength = 0;
01929         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01930 
01931         total_tag_len += tagLength;
01932         tagLengths[i] = tagLength;
01933         DataType dtype;
01934         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01935 
01936         if( dtype != MB_TYPE_DOUBLE )
01937         {
01938             MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
01939         }
01940     }
01941     bool serial = true;
01942 #ifdef MOAB_HAVE_MPI
01943     ParallelComm* pco  = context.pcomms[*pid];
01944     unsigned num_procs = pco->size();
01945     if( num_procs > 1 ) serial = false;
01946 #endif
01947 
01948     if( serial )
01949     {
01950         assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
01951         // tags are unrolled; loop over global ids first, then over tags, keeping track of the offsets
01952         for( int i = 0; i < nents_to_be_set; i++ )
01953         {
01954             int gid         = globalIds[i];
01955             EntityHandle eh = eh_by_gid[gid];
01956             // now loop over tags
01957             int indexInTagValues = 0;  //
01958             for( size_t j = 0; j < tagList.size(); j++ )
01959             {
01960                 indexInTagValues += i * tagLengths[j];
01961                 rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
01962                 // advance the pointer/index
01963                 indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j];  // at the end of tag data
01964             }
01965         }
01966     }
01967 #ifdef MOAB_HAVE_MPI
01968     else  // it can be not serial only if pco->size() > 1, parallel
01969     {
01970         // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
01971         // we first send tuples to rendezvous points, then from there to the processor that requested them
01972         // it is a 2-hop global gather scatter
01973         int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() );
01974         assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
01975         TupleList TLsend;
01976         TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals );  //  to proc, marker(gid), total_tag_len doubles
01977         TLsend.enableWriteAccess();
01978         // the rendezvous processor for a given global id is global_id % num_procs
01979 
01980         int indexInRealLocal = 0;
01981         for( int i = 0; i < nbLocalVals; i++ )
01982         {
01983             // to proc, marker (global id), then the unrolled tag values
01984             int marker              = globalIds[i];
01985             int to_proc             = marker % num_procs;
01986             int n                   = TLsend.get_n();
01987             TLsend.vi_wr[2 * n]     = to_proc;  // send to processor
01988             TLsend.vi_wr[2 * n + 1] = marker;
01989             int indexInTagValues    = 0;
01990             // tag data collect by number of tags
01991             for( size_t j = 0; j < tagList.size(); j++ )
01992             {
01993                 indexInTagValues += i * tagLengths[j];
01994                 for( int k = 0; k < tagLengths[j]; k++ )
01995                 {
01996                     TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
01997                 }
01998                 indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
01999             }
02000             TLsend.inc_n();
02001         }
02002 
02003         assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
02004         // now route the tag values to their rendezvous points; the requests (which task needs
02005         // which global id) will be routed to the same rendezvous points next
02006         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
02007         TupleList TLreq;
02008         TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
02009         TLreq.enableWriteAccess();
02010         for( int i = 0; i < nents_to_be_set; i++ )
02011         {
02012             // to proc, marker
02013             int marker             = gids[i];
02014             int to_proc            = marker % num_procs;
02015             int n                  = TLreq.get_n();
02016             TLreq.vi_wr[2 * n]     = to_proc;  // send to processor
02017             TLreq.vi_wr[2 * n + 1] = marker;
02018             // tag data collect by number of tags
02019             TLreq.inc_n();
02020         }
02021         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
02022 
02023         // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
02024         // we should first order by global id, and then build the new TL with send to proc, global id and
02025         // tags for it
02026         // sort by global ids the tuple lists
02027         moab::TupleList::buffer sort_buffer;
02028         sort_buffer.buffer_init( TLreq.get_n() );
02029         TLreq.sort( 1, &sort_buffer );
02030         sort_buffer.reset();
02031         sort_buffer.buffer_init( TLsend.get_n() );
02032         TLsend.sort( 1, &sort_buffer );
02033         sort_buffer.reset();
02034         // now send the tag values to the proc that requested it
02035         // in theory, for a full partition, TLreq and TLsend should have the same size, and
02036         // each dof should have exactly one target proc; is that true in general?
02037         // how do we plan to use this? would it be better to store the comm graph for future use?
02038 
02039         // start copy from comm graph settle
02040         TupleList TLBack;
02041         TLBack.initialize( 3, 0, 0, total_tag_len, 0 );  // to proc, marker, tag from proc , tag values
02042         TLBack.enableWriteAccess();
02043 
02044         int n1 = TLreq.get_n();
02045         int n2 = TLsend.get_n();
02046 
02047         int indexInTLreq  = 0;
02048         int indexInTLsend = 0;  // advance both, according to the marker
02049         if( n1 > 0 && n2 > 0 )
02050         {
02051 
02052             while( indexInTLreq < n1 && indexInTLsend < n2 )  // if any is over, we are done
02053             {
02054                 int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
02055                 int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
02056                 if( currentValue1 < currentValue2 )
02057                 {
02058                     // we have a big problem; basically, we are saying that
02059                     // dof currentValue is on one model and not on the other
02060                     // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
02061                     indexInTLreq++;
02062                     continue;
02063                 }
02064                 if( currentValue1 > currentValue2 )
02065                 {
02066                     // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
02067                     indexInTLsend++;
02068                     continue;
02069                 }
02070                 int size1 = 1;
02071                 int size2 = 1;
02072                 while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
02073                     size1++;
02074                 while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
02075                     size2++;
02076                 // must be found in both lists, find the start and end indices
02077                 for( int i1 = 0; i1 < size1; i1++ )
02078                 {
02079                     for( int i2 = 0; i2 < size2; i2++ )
02080                     {
02081                         // send the info back to components
02082                         int n = TLBack.get_n();
02083                         TLBack.reserve();
02084                         TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )];  // send back to the proc marker
02085                                                                                        // came from, info from comp2
02086                         TLBack.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend, just for verif ?)
02087                         TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )];  // from proc on comp2
02088                         // also fill tag values
02089                         for( size_t k = 0; k < total_tag_len; k++ )
02090                         {
02091                             TLBack.vr_rd[total_tag_len * n + k] =
02092                                 TLsend.vr_rd[total_tag_len * indexInTLsend + k];  // deep copy of tag values
02093                         }
02094                     }
02095                 }
02096                 indexInTLreq += size1;
02097                 indexInTLsend += size2;
02098             }
02099         }
02100         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
02101         // end copy from comm graph
02102         // after we are done sending, we need to set those tag values, in a reverse process compared to send
02103         n1             = TLBack.get_n();
02104         double* ptrVal = &TLBack.vr_rd[0];  //
02105         for( int i = 0; i < n1; i++ )
02106         {
02107             int gid         = TLBack.vi_rd[3 * i + 1];  // marker
02108             EntityHandle eh = eh_by_gid[gid];
02109             // now loop over tags
02110 
02111             for( size_t j = 0; j < tagList.size(); j++ )
02112             {
02113                 rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
02114                 // advance the pointer/index
02115                 ptrVal += tagLengths[j];  // at the end of tag data per call
02116             }
02117         }
02118     }
02119 #endif
02120     return MB_SUCCESS;
02121 }
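
// Minimal sketch (an illustration, not iMOAB code) of the rendezvous rule used in the parallel
// branch above: the task holding the values and the task owning the entity both route a given
// global id to the same intermediate task, computed independently with the same formula.
#if 0
static int rendezvous_task_for_gid( int gid, int num_procs )
{
    return gid % num_procs;  // same rule used for the TLsend and TLreq destinations above
}
#endif
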
02122 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
02123                                    const iMOAB_String tag_storage_names,
02124                                    int* num_tag_storage_length,
02125                                    int* ent_type,
02126                                    double* tag_storage_data )
02127 {
02128     ErrorCode rval;
02129     // exactly the same code, except tag type check
02130     std::string tag_names( tag_storage_names );
02131     // same pattern as for the integer tags; the tag data type is checked below
02132     std::vector< std::string > tagNames;
02133     std::vector< Tag > tagHandles;
02134     std::string separator( ":" );
02135     split_tag_names( tag_names, separator, tagNames );
02136 
02137     appData& data = context.appDatas[*pid];
02138 
02139     // set it on a subset of entities, based on type and length
02140     Range* ents_to_get = NULL;
02141     if( *ent_type == TAG_VERTEX )  // vertices
02142     {
02143         ents_to_get = &data.all_verts;
02144     }
02145     else // TAG_ELEMENT
02146     {
02147         ents_to_get = &data.primary_elems;
02148     }
02149 
02150     int nents_to_get = (int)ents_to_get->size();
02151     int position     = 0;
02152     for( size_t i = 0; i < tagNames.size(); i++ )
02153     {
02154         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
02155         {
02156             return moab::MB_FAILURE;
02157         }  // tag not defined
02158 
02159         Tag tag = data.tagMap[tagNames[i]];
02160 
02161         int tagLength = 0;
02162         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
02163 
02164         DataType dtype;
02165         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
02166 
02167         if( dtype != MB_TYPE_DOUBLE )
02168         {
02169             return moab::MB_FAILURE;
02170         }
02171 
02172         if( position + nents_to_get * tagLength > *num_tag_storage_length )
02173             return moab::MB_FAILURE;  // too many entity values to get
02174 
02175         rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
02176         position = position + nents_to_get * tagLength;
02177     }
02178 
02179     return moab::MB_SUCCESS;  // no error
02180 }
02181 
02182 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
02183 {
02184 #ifdef MOAB_HAVE_MPI
02185     appData& data = context.appDatas[*pid];
02186     Range ent_exchange;
02187     std::vector< Tag > tags;
02188 
02189     for( int i = 0; i < *num_tag; i++ )
02190     {
02191         if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
02192         {
02193             return moab::MB_FAILURE;
02194         }  // error in tag index
02195 
02196         tags.push_back( data.tagList[tag_indices[i]] );
02197     }
02198 
02199     if( *ent_type == 0 )
02200     {
02201         ent_exchange = data.all_verts;
02202     }
02203     else if( *ent_type == 1 )
02204     {
02205         ent_exchange = data.primary_elems;
02206     }
02207     else
02208     {
02209         return moab::MB_FAILURE;
02210     }  // unexpected type
02211 
02212     ParallelComm* pco = context.pcomms[*pid];
02213 
02214     ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
02215 
02216 #else
02217     /* do nothing if serial */
02218     // just silence the warning
02219     // do not call sync tags in serial!
02220     int k = *pid + *num_tag + *tag_indices + *ent_type;
02221     k++;
02222 #endif
02223 
02224     return moab::MB_SUCCESS;
02225 }
02226 
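// Illustrative usage sketch (not part of the iMOAB implementation): exchanging one tag on
// vertices with the sharing processors; tagIndex is assumed to be the value returned by
// iMOAB_DefineTagStorage for that tag.
#if 0
static ErrCode example_sync_one_tag( iMOAB_AppID pid, int tagIndex )
{
    int numTags = 1;
    int entType = 0;  // 0 = vertices, 1 = primary elements (see above)
    return iMOAB_SynchronizeTags( pid, &numTags, &tagIndex, &entType );
}
#endif
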
02227 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
02228 {
02229 #ifdef MOAB_HAVE_MPI
02230     appData& data = context.appDatas[*pid];
02231     Range ent_exchange;
02232 
02233     if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
02234     {
02235         return moab::MB_FAILURE;
02236     }  // error in tag index
02237 
02238     Tag tagh = data.tagList[*tag_index];
02239 
02240     if( *ent_type == 0 )
02241     {
02242         ent_exchange = data.all_verts;
02243     }
02244     else if( *ent_type == 1 )
02245     {
02246         ent_exchange = data.primary_elems;
02247     }
02248     else
02249     {
02250         return moab::MB_FAILURE;
02251     }  // unexpected type
02252 
02253     ParallelComm* pco = context.pcomms[*pid];
02254     // we could do different MPI_Op; do not bother now, we will call from fortran
02255     ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
02256 
02257 #else
02258     /* do nothing if serial */
02259     // just silence the warning
02260     // do not call sync tags in serial!
02261     int k = *pid + *tag_index + *ent_type;
02262     k++;  // just do junk, to avoid complaints
02263 #endif
02264     return moab::MB_SUCCESS;
02265 }
02266 
02267 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
02268                                    iMOAB_LocalID* local_index,
02269                                    int* num_adjacent_elements,
02270                                    iMOAB_LocalID* adjacent_element_IDs )
02271 {
02272     ErrorCode rval;
02273 
02274     // one neighbor for each subentity of dimension-1
02275     MeshTopoUtil mtu( context.MBI );
02276     appData& data   = context.appDatas[*pid];
02277     EntityHandle eh = data.primary_elems[*local_index];
02278     Range adjs;
02279     rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
02280 
02281     if( *num_adjacent_elements < (int)adjs.size() )
02282     {
02283         return moab::MB_FAILURE;
02284     }  // not dimensioned correctly
02285 
02286     *num_adjacent_elements = (int)adjs.size();
02287 
02288     for( int i = 0; i < *num_adjacent_elements; i++ )
02289     {
02290         adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
02291     }
02292 
02293     return moab::MB_SUCCESS;
02294 }
02295 
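// Illustrative usage sketch (not part of the iMOAB implementation): querying the neighbors of
// one local element across faces; num_adjacent_elements carries the capacity of the output
// array on input and the actual neighbor count on output. The capacity of 6 is an assumption.
#if 0
static ErrCode example_element_neighbors( iMOAB_AppID pid, iMOAB_LocalID localCell )
{
    iMOAB_LocalID neighbors[6];  // enough for a hexahedron
    int numNeighbors = 6;
    return iMOAB_GetNeighborElements( pid, &localCell, &numNeighbors, neighbors );
}
#endif
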
02296 #if 0
02297 
02298 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
02299 {
02300     return moab::MB_SUCCESS;
02301 }
02302 
02303 #endif
02304 
02305 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
02306 {
02307     ErrorCode rval;
02308     appData& data = context.appDatas[*pid];
02309 
02310     if( !data.local_verts.empty() )  // we should have no vertices in the app
02311     {
02312         return moab::MB_FAILURE;
02313     }
02314 
02315     int nverts = *coords_len / *dim;
02316 
02317     rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
02318 
02319     rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
02320 
02321     // also add the vertices to the all_verts range
02322     data.all_verts.merge( data.local_verts );
02323     return moab::MB_SUCCESS;
02324 }
02325 
02326 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
02327                               int* num_elem,
02328                               int* type,
02329                               int* num_nodes_per_element,
02330                               int* connectivity,
02331                               int* block_ID )
02332 {
02333     // Create elements
02334     appData& data = context.appDatas[*pid];
02335 
02336     ReadUtilIface* read_iface;
02337     ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
02338 
02339     EntityType mbtype = ( EntityType )( *type );
02340     EntityHandle actual_start_handle;
02341     EntityHandle* array = NULL;
02342     rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
02343 
02344     // fill up with actual connectivity from input; assume the vertices are in order, and start
02345     // vertex is the first in the current data vertex range
02346     EntityHandle firstVertex = data.local_verts[0];
02347 
02348     for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
02349     {
02350         array[j] = connectivity[j] + firstVertex - 1;
02351     }  // assumes connectivity uses a 1-based array (from fortran, mostly)
02352 
02353     Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
02354 
02355     rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
02356 
02357     data.primary_elems.merge( new_elems );
02358 
02359     // add to adjacency
02360     rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
02361     // organize all new elements in a block with the given block ID; if the block set does
02362     // not exist, create a new mesh set
02363     Range sets;
02364     int set_no            = *block_ID;
02365     const void* setno_ptr = &set_no;
02366     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
02367                                                       sets );
02368     EntityHandle block_set;
02369 
02370     if( MB_FAILURE == rval || sets.empty() )
02371     {
02372         // create a new set, with this block ID
02373         rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
02374 
02375         rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
02376 
02377         // add the material set to file set
02378         rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
02379     }
02380     else
02381     {
02382         block_set = sets[0];
02383     }  // first set is the one we want
02384 
02385     /// add the new elements to the block set
02386     rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
02387 
02388     return moab::MB_SUCCESS;
02389 }
02390 
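// Illustrative usage sketch (not part of the iMOAB implementation): building a single quad in an
// application that has no mesh yet; note the 1-based connectivity expected by the loop above.
// The block id of 1 is an assumption of this sketch.
#if 0
static ErrCode example_create_one_quad( iMOAB_AppID pid )
{
    double coords[12] = { 0., 0., 0., 1., 0., 0., 1., 1., 0., 0., 1., 0. };
    int coordsLen = 12, dim = 3;
    ErrCode rc = iMOAB_CreateVertices( pid, &coordsLen, &dim, coords );
    if( rc != moab::MB_SUCCESS ) return rc;

    int conn[4]  = { 1, 2, 3, 4 };  // 1-based indices into the vertices created above
    int numElem  = 1;
    int elemType = (int)MBQUAD;     // MOAB EntityType value for quadrilaterals
    int nodesPer = 4;
    int blockId  = 1;               // material (block) set id; the set is created if missing
    return iMOAB_CreateElements( pid, &numElem, &elemType, &nodesPer, conn, &blockId );
}
#endif
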
02391 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
02392 {
02393     appData& data            = context.appDatas[*pid];
02394     data.num_global_vertices = *num_global_verts;
02395     data.num_global_elements = *num_global_elems;
02396     return moab::MB_SUCCESS;
02397 }
02398 
02399 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
02400 {
02401     appData& data = context.appDatas[*pid];
02402     if( NULL != num_global_verts )
02403     {
02404         *num_global_verts = data.num_global_vertices;
02405     }
02406     if( NULL != num_global_elems )
02407     {
02408         *num_global_elems = data.num_global_elements;
02409     }
02410 
02411     return moab::MB_SUCCESS;
02412 }
02413 
02414 #ifdef MOAB_HAVE_MPI
02415 
02416 // this makes sense only for parallel runs
02417 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
02418 {
02419     appData& data     = context.appDatas[*pid];
02420     ParallelComm* pco = context.pcomms[*pid];
02421     EntityHandle cset = data.file_set;
02422     int dum_id        = 0;
02423     ErrorCode rval;
02424     if( data.primary_elems.empty() )
02425     {
02426         // skip the actual resolve; assume the vertices are already distributed,
02427         // so there is no need to share them
02428     }
02429     else
02430     {
02431         // create an integer tag for resolving ; maybe it can be a long tag in the future
02432         // (more than 2 B vertices;)
02433 
02434         Tag stag;
02435         rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
02436                                             &dum_id );MB_CHK_ERR( rval );
02437 
02438         if( *num_verts > (int)data.local_verts.size() )
02439         {
02440             return moab::MB_FAILURE;
02441         }  // more markers passed than local vertices
02442 
02443         rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval );  // assumes integer tag
02444 
02445         rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
02446 
02447         rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
02448     }
02449     // provide partition tag equal to rank
02450     Tag part_tag;
02451     dum_id = -1;
02452     rval   = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02453                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02454 
02455     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02456     {
02457         std::cout << " can't get par part tag.\n";
02458         return moab::MB_FAILURE;
02459     }
02460 
02461     int rank = pco->rank();
02462     rval     = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
02463 
02464     return moab::MB_SUCCESS;
02465 }
02466 
02467 // this assumes that this was not called before
02468 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
02469 {
02470     ErrorCode rval;
02471 
02472     // verify we have valid ghost layers input specified. If invalid, exit quick.
02473     if( *num_ghost_layers <= 0 )
02474     {
02475         return moab::MB_SUCCESS;
02476     }  // nothing to do
02477 
02478     appData& data     = context.appDatas[*pid];
02479     ParallelComm* pco = context.pcomms[*pid];
02480 
02481     int addl_ents = 0;  // maybe we should be passing this too; most of the time we do not need additional ents
02482     // exchange_ghost_cells below is a collective call
02483     rval =
02484         pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, *num_ghost_layers, addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
02485 
02486     // now re-establish all mesh info, based solely on what is in the file set
02487     return iMOAB_UpdateMeshInfo( pid );
02488 }
02489 
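// Illustrative usage sketch (not part of the iMOAB implementation): requesting one layer of
// ghost cells connected through shared vertices; the choice of bridge dimension 0 and a single
// layer is an assumption of this sketch.
#if 0
static ErrCode example_one_ghost_layer( iMOAB_AppID pid, int meshDim )
{
    int ghostDim  = meshDim;  // ghost the primary elements (2 or 3)
    int bridgeDim = 0;        // connect ghost cells through shared vertices
    int numLayers = 1;
    return iMOAB_DetermineGhostEntities( pid, &ghostDim, &numLayers, &bridgeDim );
}
#endif
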
02490 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
02491 {
02492     assert( join != nullptr );
02493     assert( receivingGroup != nullptr );
02494     assert( rcompid != nullptr );
02495 
02496     ErrorCode rval;
02497     int ierr;
02498     appData& data     = context.appDatas[*pid];
02499     ParallelComm* pco = context.pcomms[*pid];
02500 
02501     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02502     MPI_Group recvGroup =
02503         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
02504     MPI_Comm sender = pco->comm();  // the sender comm is obtained from parallel comm in moab; no need to pass it along
02505     // first see which processors are in each group; get the sender group from the sender communicator
02506     MPI_Group senderGroup;
02507     ierr = MPI_Comm_group( sender, &senderGroup );
02508     if( ierr != 0 ) return moab::MB_FAILURE;
02509 
02510     // instantiate the par comm graph
02511     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02512     // int coid2)
02513     ParCommGraph* cgraph =
02514         new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
02515     // we should search if we have another pcomm with the same comp ids in the list already
02516     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02517     context.appDatas[*pid].pgraph[*rcompid] = cgraph;
02518 
02519     int sender_rank = -1;
02520     MPI_Comm_rank( sender, &sender_rank );
02521 
02522     // decide how to distribute elements to each processor
02523     // now, get the entities on local processor, and pack them into a buffer for various processors
02524     // we will do trivial partition: first get the total number of elements from "sender"
02525     std::vector< int > number_elems_per_part;
02526     // how to distribute local elements to receiving tasks?
02527     // trivial partition: first compute the total number of elements that need to be sent
02528     Range owned = context.appDatas[*pid].owned_elems;
02529     std::vector< char > zoltanBuffer;
02530     if( owned.size() == 0 )
02531     {
02532         // then it must be vertices that we want to send
02533         owned = context.appDatas[*pid].local_verts;
02534         // we should have some vertices here
02535     }
02536 
02537     if( *method == 0 )  // trivial partitioning, old method
02538     {
02539         int local_owned_elem = (int)owned.size();
02540         int size             = pco->size();
02541         int rank             = pco->rank();
02542         number_elems_per_part.resize( size );  //
02543         number_elems_per_part[rank] = local_owned_elem;
02544 #if( MPI_VERSION >= 2 )
02545         // Use "in place" option
02546         ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
02547 #else
02548         {
02549             std::vector< int > all_tmp( size );
02550             ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
02551             number_elems_per_part = all_tmp;
02552         }
02553 #endif
02554 
02555         if( ierr != 0 )
02556         {
02557             return moab::MB_FAILURE;
02558         }
02559 
02560         // every sender computes the trivial partition; it is cheap, and we need to send it anyway
02561         // to the receivers
02562         rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
02563 
02564         // the zoltan buffer is empty here; it is passed only to satisfy the interface
02565         rval = cgraph->send_graph( global, zoltanBuffer );MB_CHK_ERR( rval );
02566     }
02567 #ifdef MOAB_HAVE_ZOLTAN
02568     else  // *method != 0, so it is either graph or geometric, parallel
02569     {
02570         // it is assumed the method 4 was called in advance
02571         if( *method == 5 )
02572         {
02573             zoltanBuffer = context.uniqueZoltanBuffer;
02574         }
02575         rval = cgraph->compute_partition( pco, owned, *method, zoltanBuffer );MB_CHK_ERR( rval );
02576         // basically, send the graph to the receiver side, with unblocking send
02577         rval = cgraph->send_graph_partition( pco, global, zoltanBuffer );MB_CHK_ERR( rval );
02578     }
02579 #endif  //  #ifdef MOAB_HAVE_ZOLTAN
02580     // pco is needed to pack, not for communication
02581     rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
02582 
02583     // mark for deletion
02584     MPI_Group_free( &senderGroup );
02585     return moab::MB_SUCCESS;
02586 }
02587 
02588 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
02589 {
02590     assert( join != nullptr );
02591     assert( sendingGroup != nullptr );
02592     assert( scompid != nullptr );
02593 
02594     ErrorCode rval;
02595     appData& data          = context.appDatas[*pid];
02596     ParallelComm* pco      = context.pcomms[*pid];
02597     MPI_Comm receive       = pco->comm();
02598     EntityHandle local_set = data.file_set;
02599 
02600     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02601     MPI_Group sendGroup =
02602         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
02603 
02604     // first see which processors are in each group; get the receiver group from the receive
02605     // communicator
02606     MPI_Group receiverGroup;
02607     int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
02608 
02609     // instantiate the par comm graph
02610     ParCommGraph* cgraph =
02611         new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
02612     // TODO we should search if we have another pcomm with the same comp ids in the list already
02613     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02614     context.appDatas[*pid].pgraph[*scompid] = cgraph;
02615 
02616     int receiver_rank = -1;
02617     MPI_Comm_rank( receive, &receiver_rank );
02618 
02619     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
02620     // knows what data to expect
02621     std::vector< int > pack_array;
02622     std::vector< char > zoltanBuff;
02623     rval = cgraph->receive_comm_graph( global, pco, pack_array, zoltanBuff );MB_CHK_ERR( rval );
02624 
02625     // the senders that correspond to the current receiver
02626     int current_receiver = cgraph->receiver( receiver_rank );
02627 #ifdef MOAB_HAVE_ZOLTAN
02628     if( zoltanBuff.size() > 0 )  // it could happen only on root of receiver; store it in a global context member
02629         // the corresponding send used method 4, for sure
02630         context.uniqueZoltanBuffer = zoltanBuff;
02631 #endif
02632     std::vector< int > senders_local;
02633     size_t n = 0;
02634 
02635     while( n < pack_array.size() - 1 )  // the last int (whether a zoltan buffer was sent or not) was already processed
02636     {
02637         if( current_receiver == pack_array[n] )
02638         {
02639             for( int j = 0; j < pack_array[n + 1]; j++ )
02640             {
02641                 senders_local.push_back( pack_array[n + 2 + j] );
02642             }
02643 
02644             break;
02645         }
02646 
02647         n = n + 2 + pack_array[n + 1];
02648     }
02649 
02650 #ifdef VERBOSE
02651     std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
02652               << senders_local.size() << " tasks: ";
02653 
02654     for( int k = 0; k < (int)senders_local.size(); k++ )
02655     {
02656         std::cout << " " << senders_local[k];
02657     }
02658 
02659     std::cout << "\n";
02660 #endif
02661 
02662     if( senders_local.empty() )
02663     {
02664         std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
02665     }
02666     rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
02667 
02668     // after we are done, we could merge vertices that come from different senders, but
02669     // have the same global id
02670     Tag idtag;
02671     rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
02672 
02673     //   data.point_cloud = false;
02674     Range local_ents;
02675     rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
02676 
02677     // do not do merge if point cloud
02678     if( !local_ents.empty() )
02679     {
02680         if( !local_ents.all_of_type( MBVERTEX ) )
02681         {
02682             if( (int)senders_local.size() >= 2 )  // need to remove duplicate vertices
02683             // that might come from different senders
02684             {
02685 
02686                 Range local_verts = local_ents.subset_by_type( MBVERTEX );
02687                 Range local_elems = subtract( local_ents, local_verts );
02688 
02689                 // remove from local set the vertices
02690                 rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
02691 
02692 #ifdef VERBOSE
02693                 std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
02694 #endif
02695                 MergeMesh mm( context.MBI );
02696 
02697                 rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
02698 
02699                 Range new_verts;  // local elems are local entities without vertices
02700                 rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
02701 
02702 #ifdef VERBOSE
02703                 std::cout << "after merging: new verts: " << new_verts.size() << "\n";
02704 #endif
02705                 rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
02706             }
02707         }
02708         else
02709             data.point_cloud = true;
02710     }
02711     // could the point cloud be empty?
02712     if( !data.point_cloud )
02713     {
02714         // still need to resolve shared entities (in this case, vertices )
02715         rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
02716     }
02717     else
02718     {
02719         // if partition tag exists, set it to current rank; just to make it visible in VisIt
02720         Tag densePartTag;
02721         rval = context.MBI->tag_get_handle( "partition", densePartTag );
02722         if( NULL != densePartTag && MB_SUCCESS == rval )
02723         {
02724             Range local_verts;
02725             rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
02726             std::vector< int > vals;
02727             int rank = pco->rank();
02728             vals.resize( local_verts.size(), rank );
02729             rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
02730         }
02731     }
02732     // set the parallel partition tag
02733     Tag part_tag;
02734     int dum_id = -1;
02735     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02736                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02737 
02738     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02739     {
02740         std::cout << " can't get par part tag.\n";
02741         return moab::MB_FAILURE;
02742     }
02743 
02744     int rank = pco->rank();
02745     rval     = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
02746 
02747     // populate the mesh with current data info
02748     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
02749 
02750     // mark for deletion
02751     MPI_Group_free( &receiverGroup );
02752 
02753     return moab::MB_SUCCESS;
02754 }
02755 
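// Illustrative usage sketch (not part of the iMOAB implementation): iMOAB_SendMesh and
// iMOAB_ReceiveMesh are called collectively on the sender and receiver groups, over the same
// joint communicator; the component ids and the trivial partition method are assumptions here.
#if 0
static ErrCode example_migrate_mesh( iMOAB_AppID pidSender, iMOAB_AppID pidReceiver,
                                     MPI_Comm joint, MPI_Group senderGroup, MPI_Group receiverGroup,
                                     bool onSenderSide )
{
    int senderCompId = 1, receiverCompId = 2;
    int method = 0;  // 0 = trivial partition; other methods require Zoltan
    if( onSenderSide )
        return iMOAB_SendMesh( pidSender, &joint, &receiverGroup, &receiverCompId, &method );
    else
        return iMOAB_ReceiveMesh( pidReceiver, &joint, &senderGroup, &senderCompId );
}
#endif
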
02756 // need to send the zoltan buffer from coupler root towards the component root
02757 ErrCode iMOAB_RetrieveZBuffer( MPI_Group* cmpGrp, MPI_Group* cplGrp, MPI_Comm* joint, int* is_fortran )
02758 {
02759     // need to use MPI_Group_translate_ranks to find the rank of sender root and the rank of receiver root,
02760     // in the joint communicator
02761     assert( cmpGrp != nullptr );
02762     assert( cplGrp != nullptr );
02763     assert( joint != nullptr );
02764 
02765     MPI_Group cmpGroup = ( *is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( cmpGrp ) ) : *cmpGrp );
02766     MPI_Group cplGroup = ( *is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( cplGrp ) ) : *cplGrp );
02767     MPI_Comm jointComm = ( *is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( joint ) ) : *joint );
02768 
02769     int rootRank        = 0;
02770     int rankCompRoot    = -1;  // this will receive the buffer
02771     int rankCouplerRoot = -1;  // this will send the buffer
02772     int rank, ierr;
02773     MPI_Group global_grp;
02774     MPI_Comm_group( jointComm, &global_grp );
02775     MPI_Group_translate_ranks( cmpGroup, 1, &rootRank, global_grp, &rankCompRoot );
02776     MPI_Group_translate_ranks( cplGroup, 1, &rootRank, global_grp, &rankCouplerRoot );
02777     MPI_Comm_rank( jointComm, &rank );
02778     MPI_Group_free( &global_grp );  // we do not need the global group anymore
02779 #ifdef MOAB_HAVE_ZOLTAN
02780     // send from root of coupler, towards the root of component
02781     if( rankCouplerRoot != rankCompRoot )  // we do not send / receive if we do not have to; it is a blocking send/recv
02782     {
02783         if( rank == rankCouplerRoot )
02784         {
02785             // do a send of context.uniqueZoltanBuffer, towards the rankCompRoot
02786             ierr = MPI_Send( &context.uniqueZoltanBuffer[0], (int)context.uniqueZoltanBuffer.size(), MPI_CHAR,
02787                              rankCompRoot, 123, jointComm );CHK_MPI_ERR( ierr );
02788         }
02789         if( rank == rankCompRoot )
02790         {
02791             // first a probe, then a MPI_Recv
02792             MPI_Status status;
02793             ierr = MPI_Probe( rankCouplerRoot, 123, jointComm, &status );CHK_MPI_ERR( ierr );
02794             // get the count of data received from the MPI_Status structure
02795             int size_buff;
02796             ierr = MPI_Get_count( &status, MPI_CHAR, &size_buff );CHK_MPI_ERR( ierr );
02797 
02798             context.uniqueZoltanBuffer.resize( size_buff );
02799             ierr = MPI_Recv( &context.uniqueZoltanBuffer[0], size_buff, MPI_CHAR, rankCouplerRoot, 123, jointComm,
02800                              &status );CHK_MPI_ERR( ierr );
02801         }
02802     }
02803 #endif
02804     return moab::MB_SUCCESS;
02805 }
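// Usage sketch (illustrative only, not part of this file): forwarding the cached Zoltan
// partition buffer from the coupler root to a component root over a joint communicator.
// The names cmpPEGroup, cplPEGroup and jointComm are assumptions, standing for the MPI
// groups/communicator already created by the caller (e.g. via MPI_Comm_group).
//
//   int is_fortran = 0;  // calling from C/C++, so no Fortran handle conversion
//   ErrCode ierr   = iMOAB_RetrieveZBuffer( &cmpPEGroup, &cplPEGroup, &jointComm, &is_fortran );
//   if( ierr != moab::MB_SUCCESS ) { /* handle the error */ }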
02806 
02807 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02808 {
02809 
02810     appData& data                               = context.appDatas[*pid];
02811     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02812     if( mt == data.pgraph.end() )
02813     {
02814         return moab::MB_FAILURE;
02815     }
02816     ParCommGraph* cgraph = mt->second;
02817     ParallelComm* pco    = context.pcomms[*pid];
02818     Range owned          = data.owned_elems;
02819     ErrorCode rval;
02820     EntityHandle cover_set;
02821 
02822     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02823     if( data.point_cloud )
02824     {
02825         owned = data.local_verts;
02826     }
02827 
02828     // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
02829     // e.g. after the atm-ocn intersection, from the phys grid (case from imoab_phatm_ocn_coupler.cpp);
02830     // in that case, get the cover set from the intx remapper
02831 #ifdef MOAB_HAVE_TEMPESTREMAP
02832     if( data.tempestData.remapper != NULL )  // this is the case when this app is part of an intersection;
02833     {
02834         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02835         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02836         // should still have only quads ?
02837     }
02838 #else
02839     // now, in case we are sending from the intx between ocn and atm, we involve the coverage set
02840     // how do I know if this receiver already participated in an intersection driven by the coupler?
02841     // also, what if this was the "source" mesh in the intersection?
02842     // in that case, the elements might have been instantiated in the coverage set locally, and the
02843     // "owned" range can be different: the elements are now in the tempestRemap coverage_set
02844     cover_set = cgraph->get_cover_set();  // this will be non null only for intx app ?
02845 
02846     if( 0 != cover_set )
02847     {
02848         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02849     }
02850 #endif
02851 
02852     std::string tag_name( tag_storage_name );
02853 
02854     // basically, we assume the tags are already defined,
02855     //   and we can retrieve them just by name
02856     // we assume that there are separators ":" between the tag names
02857     std::vector< std::string > tagNames;
02858     std::vector< Tag > tagHandles;
02859     std::string separator( ":" );
02860     split_tag_names( tag_name, separator, tagNames );
02861     for( size_t i = 0; i < tagNames.size(); i++ )
02862     {
02863         Tag tagHandle;
02864         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02865         if( MB_SUCCESS != rval || NULL == tagHandle )
02866         {
02867             std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
02868         }
02869         tagHandles.push_back( tagHandle );
02870     }
02871 
02872     // pco is needed for packing and for the moab instance, not for communication!
02873     // still use nonblocking communication, over the joint comm
02874     rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02875     // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
02876 
02877     return moab::MB_SUCCESS;
02878 }
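// Usage sketch (illustrative only, not part of this file): sending several tags in one call
// by joining their names with the ":" separator expected above. The names cmpAppID, jointComm
// and cplCompID are assumptions, set up earlier by iMOAB_RegisterApplication and
// iMOAB_ComputeCommGraph; the tag names are just example field names.
//
//   int context_id = cplCompID;  // the context is the other component
//   ErrCode ierr = iMOAB_SendElementTag( &cmpAppID, "a2oTbot:a2oUbot:a2oVbot",
//                                        &jointComm, &context_id );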
02879 
02880 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02881 {
02882     appData& data = context.appDatas[*pid];
02883 
02884     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02885     if( mt == data.pgraph.end() )
02886     {
02887         return moab::MB_FAILURE;
02888     }
02889     ParCommGraph* cgraph = mt->second;
02890 
02891     MPI_Comm global   = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02892     ParallelComm* pco = context.pcomms[*pid];
02893     Range owned       = data.owned_elems;
02894 
02895     // how do I know if this receiver already participated in an intersection driven by the coupler?
02896     // also, what if this was the "source" mesh in the intersection?
02897     // in that case, the elements might have been instantiated in the coverage set locally, and the
02898     // "owned" range can be different: the elements are now in the tempestRemap coverage_set
02899     EntityHandle cover_set = cgraph->get_cover_set();
02900     ErrorCode rval;
02901     if( 0 != cover_set )
02902     {
02903         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02904     }
02905     if( data.point_cloud )
02906     {
02907         owned = data.local_verts;
02908     }
02909     // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
02910     // e.g. after the atm-ocn intersection, from the phys grid (case from imoab_phatm_ocn_coupler.cpp);
02911     // in that case, get the cover set from the intx remapper
02912 #ifdef MOAB_HAVE_TEMPESTREMAP
02913     if( data.tempestData.remapper != NULL )  // this is the case when this app is part of an intersection;
02914     {
02915         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02916         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02917         // should still have only quads ?
02918     }
02919 #endif
02920     /*
02921      * data_intx.remapper exists only on the intersection application, though;
02922      * how do we get to it from here (we know the pid that receives, and the comm graph
02923      * used by the mesh migration)?
02924      */
02925 
02926     std::string tag_name( tag_storage_name );
02927 
02928     // we assume that there are separators ":" between the tag names
02929     std::vector< std::string > tagNames;
02930     std::vector< Tag > tagHandles;
02931     std::string separator( ":" );
02932     split_tag_names( tag_name, separator, tagNames );
02933     for( size_t i = 0; i < tagNames.size(); i++ )
02934     {
02935         Tag tagHandle;
02936         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02937         if( MB_SUCCESS != rval || NULL == tagHandle )
02938         {
02939             std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
02940         }
02941         tagHandles.push_back( tagHandle );
02942     }
02943 
02944 #ifdef VERBOSE
02945     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
02946               << " and file set = " << ( data.file_set ) << "\n";
02947 #endif
02948     // pco is needed for packing and for the moab instance, not for communication!
02949     // still use nonblocking communication
02950     rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02951 
02952 #ifdef VERBOSE
02953     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
02954 #endif
02955 
02956     return moab::MB_SUCCESS;
02957 }
02958 
02959 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
02960 {
02961     // first we need to find the pgraph that holds the information we need
02962     // this will be called on sender side only
02963     appData& data                               = context.appDatas[*pid];
02964     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02965     if( mt == data.pgraph.end() ) return moab::MB_FAILURE;  // error
02966 
02967     mt->second->release_send_buffers();
02968     return moab::MB_SUCCESS;
02969 }
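// Typical pairing (illustrative sketch, not part of this file): the sender posts the
// non-blocking sends, the receiver posts the matching receives, and only afterwards the
// sender releases its buffers. cmpAppID, cplAppID, jointComm and the component ids are
// assumptions created by the driver; the tag name is an example.
//
//   // on the component (sender) PEs:
//   ErrCode ierr = iMOAB_SendElementTag( &cmpAppID, "a2oTbot", &jointComm, &cplCompID );
//   // on the coupler (receiver) PEs:
//   ierr = iMOAB_ReceiveElementTag( &cplAppID, "a2oTbot", &jointComm, &cmpCompID );
//   // back on the sender PEs, once the receives have completed:
//   ierr = iMOAB_FreeSenderBuffers( &cmpAppID, &cplCompID );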
02970 
02971 /**
02972 \brief compute a comm graph between 2 moab apps, based on ID matching
02973 <B>Operations:</B> Collective
02974 */
02975 //#define VERBOSE
02976 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
02977                                 iMOAB_AppID pid2,
02978                                 MPI_Comm* join,
02979                                 MPI_Group* group1,
02980                                 MPI_Group* group2,
02981                                 int* type1,
02982                                 int* type2,
02983                                 int* comp1,
02984                                 int* comp2 )
02985 {
02986     assert( join );
02987     assert( group1 );
02988     assert( group2 );
02989     ErrorCode rval = MB_SUCCESS;
02990     int localRank = 0, numProcs = 1;
02991 
02992     bool isFortran = false;
02993     if( *pid1 >= 0 ) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
02994     if( *pid2 >= 0 ) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
02995 
02996     MPI_Comm global    = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02997     MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
02998     MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
02999 
03000     MPI_Comm_rank( global, &localRank );
03001     MPI_Comm_size( global, &numProcs );
03002     // instantiate the par comm graph
03003 
03004     // we should check whether a comm graph with the same comp ids already exists,
03005     // by looking at the existing comm graphs in the map context.appDatas[*pid].pgraph
03006     bool already_exists = false;
03007     if( *pid1 >= 0 )
03008     {
03009         appData& data                               = context.appDatas[*pid1];
03010         std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2 );
03011         if( mt != data.pgraph.end() ) already_exists = true;
03012     }
03013     if( *pid2 >= 0 )
03014     {
03015         appData& data                               = context.appDatas[*pid2];
03016         std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1 );
03017         if( mt != data.pgraph.end() ) already_exists = true;
03018     }
03019     // nothing to do if it already exists
03020     if( already_exists )
03021     {
03022 #ifdef VERBOSE
03023         if( !localRank )
03024             std::cout << " ParCommGraph already exists between components " << *comp1 << " and " << *comp2
03025                       << ". Do not compute again\n";
03026 #endif
03027         return moab::MB_SUCCESS;
03028     }
03029 
03030     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
03031     // int coid2)
03032     ParCommGraph* cgraph = NULL;
03033     if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
03034     ParCommGraph* cgraph_rev = NULL;
03035     if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
03036 
03037     if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;      // the context will be the other comp
03038     if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev;  // from 2 to 1
03039     // each model has a list of global ids that will need to be sent by the gs (crystal router)
03040     // to rendezvous with the other model on the joint comm
03041     TupleList TLcomp1;
03042     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03043     TupleList TLcomp2;
03044     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03045     // will push_back a new tuple, if needed
03046 
03047     TLcomp1.enableWriteAccess();
03048 
03049     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03050     Tag gdsTag;
03051     // find the values on first cell
03052     int lenTagType1 = 1;
03053     if( 1 == *type1 || 1 == *type2 )
03054     {
03055         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03056         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03057     }
03058     Tag tagType2 = context.MBI->globalId_tag();
03059 
03060     std::vector< int > valuesComp1;
03061     // populate first tuple
03062     if( *pid1 >= 0 )
03063     {
03064         appData& data1     = context.appDatas[*pid1];
03065         EntityHandle fset1 = data1.file_set;
03066         // in case of tempest remap, get the coverage set
03067 #ifdef MOAB_HAVE_TEMPESTREMAP
03068         if( data1.tempestData.remapper != NULL )  // this is the case when this app is part of an intersection;
03069         {
03070             fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03071             // should still have only quads ?
03072         }
03073 #endif
03074         Range ents_of_interest;
03075         if( *type1 == 1 )
03076         {
03077             assert( gdsTag );
03078             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03079             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
03080             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
03081         }
03082         else if( *type1 == 2 )
03083         {
03084             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03085             valuesComp1.resize( ents_of_interest.size() );
03086             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03087         }
03088         else if( *type1 == 3 )  // for FV meshes, just get the global id of cell
03089         {
03090             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03091             valuesComp1.resize( ents_of_interest.size() );
03092             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03093         }
03094         else
03095         {
03096             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
03097         }
03098         // now fill the tuple list with info and markers
03099         // because we will send only the ids, order and compress the list
03100         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
03101         TLcomp1.resize( uniq.size() );
03102         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03103         {
03104             // to proc, marker, element local index, index in el
03105             int marker               = *sit;
03106             int to_proc              = marker % numProcs;
03107             int n                    = TLcomp1.get_n();
03108             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
03109             TLcomp1.vi_wr[2 * n + 1] = marker;
03110             TLcomp1.inc_n();
03111         }
03112     }
03113 
03114     ProcConfig pc( global );  // proc config does the crystal router
03115     pc.crystal_router()->gs_transfer( 1, TLcomp1,
03116                                       0 );  // communication towards joint tasks, with markers
03117     // sort by value (key 1)
03118 #ifdef VERBOSE
03119     std::stringstream ff1;
03120     ff1 << "TLcomp1_" << localRank << ".txt";
03121     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03122 #endif
03123     moab::TupleList::buffer sort_buffer;
03124     sort_buffer.buffer_init( TLcomp1.get_n() );
03125     TLcomp1.sort( 1, &sort_buffer );
03126     sort_buffer.reset();
03127 #ifdef VERBOSE
03128     // after sorting
03129     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03130 #endif
03131     // do the same, for the other component, number2, with type2
03132     // start copy
03133     TLcomp2.enableWriteAccess();
03134     // populate second tuple
03135     std::vector< int > valuesComp2;
03136     if( *pid2 >= 0 )
03137     {
03138         appData& data2     = context.appDatas[*pid2];
03139         EntityHandle fset2 = data2.file_set;
03140         // in case of tempest remap, get the coverage set
03141 #ifdef MOAB_HAVE_TEMPESTREMAP
03142         if( data2.tempestData.remapper != NULL )  // this is the case when this app is part of an intersection;
03143         {
03144             fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03145             // should still have only quads ?
03146         }
03147 #endif
03148 
03149         Range ents_of_interest;
03150         if( *type2 == 1 )
03151         {
03152             assert( gdsTag );
03153             rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03154             valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
03155             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
03156         }
03157         else if( *type2 == 2 )
03158         {
03159             rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03160             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
03161             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
03162         }
03163         else if( *type2 == 3 )
03164         {
03165             rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
03166             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
03167             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
03168         }
03169         else
03170         {
03171             MB_CHK_ERR( MB_FAILURE );  // we know only type 1, 2 or 3
03172         }
03173         // now fill the tuple list with info and markers
03174         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
03175         TLcomp2.resize( uniq.size() );
03176         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03177         {
03178             // to proc, marker, element local index, index in el
03179             int marker               = *sit;
03180             int to_proc              = marker % numProcs;
03181             int n                    = TLcomp2.get_n();
03182             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
03183             TLcomp2.vi_wr[2 * n + 1] = marker;
03184             TLcomp2.inc_n();
03185         }
03186     }
03187     pc.crystal_router()->gs_transfer( 1, TLcomp2,
03188                                       0 );  // communication towards joint tasks, with markers
03189     // sort by value (key 1)
03190 #ifdef VERBOSE
03191     std::stringstream ff2;
03192     ff2 << "TLcomp2_" << localRank << ".txt";
03193     TLcomp2.print_to_file( ff2.str().c_str() );
03194 #endif
03195     sort_buffer.buffer_reserve( TLcomp2.get_n() );
03196     TLcomp2.sort( 1, &sort_buffer );
03197     sort_buffer.reset();
03198     // end copy
03199 #ifdef VERBOSE
03200     TLcomp2.print_to_file( ff2.str().c_str() );
03201 #endif
03202     // need to send back the info, from the rendezvous point, for each of the values
03203     /* so go over each value, on the local process in the joint communicator;
03204 
03205     we now have to send back the info needed for communication;
03206     loop in sync over both TLcomp1 and TLcomp2, on the local process;
03207     build new tuple lists to send back, and
03208     populate them at the same time, based on the marker, by which both lists are sorted
03209     */
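    // Worked example of the rendezvous (illustrative, with made-up numbers): with
    // numProcs = 4 joint tasks, a marker (global id / dof) of value 10 is routed by both
    // components to task 10 % 4 = 2; that task matches the two sorted tuple streams by
    // marker and reports back, to each original task, the rank on the other component
    // that holds the same marker.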
03210 
03211     TupleList TLBackToComp1;
03212     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
03213     TLBackToComp1.enableWriteAccess();
03214 
03215     TupleList TLBackToComp2;
03216     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
03217     TLBackToComp2.enableWriteAccess();
03218 
03219     int n1 = TLcomp1.get_n();
03220     int n2 = TLcomp2.get_n();
03221 
03222     int indexInTLComp1 = 0;
03223     int indexInTLComp2 = 0;  // advance both, according to the marker
03224     if( n1 > 0 && n2 > 0 )
03225     {
03226 
03227         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
03228         {
03229             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
03230             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
03231             if( currentValue1 < currentValue2 )
03232             {
03233                 // we have a big problem; basically, we are saying that
03234                 // dof currentValue is on one model and not on the other
03235                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
03236                 indexInTLComp1++;
03237                 continue;
03238             }
03239             if( currentValue1 > currentValue2 )
03240             {
03241                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
03242                 indexInTLComp2++;
03243                 continue;
03244             }
03245             int size1 = 1;
03246             int size2 = 1;
03247             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
03248                 size1++;
03249             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
03250                 size2++;
03251             // must be found in both lists, find the start and end indices
03252             for( int i1 = 0; i1 < size1; i1++ )
03253             {
03254                 for( int i2 = 0; i2 < size2; i2++ )
03255                 {
03256                     // send the info back to components
03257                     int n = TLBackToComp1.get_n();
03258                     TLBackToComp1.reserve();
03259                     TLBackToComp1.vi_wr[3 * n] =
03260                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
03261                                                                      // came from, info from comp2
03262                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03263                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
03264                     n                              = TLBackToComp2.get_n();
03265                     TLBackToComp2.reserve();
03266                     TLBackToComp2.vi_wr[3 * n] =
03267                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
03268                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03269                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
03270                     // what if there are repeated markers in TLcomp2? increase just index2
03271                 }
03272             }
03273             indexInTLComp1 += size1;
03274             indexInTLComp2 += size2;
03275         }
03276     }
03277     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about the matching procs on the other component
03278     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
03279 
03280     if( *pid1 >= 0 )
03281     {
03282         // we are on original comp 1 tasks
03283         // before ordering
03284         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
03285         // processors it communicates with
03286 #ifdef VERBOSE
03287         std::stringstream f1;
03288         f1 << "TLBack1_" << localRank << ".txt";
03289         TLBackToComp1.print_to_file( f1.str().c_str() );
03290 #endif
03291         sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
03292         TLBackToComp1.sort( 1, &sort_buffer );
03293         sort_buffer.reset();
03294 #ifdef VERBOSE
03295         TLBackToComp1.print_to_file( f1.str().c_str() );
03296 #endif
03297         // so we are now on pid1; we now know, for each marker, where it has to go
03298         // add a new method to ParCommGraph, to set up the involved_IDs_map
03299         cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
03300     }
03301     if( *pid2 >= 0 )
03302     {
03303         // we are on original comp 2 tasks
03304         // before ordering
03305         // now for each value in TLBackToComp2.vi_rd[3*i+1], on the current proc, we know the
03306         // processors it communicates with
03307 #ifdef VERBOSE
03308         std::stringstream f2;
03309         f2 << "TLBack2_" << localRank << ".txt";
03310         TLBackToComp2.print_to_file( f2.str().c_str() );
03311 #endif
03312         sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
03313         TLBackToComp2.sort( 2, &sort_buffer );
03314         sort_buffer.reset();
03315 #ifdef VERBOSE
03316         TLBackToComp2.print_to_file( f2.str().c_str() );
03317 #endif
03318         cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
03319         //
03320     }
03321     return moab::MB_SUCCESS;
03322 }
03323 
03324 //#undef VERBOSE
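// Usage sketch (illustrative only, not part of this file): building the comm graph between
// an FV component and its migrated copy on the coupler PEs, matching cells by GLOBAL_ID.
// The app ids, groups, communicator and component ids below are assumptions created by the
// driver (in the style of the imoab coupler tests).
//
//   int typeA = 3, typeB = 3;  // FV-to-FV, match on cell GLOBAL_ID
//   ErrCode ierr = iMOAB_ComputeCommGraph( &atmAppID, &atmCplAppID, &jointComm,
//                                          &atmPEGroup, &couPEGroup,
//                                          &typeA, &typeB, &atmCompID, &atmCouCompID );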
03325 
03326 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
03327 {
03328     appData& data     = context.appDatas[*pid];
03329     ParallelComm* pco = context.pcomms[*pid];
03330     // collapse vertices and transform cells into triangles/quads/polys
03331     // tags we care about: area, frac, global id
03332     std::vector< Tag > tagsList;
03333     Tag tag;
03334     ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
03335     if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;  // fatal error, abort
03336     tagsList.push_back( tag );
03337     rval = context.MBI->tag_get_handle( "area", tag );
03338     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
03339     rval = context.MBI->tag_get_handle( "frac", tag );
03340     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
03341     double tol = 1.0e-9;
03342     rval       = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
03343 
03344     // clean material sets of cells that were deleted
03345     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
03346                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
03347 
03348     if( !data.mat_sets.empty() )
03349     {
03350         EntityHandle matSet = data.mat_sets[0];
03351         Range elems;
03352         rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
03353         rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
03354         // put back only cells from data.file_set
03355         elems.clear();
03356         rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
03357         rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
03358     }
03359     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
03360 
03361     ParallelMergeMesh pmm( pco, tol );
03362     rval = pmm.merge( data.file_set,
03363                       /* do not do local merge*/ false,
03364                       /*  2d cells*/ 2 );MB_CHK_ERR( rval );
03365 
03366     // assign global ids only for vertices; cells already have them
03367     rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
03368 
03369     // set the partition tag on the file set
03370     Tag part_tag;
03371     int dum_id = -1;
03372     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
03373                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
03374 
03375     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
03376     {
03377         std::cout << " can't get par part tag.\n";
03378         return moab::MB_FAILURE;
03379     }
03380 
03381     int rank = pco->rank();
03382     rval     = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
03383 
03384     return moab::MB_SUCCESS;
03385 }
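// Usage sketch (illustrative only, not part of this file): after migrating a mesh whose
// cells arrived with duplicated vertices, collapse the duplicates and refresh the parallel
// partition info. appID is an assumption, the handle obtained from iMOAB_RegisterApplication.
//
//   ErrCode ierr = iMOAB_MergeVertices( &appID );
//   if( ierr != moab::MB_SUCCESS ) { /* merge with tolerance 1.0e-9 failed */ }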
03386 
03387 #ifdef MOAB_HAVE_TEMPESTREMAP
03388 
03389 // this call must be collective on the joint communicator
03390 //  intersection tasks on the coupler will need to send to the component tasks the list of
03391 //  element ids that are relevant, i.e. the source elements that intersected some of the
03392 //  target elements (the target elements themselves are not needed here)
03393 //  during the intersection computation
03394 ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
03395                              iMOAB_AppID pid_src,
03396                              iMOAB_AppID pid_migr,
03397                              iMOAB_AppID pid_intx,
03398                              int* src_id,
03399                              int* migr_id,
03400                              int* context_id )
03401 {
03402     // first, based on the src_id and migr_id, find the ParCommGraph corresponding to this
03403     // exchange
03404     ErrorCode rval;
03405     std::vector< int > srcSenders;
03406     std::vector< int > receivers;
03407     ParCommGraph* sendGraph = NULL;
03408     int ierr;
03409     int default_context_id  = -1;
03410     bool is_fortran_context = false;
03411 
03412     // First, find the original graph between PE sets
03413     // And based on this one, we will build the newly modified one for coverage
03414     if( *pid_src >= 0 )
03415     {
03416         default_context_id = *migr_id;  // the other one
03417         assert( context.appDatas[*pid_src].global_id == *src_id );
03418         is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
03419         sendGraph          = context.appDatas[*pid_src].pgraph[default_context_id];  // maybe check if it does not exist
03420 
03421         // report the sender and receiver tasks in the joint comm
03422         srcSenders = sendGraph->senders();
03423         receivers  = sendGraph->receivers();
03424 #ifdef VERBOSE
03425         std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
03426 #endif
03427     }
03428 
03429     ParCommGraph* recvGraph = NULL;  // will be non null on receiver tasks (intx tasks)
03430     if( *pid_migr >= 0 )
03431     {
03432         is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
03433         // find the original one
03434         default_context_id = *src_id;
03435         assert( context.appDatas[*pid_migr].global_id == *migr_id );
03436         recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
03437         // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
03438         srcSenders = recvGraph->senders();
03439         receivers  = recvGraph->receivers();
03440 #ifdef VERBOSE
03441         std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
03442 #endif
03443     }
03444 
03445     if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
03446 
03447     // loop over pid_intx elements, to see what original processors in joint comm have sent the
03448     // coverage mesh;
03449     // If we are on intx tasks, send coverage info towards original component tasks,
03450     // about needed cells
03451     TupleList TLcovIDs;
03452     TLcovIDs.initialize( 2, 0, 0, 0, 0 );  // to proc, GLOBAL ID; estimate about 100 IDs to be sent
03453     // will push_back a new tuple, if needed
03454     TLcovIDs.enableWriteAccess();
03455     // the crystal router will send the cell IDs to the original source, on the component tasks;
03456     // if we are on intx tasks, loop over all intx elements
03457 
03458     MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
03459     int currentRankInJointComm = -1;
03460     ierr                       = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
03461 
03462     // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
03463     // need to send information towards component tasks
03464     if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
03465         receivers.end() )  // we are on receivers tasks, we can request intx info
03466     {
03467         // find the pcomm for the intx pid
03468         if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
03469 
03470         appData& dataIntx = context.appDatas[*pid_intx];
03471         Tag parentTag, orgSendProcTag;
03472 
03473         rval = context.MBI->tag_get_handle( "SourceParent", parentTag );MB_CHK_ERR( rval );                        // global id of the blue, source element
03474         if( !parentTag ) return moab::MB_FAILURE;  // fatal error, abort
03475 
03476         rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );MB_CHK_ERR( rval );
03477         if( !orgSendProcTag ) return moab::MB_FAILURE;  // fatal error, abort
03478 
03479         // find the file set, red parents for intx cells, and put them in tuples
03480         EntityHandle intxSet = dataIntx.file_set;
03481         Range cells;
03482         // get all entities from the set, and look at their RedParent
03483         rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );MB_CHK_ERR( rval );
03484 
03485         std::map< int, std::set< int > > idsFromProcs;  // send that info back to enhance parCommGraph cache
03486         for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
03487         {
03488             EntityHandle intx_cell = *it;
03489             int gidCell, origProc;  // look at receivers
03490 
03491             rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );MB_CHK_ERR( rval );
03492             rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );MB_CHK_ERR( rval );
03493             // we have augmented the overlap set with ghost cells ; in that case, the
03494             // orig_sending_processor is not set so it will be -1;
03495             if( origProc < 0 ) continue;
03496 
03497             std::set< int >& setInts = idsFromProcs[origProc];
03498             setInts.insert( gidCell );
03499         }
03500 // questionable ??
03501 #if 0
03502         // if we have no intx cells, it means we are on point clouds; quick fix just use all cells
03503         // from coverage set
03504         if( cells.empty() )
03505         {
03506             // get coverage set
03507             assert( *pid_intx >= 0 );
03508             appData& dataIntx      = context.appDatas[*pid_intx];
03509             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03510 
03511             // get all cells from coverage set
03512             Tag gidTag;
03513             rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
03514             rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );MB_CHK_ERR( rval );
03515             // look at their orig_sending_processor
03516             for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
03517             {
03518                 EntityHandle covCell = *it;
03519                 int gidCell, origProc;  // look at o
03520 
03521                 rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );MB_CHK_ERR( rval );
03522                 rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );MB_CHK_ERR( rval );
03523                 // we have augmented the overlap set with ghost cells ; in that case, the
03524                 // orig_sending_processor is not set so it will be -1;
03525                 if( origProc < 0 )  // it cannot be < 0, I think
03526                     continue;
03527                 std::set< int >& setInts = idsFromProcs[origProc];
03528                 setInts.insert( gidCell );
03529             }
03530         }
03531 #endif  // #if 0
03532 
03533 #ifdef VERBOSE
03534         std::ofstream dbfile;
03535         std::stringstream outf;
03536         outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
03537         dbfile.open( outf.str().c_str() );
03538         dbfile << "Writing this to a file.\n";
03539 
03540         dbfile << " map size:" << idsFromProcs.size()
03541                << std::endl;  // on the receiver side, these show how much data to receive
03542         // from the sender (how many ids, and how much tag data later; we need to size up the
03543         // receiver buffer) arrange in tuples , use map iterators to send the ids
03544         for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
03545         {
03546             std::set< int >& setIds = mt->second;
03547             dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
03548             int counter = 0;
03549             for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
03550             {
03551                 int valueID = *st;
03552                 dbfile << " " << valueID;
03553                 counter++;
03554                 if( counter % 10 == 0 ) dbfile << "\n";
03555             }
03556             dbfile << "\n";
03557         }
03558         dbfile.close();
03559 #endif
03560         if( NULL != recvGraph )
03561         {
03562             ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph );  // just copy
03563             recvGraph1->set_context_id( *context_id );
03564             recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
03565             // this par comm graph will need to use the coverage set
03566             // so we are for sure on intx pes (the receiver is the coupler mesh)
03567             assert( *pid_intx >= 0 );
03568             appData& dataIntx      = context.appDatas[*pid_intx];
03569             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03570             recvGraph1->set_cover_set( cover_set );
03571             context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1;  // possible memory leak if context_id is same
03572         }
03573         // initial loop to see how much space we need for TLcovIDs
03574         size_t nbIds = 0;
03575         for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
03576         {
03577             std::set< int >& idSet = mit->second;
03578             nbIds += idSet.size();
03579         }
03580 
03581         TLcovIDs.resize( nbIds );
03582 
03583         for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
03584         {
03585             int procToSendTo       = mit->first;
03586             std::set< int >& idSet = mit->second;
03587             for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
03588             {
03589                 int n                     = TLcovIDs.get_n();
03590                 TLcovIDs.vi_wr[2 * n]     = procToSendTo;  // send to processor
03591                 TLcovIDs.vi_wr[2 * n + 1] = *sit;          // global id needs index in the local_verts range
03592                 TLcovIDs.inc_n();
03593             }
03594         }
03595     }
03596 
03597     ProcConfig pc( global );  // proc config does the crystal router
03598     pc.crystal_router()->gs_transfer( 1, TLcovIDs,
03599                                       0 );  // communication towards component tasks, with what ids are needed
03600     // for each task from receiver
03601 
03602     // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
03603     if( NULL != sendGraph )
03604     {
03605         // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
03606         // receiver task
03607         ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph );  // just copy
03608         sendGraph1->set_context_id( *context_id );
03609         context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
03610         rval                                           = sendGraph1->settle_send_graph( TLcovIDs );MB_CHK_ERR( rval );
03611     }
03612     return moab::MB_SUCCESS;  // success
03613 }
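// Usage sketch (illustrative only, not part of this file): after the intersection has been
// computed on the coupler PEs, build the coverage-restricted graph so the source component
// sends only the cells it actually needs to. All names below are assumptions set up by the
// driver in earlier calls.
//
//   int context_id = atmOcnCtxID;  // context used later by the tag send/receive calls
//   ErrCode ierr = iMOAB_CoverageGraph( &jointComm, &atmAppID, &atmCplAppID, &intxAppID,
//                                       &atmCompID, &atmCouCompID, &context_id );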
03614 
03615 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, int* verbose, const iMOAB_String prefix )
03616 {
03617 
03618     ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
03619     std::string prefix_str( prefix );
03620 
03621     if( NULL != cgraph )
03622         cgraph->dump_comm_information( prefix_str, *is_sender, *verbose );
03623     else
03624     {
03625         std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
03626                   << " context: " << *context_id << "\n";
03627     }
03628     return moab::MB_SUCCESS;
03629 }
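// Debugging sketch (illustrative only): dump the send-side view of the graph built above to
// files with the prefix "atm_cov_"; the ids are assumptions matching the previous sketch.
//
//   int is_sender = 1, verbose = 1;
//   ErrCode ierr = iMOAB_DumpCommGraph( &atmAppID, &context_id, &is_sender, &verbose, "atm_cov_" );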
03630 
03631 #endif  // #ifdef MOAB_HAVE_TEMPESTREMAP
03632 
03633 #endif  // #ifdef MOAB_HAVE_MPI
03634 
03635 #ifdef MOAB_HAVE_TEMPESTREMAP
03636 
03637 #ifdef MOAB_HAVE_NETCDF
03638 ErrCode iMOAB_LoadMappingWeightsFromFile(
03639     iMOAB_AppID pid_intersection,
03640     iMOAB_AppID pid_cpl,
03641     int* col_or_row,
03642     int* type,
03643     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03644     const iMOAB_String remap_weights_filename )
03645 {
03646     ErrorCode rval;
03647     bool row_based_partition = true;
03648     if( *col_or_row == 1 ) row_based_partition = false;  // do a column based partition;
03649 
03650     // get the local degrees of freedom, from the pid_cpl and type of mesh
03651 
03652     // Get the source and target data and pcomm objects
03653     appData& data_intx       = context.appDatas[*pid_intersection];
03654     TempestMapAppData& tdata = data_intx.tempestData;
03655 
03656     // Get the handle to the remapper object
03657     if( tdata.remapper == NULL )
03658     {
03659         // Now allocate and initialize the remapper object
03660 #ifdef MOAB_HAVE_MPI
03661         ParallelComm* pco = context.pcomms[*pid_intersection];
03662         tdata.remapper    = new moab::TempestRemapper( context.MBI, pco );
03663 #else
03664         tdata.remapper = new moab::TempestRemapper( context.MBI );
03665 #endif
03666         tdata.remapper->meshValidate     = true;
03667         tdata.remapper->constructEdgeMap = true;
03668         // Do not create new filesets; Use the sets from our respective applications
03669         tdata.remapper->initialize( false );
03670         tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03671     }
03672 
03673     // Setup loading of weights onto TempestOnlineMap
03674     // Set the context for the remapping weights computation
03675     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
03676 
03677     // Now allocate and initialize the remapper object
03678     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03679     assert( weightMap != NULL );
03680 
03681     if( *pid_cpl >= 0 )  // it means we can determine how to distribute the degrees of freedom; this is the new map reader path
03682     {
03683         appData& data1     = context.appDatas[*pid_cpl];
03684         EntityHandle fset1 = data1.file_set;  // this is source or target, depending on direction
03685 
03686         // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03687         Tag gdsTag;
03688 
03689         // find the values on first cell
03690         int lenTagType1 = 1;
03691         if( *type == 1 )
03692         {
03693             rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03694             rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03695         }
03696         Tag tagType2 = context.MBI->globalId_tag();
03697 
03698         std::vector< int > dofValues;
03699 
03700         // populate first tuple
03701         Range
03702             ents_of_interest;  // will be filled with entities on coupler, from which we will get the DOFs, based on type
03703         int ndofPerEl = 1;
03704 
03705         if( *type == 1 )
03706         {
03707             assert( gdsTag );
03708             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03709             dofValues.resize( ents_of_interest.size() * lenTagType1 );
03710             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );
03711             ndofPerEl = lenTagType1;
03712         }
03713         else if( *type == 2 )
03714         {
03715             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03716             dofValues.resize( ents_of_interest.size() );
03717             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );  // just global ids
03718         }
03719         else if( *type == 3 )  // for FV meshes, just get the global id of cell
03720         {
03721             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03722             dofValues.resize( ents_of_interest.size() );
03723             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );  // just global ids
03724         }
03725         else
03726         {
03727             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
03728         }
03729         // pass ordered dofs, and unique
03730         std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
03731 
03732         std::sort( orderDofs.begin(), orderDofs.end() );
03733         orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() );  // remove duplicates
03734 
03735         rval = weightMap->ReadParallelMap( remap_weights_filename, orderDofs, row_based_partition );MB_CHK_ERR( rval );
03736 
03737         // if we are on target mesh (row based partition)
03738         if( row_based_partition )
03739         {
03740             tdata.pid_dest = pid_cpl;
03741             tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset1, ents_of_interest );
03742             weightMap->SetDestinationNDofsPerElement( ndofPerEl );
03743             weightMap->set_row_dc_dofs( dofValues );  // will set row_dtoc_dofmap
03744         }
03745         else
03746         {
03747             tdata.pid_src = pid_cpl;
03748             tdata.remapper->SetMeshSet( Remapper::SourceMesh, fset1, ents_of_interest );
03749             weightMap->SetSourceNDofsPerElement( ndofPerEl );
03750             weightMap->set_col_dc_dofs( dofValues );  // will set col_dtoc_dofmap
03751         }
03752     }
03753     else  // old reader, trivial distribution by row
03754     {
03755         std::vector< int > tmp_owned_ids;  // this will do a trivial row distribution
03756         rval = weightMap->ReadParallelMap( remap_weights_filename, tmp_owned_ids, row_based_partition );MB_CHK_ERR( rval );
03757     }
03758 
03759     return moab::MB_SUCCESS;
03760 }
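// Usage sketch (illustrative only, not part of this file): reading a netCDF map file on the
// coupler PEs and partitioning its rows according to the target (coupler) dofs. The app ids,
// the weights identifier and the file name are assumptions.
//
//   int col_or_row = 0;  // 0: row (target) based partition, 1: column (source) based
//   int type       = 3;  // FV mesh on the coupler side, dofs are cell GLOBAL_IDs
//   ErrCode ierr = iMOAB_LoadMappingWeightsFromFile( &intxAppID, &ocnCplAppID, &col_or_row,
//                                                    &type, "map-atm-ocn", "atm_ocn_map.nc" );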
03761 
03762 ErrCode iMOAB_WriteMappingWeightsToFile(
03763     iMOAB_AppID pid_intersection,
03764     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03765     const iMOAB_String remap_weights_filename )
03766 {
03767     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03768     assert( remap_weights_filename && strlen( remap_weights_filename ) );
03769 
03770     ErrorCode rval;
03771 
03772     // Get the source and target data and pcomm objects
03773     appData& data_intx       = context.appDatas[*pid_intersection];
03774     TempestMapAppData& tdata = data_intx.tempestData;
03775 
03776     // Get the handle to the remapper object
03777     assert( tdata.remapper != NULL );
03778 
03779     // Now get online weights object and ensure it is valid
03780     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03781     assert( weightMap != NULL );
03782 
03783     std::string filename = std::string( remap_weights_filename );
03784 
03785     // Write the map file to disk in parallel using either HDF5 or SCRIP interface
03786     rval = weightMap->WriteParallelMap( filename );MB_CHK_ERR( rval );
03787 
03788     return moab::MB_SUCCESS;
03789 }
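// Usage sketch (illustrative only): writing the weights registered above under the same
// identifier back to disk; the identifier and output file name are assumptions.
//
//   ErrCode ierr = iMOAB_WriteMappingWeightsToFile( &intxAppID, "map-atm-ocn", "atm_ocn_map_out.nc" );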
03790 
03791 #ifdef MOAB_HAVE_MPI
03792 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
03793                               iMOAB_AppID pid2,
03794                               iMOAB_AppID pid3,
03795                               MPI_Comm* jointcomm,
03796                               MPI_Group* groupA,
03797                               MPI_Group* groupB,
03798                               int* type,
03799                               int* comp1,
03800                               int* comp2,
03801                               int* direction )
03802 {
03803     assert( jointcomm );
03804     assert( groupA );
03805     assert( groupB );
03806     bool is_fortran = false;
03807     if( *pid1 >= 0 ) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
03808     if( *pid2 >= 0 ) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
03809     if( *pid3 >= 0 ) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
03810 
03811     MPI_Comm joint_communicator =
03812         ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
03813     MPI_Group group_first  = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
03814     MPI_Group group_second = ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
03815 
03816     ErrorCode rval = MB_SUCCESS;
03817     int localRank = 0, numProcs = 1;
03818 
03819     // Get the local rank and number of processes in the joint communicator
03820     MPI_Comm_rank( joint_communicator, &localRank );
03821     MPI_Comm_size( joint_communicator, &numProcs );
03822 
03823     // Next instantiate the par comm graph
03824     // here we need to look at direction
03825     // this direction is good for atm source -> ocn target coupler example
03826     ParCommGraph* cgraph = NULL;
03827     if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
03828 
03829     ParCommGraph* cgraph_rev = NULL;
03830     if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
03831 
03832     // we should search if we have another pcomm with the same comp ids in the list already
03833     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
03834     if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;      // the context will be the other comp
03835     if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev;  // from 2 to 1
03836 
03837     // each model has a list of global ids that will need to be sent by gs to rendezvous the other
03838     // model on the joint_communicator
03839     TupleList TLcomp1;
03840     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03841     TupleList TLcomp2;
03842     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03843 
03844     // will push_back a new tuple, if needed
03845     TLcomp1.enableWriteAccess();
03846 
03847     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03848     Tag gdsTag;
03849 
03850     // find the values on first cell
03851     int lenTagType1 = 1;
03852     if( *type == 1 )
03853     {
03854         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03855         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03856     }
03857     Tag tagType2 = context.MBI->globalId_tag();
03858 
03859     std::vector< int > valuesComp1;
03860 
03861     // populate first tuple
03862     Range ents_of_interest;  // will be filled with entities on pid1, that need to be distributed,
03863                              // rearranged in split_ranges map
03864     if( *pid1 >= 0 )
03865     {
03866         appData& data1     = context.appDatas[*pid1];
03867         EntityHandle fset1 = data1.file_set;
03868 
03869         if( *type == 1 )
03870         {
03871             assert( gdsTag );
03872             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03873             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
03874             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
03875         }
03876         else if( *type == 2 )
03877         {
03878             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03879             valuesComp1.resize( ents_of_interest.size() );
03880             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03881         }
03882         else if( *type == 3 )  // for FV meshes, just get the global id of cell
03883         {
03884             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03885             valuesComp1.resize( ents_of_interest.size() );
03886             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03887         }
03888         else
03889         {
03890             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
03891         }
03892         // now fill the tuple list with info and markers
03893         // because we will send only the ids, order and compress the list
03894         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
03895         TLcomp1.resize( uniq.size() );
03896         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03897         {
03898             // to proc, marker, element local index, index in el
03899             int marker               = *sit;
03900             int to_proc              = marker % numProcs;
03901             int n                    = TLcomp1.get_n();
03902             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
03903             TLcomp1.vi_wr[2 * n + 1] = marker;
03904             TLcomp1.inc_n();
03905         }
03906     }
03907 
03908     ProcConfig pc( joint_communicator );  // proc config does the crystal router
03909     pc.crystal_router()->gs_transfer( 1, TLcomp1,
03910                                       0 );  // communication towards joint tasks, with markers
03911     // sort by value (key 1)
03912 #ifdef VERBOSE
03913     std::stringstream ff1;
03914     ff1 << "TLcomp1_" << localRank << ".txt";
03915     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03916 #endif
03917     moab::TupleList::buffer sort_buffer;
03918     sort_buffer.buffer_init( TLcomp1.get_n() );
03919     TLcomp1.sort( 1, &sort_buffer );
03920     sort_buffer.reset();
03921 #ifdef VERBOSE
03922     // after sorting
03923     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03924 #endif
03925     // do the same, for the other component, number2, with type2
03926     // start copy
03927     TLcomp2.enableWriteAccess();
03928 
03929     moab::TempestOnlineMap* weightMap = NULL;  // declare it outside the block, but it is meaningful
03930     // only when *pid2 >= 0, i.e. on the coupler PEs, where the read map and the coupler procs exist
03931     // populate second tuple with ids  from read map: we need row_gdofmap and col_gdofmap
03932     std::vector< int > valuesComp2;
03933     if( *pid2 >= 0 )  // we are now on coupler, map side
03934     {
03935         appData& data2           = context.appDatas[*pid2];
03936         TempestMapAppData& tdata = data2.tempestData;
03937         // should be only one map, read from file
03938         assert( tdata.weightMaps.size() == 1 );
03939         // maybe we need to check it is the map we expect
03940         weightMap = tdata.weightMaps.begin()->second;
03941         // std::vector<int> ids_of_interest;
03942         // do a deep copy of the ids of interest: row ids or col ids, target or source direction
03943         if( *direction == 1 )
03944         {
03945             // we are interested in col ids, source
03946             // new method from moab::TempestOnlineMap
03947             rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
03948         }
03949         else if( *direction == 2 )
03950         {
03951             // we are interested in row ids, for target ids
03952             rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
03953         }
03954         //
03955 
03956         // now fill the tuple list with info and markers
03957         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
03958         TLcomp2.resize( uniq.size() );
03959         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03960         {
03961             // to proc, marker, element local index, index in el
03962             int marker               = *sit;
03963             int to_proc              = marker % numProcs;
03964             int n                    = TLcomp2.get_n();
03965             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
03966             TLcomp2.vi_wr[2 * n + 1] = marker;
03967             TLcomp2.inc_n();
03968         }
03969     }
03970     pc.crystal_router()->gs_transfer( 1, TLcomp2,
03971                                       0 );  // communication towards joint tasks, with markers
03972     // sort by value (key 1)
03973     // in the rendezvous approach, markers meet at the rendezvous processor given by marker % numProcs
03974 #ifdef VERBOSE
03975     std::stringstream ff2;
03976     ff2 << "TLcomp2_" << localRank << ".txt";
03977     TLcomp2.print_to_file( ff2.str().c_str() );
03978 #endif
03979     sort_buffer.buffer_reserve( TLcomp2.get_n() );
03980     TLcomp2.sort( 1, &sort_buffer );
03981     sort_buffer.reset();
03982     // end copy
03983 #ifdef VERBOSE
03984     TLcomp2.print_to_file( ff2.str().c_str() );
03985 #endif
03986     // need to send back the info, from the rendezvous point, for each of the values
03987     /* so go over each value, on the local process in the joint communicator;
03988 
03989        we now have to send back the info needed for communication:
03990        loop in sync over both TLcomp1 and TLcomp2 on the local process,
03991        build new tuple lists to send back to the components, and
03992        populate them at the same time, keyed by the marker
03993     */
03994 
03995     TupleList TLBackToComp1;
03996     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
03997     TLBackToComp1.enableWriteAccess();
03998 
03999     TupleList TLBackToComp2;
04000     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
04001     TLBackToComp2.enableWriteAccess();
04002 
04003     int n1 = TLcomp1.get_n();
04004     int n2 = TLcomp2.get_n();
04005 
04006     int indexInTLComp1 = 0;
04007     int indexInTLComp2 = 0;  // advance both, according to the marker
04008     if( n1 > 0 && n2 > 0 )
04009     {
04010 
04011         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
04012         {
04013             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
04014             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
04015             if( currentValue1 < currentValue2 )
04016             {
04017                 // we have a problem; this means that dof currentValue1
04018                 // exists on one model but not on the other
04019                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
04020                 indexInTLComp1++;
04021                 continue;
04022             }
04023             if( currentValue1 > currentValue2 )
04024             {
04025                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
04026                 indexInTLComp2++;
04027                 continue;
04028             }
04029             int size1 = 1;
04030             int size2 = 1;
04031             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
04032                 size1++;
04033             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
04034                 size2++;
04035             // must be found in both lists, find the start and end indices
04036             for( int i1 = 0; i1 < size1; i1++ )
04037             {
04038                 for( int i2 = 0; i2 < size2; i2++ )
04039                 {
04040                     // send the info back to components
04041                     int n = TLBackToComp1.get_n();
04042                     TLBackToComp1.reserve();
04043                     TLBackToComp1.vi_wr[3 * n] =
04044                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
04045                                                                      // came from, info from comp2
04046                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
04047                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
04048                     n                              = TLBackToComp2.get_n();
04049                     TLBackToComp2.reserve();
04050                     TLBackToComp2.vi_wr[3 * n] =
04051                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
04052                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
04053                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
04054                     // repeated markers in either list are handled by the size1/size2 loops above
04055                 }
04056             }
04057             indexInTLComp1 += size1;
04058             indexInTLComp2 += size2;
04059         }
04060     }
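    // The matching above is a classic two-pointer merge over two marker-sorted lists: advance
    // whichever side has the smaller marker, and when the markers agree, pair every entry of one
    // run with every entry of the other (to handle repeated markers).  A standalone sketch of the
    // same pattern on plain sorted vectors of (proc, marker) pairs (illustrative only; not the
    // TupleList code path):
    //
    //   #include <utility>
    //   #include <vector>
    //
    //   typedef std::pair< int, int > ProcMarker;  // (from_proc, marker), sorted by marker
    //
    //   // Emit (marker, proc_in_A, proc_in_B) triples for every marker present in both lists.
    //   void match_markers( const std::vector< ProcMarker >& A, const std::vector< ProcMarker >& B,
    //                       std::vector< int >& out )  // flattened triples
    //   {
    //       size_t i = 0, j = 0;
    //       while( i < A.size() && j < B.size() )
    //       {
    //           if( A[i].second < B[j].second ) { ++i; continue; }
    //           if( A[i].second > B[j].second ) { ++j; continue; }
    //           size_t ei = i, ej = j;  // extents of the equal-marker runs
    //           while( ei < A.size() && A[ei].second == A[i].second ) ++ei;
    //           while( ej < B.size() && B[ej].second == B[j].second ) ++ej;
    //           for( size_t a = i; a < ei; ++a )
    //               for( size_t b = j; b < ej; ++b )
    //               {
    //                   out.push_back( A[a].second );  // marker
    //                   out.push_back( A[a].first );   // proc that holds it in model A
    //                   out.push_back( B[b].first );   // proc that holds it in model B
    //               }
    //           i = ei;
    //           j = ej;
    //       }
    //   }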
04061     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about
04062     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
04063 
04064     TupleList TLv;  // vertices
04065     TupleList TLc;  // cells if needed (not type 2)
04066 
04067     if( *pid1 >= 0 )
04068     {
04069         // we are on original comp 1 tasks
04070         // before ordering
04071         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
04072         // processors it communicates with
04073 #ifdef VERBOSE
04074         std::stringstream f1;
04075         f1 << "TLBack1_" << localRank << ".txt";
04076         TLBackToComp1.print_to_file( f1.str().c_str() );
04077 #endif
04078         // so we are now on pid1, and we know where each marker has to go
04079         // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map
04080         rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
04081         // we can just send vertices and elements, with crystal routers;
04082         // on the receiving end, make sure they are not duplicated, by looking at the global id
04083         // if *type is 1, also send global_dofs tag in the element tuple
04084         rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
04085     }
04086     else
04087     {
04088         TLv.initialize( 2, 0, 0, 3, 0 );  // no vertices here, for sure
04089         TLv.enableWriteAccess();          // to be able to receive stuff, even if nothing is here yet, on this task
04090         if( *type != 2 )                  // for point cloud, we do not need to initialize TLc (for cells)
04091         {
04092             // we still need to initialize the tuples with the right size, as in form_tuples_to_migrate_mesh
04093             int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 +
04094                              10;  // 10 is the max number of vertices in cell; kind of arbitrary
04095             TLc.initialize( size_tuple, 0, 0, 0, 0 );
04096             TLc.enableWriteAccess();
04097         }
04098     }
04099     pc.crystal_router()->gs_transfer( 1, TLv, 0 );  // communication towards coupler tasks, with mesh vertices
04100     if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );  // those are cells
04101 
04102     if( *pid3 >= 0 )  // will receive the mesh, on coupler pes!
04103     {
04104         appData& data3     = context.appDatas[*pid3];
04105         EntityHandle fset3 = data3.file_set;
04106         Range primary_ents3;                 // vertices for type 2, cells of dim 2 for type 1 or 3
04107         std::vector< int > values_entities;  // will be the size of primary_ents3 * lenTagType1
04108         rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
04109                                                   values_entities );MB_CHK_ERR( rval );
04110         iMOAB_UpdateMeshInfo( pid3 );
04111         int ndofPerEl = 1;
04112         if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
04113         // because we are on the coupler, we know that the read map pid2 exists
04114         assert( *pid2 >= 0 );
04115         appData& dataIntx        = context.appDatas[*pid2];
04116         TempestMapAppData& tdata = dataIntx.tempestData;
04117 
04118         // if we are on source coverage, direction 1, we can set covering mesh, covering cells
04119         if( 1 == *direction )
04120         {
04121             tdata.pid_src = pid3;
04122             tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, primary_ents3 );
04123             weightMap->SetSourceNDofsPerElement( ndofPerEl );
04124             weightMap->set_col_dc_dofs( values_entities );  // will set col_dtoc_dofmap
04125         }
04126         // if we are on target, we can set the target cells
04127         else
04128         {
04129             tdata.pid_dest = pid3;
04130             tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, primary_ents3 );
04131             weightMap->SetDestinationNDofsPerElement( ndofPerEl );
04132             weightMap->set_row_dc_dofs( values_entities );  // will set row_dtoc_dofmap
04133         }
04134     }
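    // Worked example for the ndofPerEl computation above: for a spectral-element (type 1) mesh
    // whose GLOBAL_DOFS tag stores lenTagType1 = 16 values per cell, the DoFs form a 4 x 4 grid
    // on each element, so ndofPerEl = sqrt(16) = 4; for the FV and point-cloud cases (types 2
    // and 3) it stays at 1.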
04135 
04136     return moab::MB_SUCCESS;
04137 }
04138 
04139 #endif  // #ifdef MOAB_HAVE_MPI
04140 
04141 #endif  // #ifdef MOAB_HAVE_NETCDF
04142 
04143 #define USE_API
04144 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
04145 {
04146     ErrorCode rval;
04147     moab::CartVect pos;
04148 
04149     Range& verts                   = context.appDatas[*pid].all_verts;
04150     moab::EntityHandle firstVertex = ( verts[0] );
04151 
04152     // coordinate data
04153     rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
04154 
04155     // compute the distance from origin
04156     // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
04157     *radius = pos.length();
04158     return moab::MB_SUCCESS;
04159 }
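// ComputeSphereRadius above simply takes the Euclidean norm of the first vertex's coordinates,
// assuming the mesh is centered at the origin.  For example, a vertex at (0.6, 0.8, 0.0) yields
// radius = sqrt(0.6^2 + 0.8^2 + 0.0^2) = 1.0.  A standalone equivalent (illustrative only):
//
//   #include <cmath>
//
//   double sphere_radius( const double xyz[3] )
//   {
//       return std::sqrt( xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2] );
//   }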
04160 
04161 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
04162 {
04163     ErrorCode rval;
04164     ErrCode ierr;
04165     bool validate = true;
04166 
04167     double radius_source = 1.0;
04168     double radius_target = 1.0;
04169 
04170     const double epsrel = ReferenceTolerance;  // ReferenceTolerance is defined in Defines.h in the TempestRemap source
04171     const double boxeps = 1.e-6;
04172 
04173     // Get the source and target data and pcomm objects
04174     appData& data_src  = context.appDatas[*pid_src];
04175     appData& data_tgt  = context.appDatas[*pid_tgt];
04176     appData& data_intx = context.appDatas[*pid_intx];
04177 #ifdef MOAB_HAVE_MPI
04178     ParallelComm* pco_intx = context.pcomms[*pid_intx];
04179 #endif
04180 
04181     // Mesh intersection has already been computed; Return early.
04182     TempestMapAppData& tdata = data_intx.tempestData;
04183     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
04184 
04185     bool is_parallel = false, is_root = true;
04186     int rank = 0;
04187 #ifdef MOAB_HAVE_MPI
04188     if( pco_intx )
04189     {
04190         rank        = pco_intx->rank();
04191         is_parallel = ( pco_intx->size() > 1 );
04192         is_root     = ( rank == 0 );
04193         rval        = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
04194     }
04195 #endif
04196 
04197     moab::DebugOutput outputFormatter( std::cout, rank, 0 );
04198     outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
04199 
04200     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
04201     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
04202 
04203     // Rescale the radius of both to compute the intersection
04204     ComputeSphereRadius( pid_src, &radius_source );
04205     ComputeSphereRadius( pid_tgt, &radius_target );
04206 #ifdef VERBOSE
04207     if( is_root )
04208         outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
04209                                 radius_target );
04210 #endif
04211 
04212     /* Let us make sure that the radii match for the source and target meshes. If not, rescale now and
04213      * unscale later. */
04214     double defaultradius = 1.0;
04215     if( fabs( radius_source - radius_target ) > 1e-10 )
04216     { /* the radii are different */
04217         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
04218         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
04219     }
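    // IntxUtils::ScaleToRadius rescales every vertex so that both meshes live on the same sphere:
    // each coordinate vector x is replaced by (R / |x|) * x.  A minimal standalone sketch of that
    // projection for a single vertex (illustrative only; the real routine iterates over all
    // vertices in the given file set):
    //
    //   #include <cmath>
    //
    //   void project_to_radius( double xyz[3], double R )
    //   {
    //       double len = std::sqrt( xyz[0] * xyz[0] + xyz[1] * xyz[1] + xyz[2] * xyz[2] );
    //       if( len > 0.0 )
    //           for( int k = 0; k < 3; ++k )
    //               xyz[k] *= R / len;
    //   }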
04220 
04221     // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
04222 #ifdef MOAB_HAVE_TEMPESTREMAP
04223     IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
04224 #else
04225     IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
04226 #endif
04227 
04228     // print verbosely about the problem setting
04229     bool use_kdtree_search = false;
04230     double srctgt_areas[2], srctgt_areas_glb[2];
04231     {
04232         moab::Range rintxverts, rintxelems;
04233         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
04234         rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
04235         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
04236         rval =
04237             areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/, rank );MB_CHK_ERR( rval );
04238         srctgt_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
04239 #ifdef VERBOSE
04240         if( is_root )
04241             outputFormatter.printf( 0, "The red set contains %d vertices and %d elements \n", rintxverts.size(),
04242                                     rintxelems.size() );
04243 #endif
04244 
04245         moab::Range bintxverts, bintxelems;
04246         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
04247         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
04248         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
04249         rval =
04250             areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/, rank );MB_CHK_ERR( rval );
04251         srctgt_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
04252 #ifdef VERBOSE
04253         if( is_root )
04254             outputFormatter.printf( 0, "The blue set contains %d vertices and %d elements \n", bintxverts.size(),
04255                                     bintxelems.size() );
04256 #endif
04257 #ifdef MOAB_HAVE_MPI
04258         MPI_Allreduce( &srctgt_areas[0], &srctgt_areas_glb[0], 2, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
04259 #else
04260         srctgt_areas_glb[0] = srctgt_areas[0];
04261         srctgt_areas_glb[1] = srctgt_areas[1];
04262 #endif
04263         use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
04264     }
04265 
04266     data_intx.dimension = data_tgt.dimension;
04267     // set the context for the source and destination applications
04268     tdata.pid_src  = pid_src;
04269     tdata.pid_dest = pid_tgt;
04270 
04271     // Now allocate and initialize the remapper object
04272 #ifdef MOAB_HAVE_MPI
04273     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
04274 #else
04275     tdata.remapper = new moab::TempestRemapper( context.MBI );
04276 #endif
04277     tdata.remapper->meshValidate     = true;
04278     tdata.remapper->constructEdgeMap = true;
04279 
04280     tdata.remapper->set_intx_name( data_intx.name );
04281     // Do not create new filesets; Use the sets from our respective applications
04282     tdata.remapper->initialize( false );
04283     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
04284     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
04285     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
04286 
04287     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
04288     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
04289 
04290 #ifdef VERBOSE_ZOLTAN_TIMINGS
04291 #ifdef MOAB_HAVE_MPI
04292     double t1;
04293     if( is_root ) t1 = MPI_Wtime();
04294 #endif
04295 #endif
04296     // First, compute the covering source set.
04297     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
04298 #ifdef VERBOSE_ZOLTAN_TIMINGS
04299 #ifdef MOAB_HAVE_MPI
04300     double t2;
04301     if( is_root )
04302     {
04303         t2 = MPI_Wtime();
04304         std::cout << "[LOG] Time: coverage mesh:" << t2 - t1 << "\n";
04305     }
04306 #endif
04307 #endif
04308     // Next, compute intersections with MOAB.
04309     rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );MB_CHK_ERR( rval );
04310 
04311     // Mapping computation done
04312     if( validate )
04313     {
04314         double local_area,
04315             global_areas[3];  // Array for Initial area, and through Method 1 and Method 2
04316         local_area = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source, rank );
04317 
04318         global_areas[0] = srctgt_areas_glb[0];
04319         global_areas[1] = srctgt_areas_glb[1];
04320 
04321 #ifdef MOAB_HAVE_MPI
04322         if( is_parallel )
04323         {
04324             MPI_Reduce( &local_area, &global_areas[2], 1, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
04325         }
04326         else
04327         {
04328             global_areas[2] = local_area;
04329         }
04330 #else
04331         global_areas[2] = local_area;
04332 #endif
04333         if( is_root )
04334         {
04335             outputFormatter.printf( 0,
04336                                     "initial area: source mesh = %12.14f, target mesh = %12.14f, "
04337                                     "overlap mesh = %12.14f\n",
04338                                     global_areas[0], global_areas[1], global_areas[2] );
04339             outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
04340                                     fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
04341                                     fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
04342         }
04343     }
04344 
04345     return moab::MB_SUCCESS;
04346 }
04347 
04348 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
04349 {
04350     ErrorCode rval;
04351     ErrCode ierr;
04352 
04353     double radius_source = 1.0;
04354     double radius_target = 1.0;
04355 
04356     const double epsrel = ReferenceTolerance;  // ReferenceTolerance is defined in Defines.h in the TempestRemap source
04357     const double boxeps = 1.e-8;
04358 
04359     // Get the source and target data and pcomm objects
04360     appData& data_src  = context.appDatas[*pid_src];
04361     appData& data_tgt  = context.appDatas[*pid_tgt];
04362     appData& data_intx = context.appDatas[*pid_intx];
04363     int rank           = 0;
04364 #ifdef MOAB_HAVE_MPI
04365     ParallelComm* pco_intx = context.pcomms[*pid_intx];
04366     rank                   = pco_intx->rank();
04367 #endif
04368 
04369     // Mesh intersection has already been computed; Return early.
04370     TempestMapAppData& tdata = data_intx.tempestData;
04371     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
04372 
04373 #ifdef MOAB_HAVE_MPI
04374     if( pco_intx )
04375     {
04376         rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
04377     }
04378 #endif
04379 
04380     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
04381     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
04382 
04383     // Rescale the radius of both to compute the intersection
04384     ComputeSphereRadius( pid_src, &radius_source );
04385     ComputeSphereRadius( pid_tgt, &radius_target );
04386 
04387     IntxAreaUtils areaAdaptor;
04388     // print verbosely about the problem setting
04389     {
04390         moab::Range rintxverts, rintxelems;
04391         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
04392         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
04393         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
04394         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source, rank );MB_CHK_ERR( rval );
04395 #ifdef VERBOSE
04396         std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
04397                   << " elements \n";
04398 #endif
04399 
04400         moab::Range bintxverts, bintxelems;
04401         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
04402         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
04403         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
04404         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target, rank );MB_CHK_ERR( rval );
04405 #ifdef VERBOSE
04406         std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
04407                   << " elements \n";
04408 #endif
04409     }
04410 
04411     data_intx.dimension = data_tgt.dimension;
04412     // set the context for the source and destination applications
04414     tdata.pid_src         = pid_src;
04415     tdata.pid_dest        = pid_tgt;
04416     data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
04417     assert( data_intx.point_cloud == true );
04418 
04419     // Now allocate and initialize the remapper object
04420 #ifdef MOAB_HAVE_MPI
04421     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
04422 #else
04423     tdata.remapper = new moab::TempestRemapper( context.MBI );
04424 #endif
04425     tdata.remapper->meshValidate     = true;
04426     tdata.remapper->constructEdgeMap = true;
04427 
04428     // Do not create new filesets; Use the sets from our respective applications
04429     tdata.remapper->initialize( false );
04430     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
04431     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
04432     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
04433 
04434     /* Let us make sure that the radii match for the source and target meshes. If not, rescale now and
04435      * unscale later. */
04436     if( fabs( radius_source - radius_target ) > 1e-10 )
04437     { /* the radii are different */
04438         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
04439         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
04440     }
04441 
04442     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
04443     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
04444 
04445     // First, compute the covering source set.
04446     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
04447 
04448 #ifdef MOAB_HAVE_MPI
04449     /* VSM: This context should be set on the data_src but it would overwrite the source
04450        covering set context in case it is coupled to another APP as well.
04451        This needs a redesign. */
04452     // data_intx.covering_set = tdata.remapper->GetCoveringSet();
04453     // data_src.covering_set = tdata.remapper->GetCoveringSet();
04454 #endif
04455 
04456     // Now compute the global and local DoF maps for the converted meshes
04457     rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
04458 
04459     return moab::MB_SUCCESS;
04460 }
04461 
04462 ErrCode iMOAB_ComputeScalarProjectionWeights(
04463     iMOAB_AppID pid_intx,
04464     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
04465     const iMOAB_String disc_method_source,
04466     int* disc_order_source,
04467     const iMOAB_String disc_method_target,
04468     int* disc_order_target,
04469     int* fNoBubble,
04470     int* fMonotoneTypeID,
04471     int* fVolumetric,
04472     int* fInverseDistanceMap,
04473     int* fNoConservation,
04474     int* fValidate,
04475     const iMOAB_String source_solution_tag_dof_name,
04476     const iMOAB_String target_solution_tag_dof_name )
04477 {
04478     moab::ErrorCode rval;
04479 
04480     assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
04481     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
04482     assert( disc_method_source && strlen( disc_method_source ) );
04483     assert( disc_method_target && strlen( disc_method_target ) );
04484     assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
04485     assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
04486 
04487     // Get the source and target data and pcomm objects
04488     appData& data_intx       = context.appDatas[*pid_intx];
04489     TempestMapAppData& tdata = data_intx.tempestData;
04490 
04491     // Setup computation of weights
04492     // Set the context for the remapping weights computation
04493     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
04494 
04495     // Call to generate the remap weights with the tempest meshes
04496     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
04497     assert( weightMap != NULL );
04498 
04499     GenerateOfflineMapAlgorithmOptions mapOptions;
04500     mapOptions.nPin           = *disc_order_source;
04501     mapOptions.nPout          = *disc_order_target;
04502     mapOptions.fSourceConcave = false;
04503     mapOptions.fTargetConcave = false;
04504 
04505     mapOptions.strMethod = "";
04506     if( fMonotoneTypeID )
04507     {
04508         switch( *fMonotoneTypeID )
04509         {
04510             case 0:
04511                 mapOptions.fMonotone = false;
04512                 break;
04513             case 3:
04514                 mapOptions.strMethod += "mono3;";
04515                 break;
04516             case 2:
04517                 mapOptions.strMethod += "mono2;";
04518                 break;
04519             default:
04520                 mapOptions.fMonotone = true;
04521         }
04522     }
04523     else
04524         mapOptions.fMonotone = false;
04525 
04526     mapOptions.fNoBubble       = ( fNoBubble ? *fNoBubble : false );
04527     mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
04528     mapOptions.fNoCorrectAreas = false;
04529     mapOptions.fNoCheck        = !( fValidate ? *fValidate : true );
04530     if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
04531     if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
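    // Worked example of the option assembly above (hypothetical inputs): with *fMonotoneTypeID == 2,
    // *fVolumetric != 0 and *fInverseDistanceMap != 0, the accumulated method string becomes
    // mapOptions.strMethod == "mono2;volumetric;invdist;", while fNoBubble, fNoConservation and
    // fValidate only toggle the corresponding boolean fields.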
04532 
04533     // Now let us compute the local-global mapping and store it in the context
04534     // We need this mapping when computing matvec products and to do reductions in parallel
04535     // Additionally, the call below will also compute weights with TempestRemap
04536     rval = weightMap->GenerateRemappingWeights(
04537         std::string( disc_method_source ),            // const std::string& strInputType
04538         std::string( disc_method_target ),            // const std::string& strOutputType
04539         mapOptions,                                   // GenerateOfflineMapAlgorithmOptions& mapOptions
04540         std::string( source_solution_tag_dof_name ),  // const std::string& srcDofTagName = "GLOBAL_ID"
04541         std::string( target_solution_tag_dof_name )   // const std::string& tgtDofTagName = "GLOBAL_ID"
04542     );MB_CHK_ERR( rval );
04543 
04544     return moab::MB_SUCCESS;
04545 }
04546 
04547 ErrCode iMOAB_ApplyScalarProjectionWeights(
04548     iMOAB_AppID pid_intersection,
04549     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
04550     const iMOAB_String source_solution_tag_name,
04551     const iMOAB_String target_solution_tag_name )
04552 {
04553     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
04554     assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
04555     assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
04556 
04557     moab::ErrorCode rval;
04558 
04559     // Get the source and target data and pcomm objects
04560     appData& data_intx       = context.appDatas[*pid_intersection];
04561     TempestMapAppData& tdata = data_intx.tempestData;
04562 
04563     // Now allocate and initialize the remapper object
04564     moab::TempestRemapper* remapper   = tdata.remapper;
04565     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
04566 
04567     // we assume that there are separators ":" between the tag names
04568     std::vector< std::string > srcNames;
04569     std::vector< std::string > tgtNames;
04570     std::vector< Tag > srcTagHandles;
04571     std::vector< Tag > tgtTagHandles;
04572     std::string separator( ":" );
04573     std::string src_name( source_solution_tag_name );
04574     std::string tgt_name( target_solution_tag_name );
04575     split_tag_names( src_name, separator, srcNames );
04576     split_tag_names( tgt_name, separator, tgtNames );
04577     if( srcNames.size() != tgtNames.size() )
04578     {
04579         std::cout << " error in parsing source and target tag names. \n";
04580         return moab::MB_FAILURE;
04581     }
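    // The split_tag_names helper breaks a ":"-separated list such as "T_ph:u:v" into
    // {"T_ph", "u", "v"}.  A minimal equivalent using only std::string (illustrative sketch,
    // not the actual helper):
    //
    //   #include <string>
    //   #include <vector>
    //
    //   void split_names( const std::string& input, const std::string& sep, std::vector< std::string >& out )
    //   {
    //       size_t start = 0, pos;
    //       while( ( pos = input.find( sep, start ) ) != std::string::npos )
    //       {
    //           if( pos > start ) out.push_back( input.substr( start, pos - start ) );
    //           start = pos + sep.size();
    //       }
    //       if( start < input.size() ) out.push_back( input.substr( start ) );
    //   }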
04582 
04583     for( size_t i = 0; i < srcNames.size(); i++ )
04584     {
04585         Tag tagHandle;
04586         rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
04587         if( MB_SUCCESS != rval || NULL == tagHandle )
04588         {
04589             return moab::MB_FAILURE;
04590         }
04591         srcTagHandles.push_back( tagHandle );
04592         rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
04593         if( MB_SUCCESS != rval || NULL == tagHandle )
04594         {
04595             return moab::MB_FAILURE;
04596         }
04597         tgtTagHandles.push_back( tagHandle );
04598     }
04599 
04600     std::vector< double > solSTagVals;
04601     std::vector< double > solTTagVals;
04602 
04603     moab::Range sents, tents;
04604     if( data_intx.point_cloud )
04605     {
04606         appData& data_src = context.appDatas[*tdata.pid_src];
04607         appData& data_tgt = context.appDatas[*tdata.pid_dest];
04608         if( data_src.point_cloud )
04609         {
04610             moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
04611             solSTagVals.resize( covSrcEnts.size(), -1.0 );
04612             sents = covSrcEnts;
04613         }
04614         else
04615         {
04616             moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
04617             solSTagVals.resize( covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() *
04618                                     weightMap->GetSourceNDofsPerElement(),
04619                                 -1.0 );
04620             sents = covSrcEnts;
04621         }
04622         if( data_tgt.point_cloud )
04623         {
04624             moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
04625             solTTagVals.resize( tgtEnts.size(), -1.0 );
04626             tents = tgtEnts;
04627         }
04628         else
04629         {
04630             moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
04631             solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
04632                                     weightMap->GetDestinationNDofsPerElement(),
04633                                 -1.0 );
04634             tents = tgtEnts;
04635         }
04636     }
04637     else
04638     {
04639         moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
04640         moab::Range& tgtEnts    = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
04641         solSTagVals.resize(
04642             covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
04643         solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
04644                                 weightMap->GetDestinationNDofsPerElement(),
04645                             -1.0 );
04646 
04647         sents = covSrcEnts;
04648         tents = tgtEnts;
04649     }
04650 
04651     for( size_t i = 0; i < srcTagHandles.size(); i++ )
04652     {
04653         // The tag data is np*np*n_el_src
04654         Tag ssolnTag = srcTagHandles[i];
04655         Tag tsolnTag = tgtTagHandles[i];
04656         rval         = context.MBI->tag_get_data( ssolnTag, sents, &solSTagVals[0] );MB_CHK_ERR( rval );
04657 
04658         // Compute the application of weights on the source solution data and store it in the
04659         // destination solution vector data. Optionally, the transpose application of the weight
04660         // matrix can also be performed; set the 3rd argument to true if this is needed
04661         rval = weightMap->ApplyWeights( solSTagVals, solTTagVals, false );MB_CHK_ERR( rval );
04662 
04663         // The tag data is np*np*n_el_dest
04664         rval = context.MBI->tag_set_data( tsolnTag, tents, &solTTagVals[0] );MB_CHK_ERR( rval );
04665     }
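    // ApplyWeights above performs, per field, a sparse matrix-vector product y = W x, where W is
    // the remapping weight matrix, x holds the source (covering-mesh) DoF values and y the target
    // DoF values.  A standalone sketch of that operation for a weight matrix stored in CSR form
    // (illustrative only; TempestOnlineMap keeps its own internal storage):
    //
    //   #include <vector>
    //
    //   void apply_csr_weights( const std::vector< int >& row_ptr,   // size nrows + 1
    //                           const std::vector< int >& col_idx,   // size nnz
    //                           const std::vector< double >& vals,   // size nnz
    //                           const std::vector< double >& x,      // source DoF values
    //                           std::vector< double >& y )           // target DoF values, size nrows
    //   {
    //       for( size_t r = 0; r + 1 < row_ptr.size(); ++r )
    //       {
    //           double acc = 0.0;
    //           for( int k = row_ptr[r]; k < row_ptr[r + 1]; ++k )
    //               acc += vals[k] * x[col_idx[k]];
    //           y[r] = acc;
    //       }
    //   }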
04666 
04667 // #define VERBOSE
04668 #ifdef VERBOSE
04669     ParallelComm* pco_intx = context.pcomms[*pid_intersection];
04670 
04671     int ivar = 0;
04672     {
04673         Tag ssolnTag = srcTagHandles[ivar];
04674         std::stringstream sstr;
04675         sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04676         std::ofstream output_file( sstr.str().c_str() );
04677         for( unsigned i = 0; i < sents.size(); ++i )
04678         {
04679             EntityHandle elem = sents[i];
04680             std::vector< double > locsolSTagVals( 16 );
04681             rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
04682             output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
04683             for( unsigned j = 0; j < 16; ++j )
04684                 output_file << locsolSTagVals[j] << " ";
04685         }
04686         output_file.flush();  // required here
04687         output_file.close();
04688     }
04689     {
04690         std::stringstream sstr;
04691         sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04692         EntityHandle sets[2] = {context.appDatas[*tdata.pid_src].file_set, context.appDatas[*tdata.pid_dest].file_set};
04693         rval                 = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04694     }
04695     {
04696         std::stringstream sstr;
04697         sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04698         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04699         EntityHandle covering_set = remapper->GetCoveringSet();
04700         EntityHandle sets[2]      = {covering_set, context.appDatas[*tdata.pid_dest].file_set};
04701         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04702     }
04703     {
04704         std::stringstream sstr;
04705         sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04706         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04707         EntityHandle covering_set = remapper->GetCoveringSet();
04708         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
04709     }
04710     {
04711         std::stringstream sstr;
04712         sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04713         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04714         EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
04715         rval                    = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
04716     }
04717     {
04718         std::stringstream sstr;
04719         sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04720         std::ofstream output_file( sstr.str().c_str() );
04721         for( unsigned i = 0; i < solSTagVals.size(); ++i )
04722             output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
04723                         << solSTagVals[i] << "\n";
04724         output_file.flush();  // required here
04725         output_file.close();
04726     }
04727 #endif
04728     // #undef VERBOSE
04729 
04730     return moab::MB_SUCCESS;
04731 }
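// A hedged usage sketch of the remapping workflow implemented by the three functions above,
// assuming the source, target and intersection applications have already been registered and
// their meshes loaded (pid_src, pid_tgt, pid_intx below are hypothetical, previously obtained
// iMOAB_AppID handles; tag names and discretization choices are illustrative only):
//
//   int order_src = 1, order_tgt = 1;
//   int fNoBubble = 1, fMonotone = 0, fVolumetric = 0, fInvDist = 0, fNoConserve = 0, fValidate = 1;
//
//   // 1) intersect the two meshes on the sphere
//   ErrCode ierr = iMOAB_ComputeMeshIntersectionOnSphere( pid_src, pid_tgt, pid_intx );
//
//   // 2) compute the projection weights on the intersection mesh ("fv" = finite volume)
//   ierr = iMOAB_ComputeScalarProjectionWeights( pid_intx, "scalar", "fv", &order_src, "fv", &order_tgt,
//                                                &fNoBubble, &fMonotone, &fVolumetric, &fInvDist,
//                                                &fNoConserve, &fValidate, "GLOBAL_ID", "GLOBAL_ID" );
//
//   // 3) apply the weights to move a field from the source tag to the target tag
//   ierr = iMOAB_ApplyScalarProjectionWeights( pid_intx, "scalar", "Temperature_src", "Temperature_tgt" );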
04732 
04733 #endif  // MOAB_HAVE_TEMPESTREMAP
04734 
04735 #ifdef __cplusplus
04736 }
04737 #endif