00001 /** \file iMOAB.cpp
00002  */
00003 
00004 #include "moab/MOABConfig.h"
00005 #include "moab/Core.hpp"
00006 
00007 #ifdef MOAB_HAVE_MPI
00008 #include "moab_mpi.h"
00009 #include "moab/ParallelComm.hpp"
00010 #include "moab/ParCommGraph.hpp"
00011 #include "moab/ParallelMergeMesh.hpp"
00012 #include "moab/IntxMesh/IntxUtils.hpp"
00013 #endif
00014 #include "DebugOutput.hpp"
00015 #include "moab/iMOAB.h"
00016 
00017 /* this is needed because of direct access to hdf5/mhdf */
00018 #ifdef MOAB_HAVE_HDF5
00019 #include "mhdf.h"
00020 #include <H5Tpublic.h>
00021 #endif
00022 
00023 #include "moab/CartVect.hpp"
00024 #include "MBTagConventions.hpp"
00025 #include "moab/MeshTopoUtil.hpp"
00026 #include "moab/ReadUtilIface.hpp"
00027 #include "moab/MergeMesh.hpp"
00028 
00029 #ifdef MOAB_HAVE_TEMPESTREMAP
00030 #include "STLStringHelper.h"
00031 #include "moab/IntxMesh/IntxUtils.hpp"
00032 
00033 #include "moab/Remapping/TempestRemapper.hpp"
00034 #include "moab/Remapping/TempestOnlineMap.hpp"
00035 #endif
00036 
00037 // C++ includes
00038 #include <cassert>
00039 #include <sstream>
00040 #include <iostream>
00041 
00042 using namespace moab;
00043 
00044 // #define VERBOSE
00045 
00046 // global variables; should they be organized in a structure, for easier reference?
00047 // or how do we keep them global?
00048 
00049 #ifdef __cplusplus
00050 extern "C" {
00051 #endif
00052 
00053 #ifdef MOAB_HAVE_TEMPESTREMAP
00054 struct TempestMapAppData
00055 {
00056     moab::TempestRemapper* remapper;
00057     std::map< std::string, moab::TempestOnlineMap* > weightMaps;
00058     iMOAB_AppID pid_src;
00059     iMOAB_AppID pid_dest;
00060 };
00061 #endif
00062 
00063 struct appData
00064 {
00065     EntityHandle file_set;
00066     int global_id;  // external component id, unique for application
00067     std::string name;
00068     Range all_verts;
00069     Range local_verts;  // it could include shared, but not owned at the interface
00070     // these vertices would be all_verts if no ghosting was required
00071     Range ghost_vertices;  // locally ghosted from other processors
00072     Range primary_elems;
00073     Range owned_elems;
00074     Range ghost_elems;
00075     int dimension;             // 2 or 3, dimension of primary elements (redundant?)
00076     long num_global_elements;  // global number of elements in primary_elems; either from hdf5
00077                                // reading or from a reduce
00078     long num_global_vertices;  // global number of nodes, after sharing is resolved; it could be
00079                                // determined from hdf5 reading
00080     Range mat_sets;
00081     std::map< int, int > matIndex;  // map from global block id to index in mat_sets
00082     Range neu_sets;
00083     Range diri_sets;
00084     std::map< std::string, Tag > tagMap;
00085     std::vector< Tag > tagList;
00086     bool point_cloud;
00087     bool is_fortran;
00088 
00089 #ifdef MOAB_HAVE_MPI
00090     // constructor for this ParCommGraph takes the joint comm and the MPI groups for each
00091     // application
00092     std::map< int, ParCommGraph* > pgraph;  // map from context (component) id to the ParCommGraph*
00093 #endif
00094 
00095 #ifdef MOAB_HAVE_TEMPESTREMAP
00096     TempestMapAppData tempestData;
00097 #endif
00098 };
00099 
00100 struct GlobalContext
00101 {
00102     // are there reasons to have multiple moab inits? Is ref count needed?
00103     Interface* MBI;
00104     // we should also have the default tags stored, initialized
00105     Tag material_tag, neumann_tag, dirichlet_tag,
00106         globalID_tag;  // material, neumann, dirichlet,  globalID
00107     int refCountMB;
00108     int iArgc;
00109     iMOAB_String* iArgv;
00110     int unused_pid;
00111 
00112     std::map< std::string, int > appIdMap;  // from app string (uppercase) to app id
00113     std::map< int, int > appIdCompMap;      // from component id to app id
00114 
00115 #ifdef MOAB_HAVE_MPI
00116     std::vector< ParallelComm* > pcomms;  // created in order of applications, one moab::ParallelComm for each
00117 #endif
00118 
00119     std::vector< appData > appDatas;  // the same order as pcomms
00120     int globalrank, worldprocs;
00121     bool MPI_initialized;
00122 
00123     GlobalContext()
00124     {
00125         MBI        = 0;
00126         refCountMB = 0;
00127         unused_pid = 0;
00128     }
00129 };
00130 
00131 static struct GlobalContext context;
00132 
00133 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
00134 {
00135     if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
00136 
00137     context.iArgc = argc;
00138     context.iArgv = argv;  // shallow copy
00139 
00140     if( 0 == context.refCountMB )
00141     {
00142         context.MBI = new( std::nothrow ) moab::Core;
00143         // retrieve the default tags
00144         const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
00145                                                      DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
00146         // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
00147         Tag gtags[4];
00148 
00149         for( int i = 0; i < 4; i++ )
00150         {
00151 
00152             ErrorCode rval =
00153                 context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
00154         }
00155 
00156         context.material_tag  = gtags[0];
00157         context.neumann_tag   = gtags[1];
00158         context.dirichlet_tag = gtags[2];
00159         context.globalID_tag  = gtags[3];
00160     }
00161 
00162     context.MPI_initialized = false;
00163 #ifdef MOAB_HAVE_MPI
00164     int flagInit;
00165     MPI_Initialized( &flagInit );
00166 
00167     if( flagInit && !context.MPI_initialized )
00168     {
00169         MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
00170         MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
00171         context.MPI_initialized = true;
00172     }
00173 #endif
00174 
00175     context.refCountMB++;
00176     return moab::MB_SUCCESS;
00177 }
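
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): a minimal C++ driver that
// pairs iMOAB_Initialize with iMOAB_Finalize. Because of the reference count
// above, repeated Initialize/Finalize pairs are also safe. Error handling is
// omitted; the driver name and structure are assumptions, not library code.
//
//     #include "moab/iMOAB.h"
//
//     int main( int argc, char** argv )
//     {
//         ErrCode rc = iMOAB_Initialize( argc, argv );
//         // ... register applications, load meshes, exchange tags ...
//         rc = iMOAB_Finalize();
//         return (int)rc;
//     }
// ---------------------------------------------------------------------------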
00178 
00179 ErrCode iMOAB_InitializeFortran()
00180 {
00181     return iMOAB_Initialize( 0, 0 );
00182 }
00183 
00184 ErrCode iMOAB_Finalize()
00185 {
00186     context.refCountMB--;
00187 
00188     if( 0 == context.refCountMB )
00189     {
00190         delete context.MBI;
00191     }
00192 
00193     return MB_SUCCESS;
00194 }
00195 
00196 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
00197 #ifdef MOAB_HAVE_MPI
00198                                    MPI_Comm* comm,
00199 #endif
00200                                    int* compid,
00201                                    iMOAB_AppID pid )
00202 {
00203     IMOAB_CHECKPOINTER( app_name, 1 );
00204 #ifdef MOAB_HAVE_MPI
00205     IMOAB_CHECKPOINTER( comm, 2 );
00206     IMOAB_CHECKPOINTER( compid, 3 );
00207 #else
00208     IMOAB_CHECKPOINTER( compid, 2 );
00209 #endif
00210 
00211     // will create a parallel comm for this application too, so there will be a
00212     // mapping from *pid to file set and to parallel comm instances
00213     std::string name( app_name );
00214 
00215     if( context.appIdMap.find( name ) != context.appIdMap.end() )
00216     {
00217         std::cout << " application " << name << " already registered \n";
00218         return moab::MB_FAILURE;
00219     }
00220 
00221     *pid                   = context.unused_pid++;
00222     context.appIdMap[name] = *pid;
00223     int rankHere           = 0;
00224 #ifdef MOAB_HAVE_MPI
00225     MPI_Comm_rank( *comm, &rankHere );
00226 #endif
00227     if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
00228     if( *compid <= 0 )
00229     {
00230         std::cout << " convention for external application is to have its id positive \n";
00231         return moab::MB_FAILURE;
00232     }
00233 
00234     if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
00235     {
00236         std::cout << " external application with comp id " << *compid << " is already registered\n";
00237         return moab::MB_FAILURE;
00238     }
00239 
00240     context.appIdCompMap[*compid] = *pid;
00241 
00242     // now create ParallelComm and a file set for this application
00243 #ifdef MOAB_HAVE_MPI
00244     if( *comm )
00245     {
00246         ParallelComm* pco = new ParallelComm( context.MBI, *comm );
00247 
00248 #ifndef NDEBUG
00249         int index = pco->get_id();  // it could be useful to get app id from pcomm instance ...
00250         assert( index == *pid );
00251         // here, we assert that the pid is the same as the id of the ParallelComm instance
00252         // useful for writing in parallel
00253 #endif
00254         context.pcomms.push_back( pco );
00255     }
00256     else
00257     {
00258         context.pcomms.push_back( 0 );
00259     }
00260 #endif
00261 
00262     // create now the file set that will be used for loading the model in
00263     EntityHandle file_set;
00264     ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
00265 
00266     appData app_data;
00267     app_data.file_set  = file_set;
00268     app_data.global_id = *compid;  // will be used mostly for par comm graph
00269     app_data.name      = name;     // save the name of application
00270 
00271 #ifdef MOAB_HAVE_TEMPESTREMAP
00272     app_data.tempestData.remapper = NULL;  // Only allocate as needed
00273 #endif
00274 
00275     app_data.point_cloud = false;
00276     app_data.is_fortran  = false;
00277 
00278     context.appDatas.push_back(
00279         app_data );  // appDatas[*pid].file_set will be the file set of interest for this application
00280     return moab::MB_SUCCESS;
00281 }
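
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): registering an application
// on MPI_COMM_WORLD. The application name "ATM" and component id 10 are
// hypothetical; the only requirements visible above are that the name is not
// already registered and that the component id is positive and unique.
//
//     int appID  = -1;                 // filled by iMOAB_RegisterApplication
//     int compid = 10;                 // hypothetical positive component id
//     MPI_Comm comm = MPI_COMM_WORLD;
//     ErrCode rc = iMOAB_RegisterApplication( "ATM", &comm, &compid, &appID );
//     // ... use &appID as the iMOAB_AppID in subsequent iMOAB calls ...
//     rc = iMOAB_DeregisterApplication( &appID );
// ---------------------------------------------------------------------------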
00282 
00283 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
00284 #ifdef MOAB_HAVE_MPI
00285                                           int* comm,
00286 #endif
00287                                           int* compid,
00288                                           iMOAB_AppID pid )
00289 {
00290     IMOAB_CHECKPOINTER( app_name, 1 );
00291 #ifdef MOAB_HAVE_MPI
00292     IMOAB_CHECKPOINTER( comm, 2 );
00293     IMOAB_CHECKPOINTER( compid, 3 );
00294 #else
00295     IMOAB_CHECKPOINTER( compid, 2 );
00296 #endif
00297 
00298     ErrCode err;
00299     assert( app_name != nullptr );
00300     std::string name( app_name );
00301 
00302 #ifdef MOAB_HAVE_MPI
00303     MPI_Comm ccomm;
00304     if( comm )
00305     {
00306         // convert from Fortran communicator to a C communicator
00307         // see transfer of handles
00308         // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
00309         ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
00310     }
00311 #endif
00312 
00313     // now call C style registration function:
00314     err = iMOAB_RegisterApplication( app_name,
00315 #ifdef MOAB_HAVE_MPI
00316                                      &ccomm,
00317 #endif
00318                                      compid, pid );
00319 
00320     // Now that we have created the application context, store that
00321     // the application being registered is from a Fortran context
00322     context.appDatas[*pid].is_fortran = true;
00323 
00324     return err;
00325 }
00326 
00327 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
00328 {
00329     // the file set and parallel comm are stored in vectors indexed by *pid
00330     // assume we did not delete anything yet
00331     // *pid will not be reused if we register another application
00332     appData& data = context.appDatas[*pid];
00333     int rankHere  = 0;
00334 #ifdef MOAB_HAVE_MPI
00335     ParallelComm* pco = context.pcomms[*pid];
00336     rankHere          = pco->rank();
00337 #endif
00338     if( !rankHere )
00339         std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
00340                   << " is de-registered now \n";
00341 
00342     EntityHandle fileSet = data.file_set;
00343     // get all entities part of the file set
00344     Range fileents;
00345     ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
00346 
00347     fileents.insert( fileSet );
00348 
00349     rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval );  // append all mesh sets
00350 
00351 #ifdef MOAB_HAVE_TEMPESTREMAP
00352     if( data.tempestData.remapper ) delete data.tempestData.remapper;
00353     if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
00354 #endif
00355 
00356 #ifdef MOAB_HAVE_MPI
00357 
00358     // we could get the pco also with
00359     // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
00360 
00361     std::map< int, ParCommGraph* >& pargs = data.pgraph;
00362 
00363     // free the parallel comm graphs associated with this app
00364     for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
00365     {
00366         ParCommGraph* pgr = mt->second;
00367         delete pgr;
00368         pgr = NULL;
00369     }
00370     if( pco ) delete pco;
00371 #endif
00372 
00373     // first delete everything except vertices
00374     Range vertices = fileents.subset_by_type( MBVERTEX );
00375     Range noverts  = subtract( fileents, vertices );
00376 
00377     rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
00378     // now retrieve connected elements that still exist (maybe in other sets, pids?)
00379     Range adj_ents_left;
00380     rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00381     rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00382     rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00383 
00384     if( !adj_ents_left.empty() )
00385     {
00386         Range conn_verts;
00387         rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
00388         vertices = subtract( vertices, conn_verts );
00389     }
00390 
00391     rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
00392 
00393     std::map< std::string, int >::iterator mit;
00394 
00395     for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
00396     {
00397         int pidx = mit->second;
00398 
00399         if( *pid == pidx )
00400         {
00401             break;
00402         }
00403     }
00404 
00405     context.appIdMap.erase( mit );
00406     std::map< int, int >::iterator mit1;
00407 
00408     for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
00409     {
00410         int pidx = mit1->second;
00411 
00412         if( *pid == pidx )
00413         {
00414             break;
00415         }
00416     }
00417 
00418     context.appIdCompMap.erase( mit1 );
00419 
00420     context.unused_pid--;  // applications must be de-registered in reverse order of registration (TODO: relax this)
00421     context.appDatas.pop_back();
00422 #ifdef MOAB_HAVE_MPI
00423     context.pcomms.pop_back();
00424 #endif
00425     return moab::MB_SUCCESS;
00426 }
00427 
00428 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
00429 {
00430     // release any Fortran specific allocations here before we pass it on
00431     context.appDatas[*pid].is_fortran = false;
00432 
00433     // release all datastructure allocations
00434     return iMOAB_DeregisterApplication( pid );
00435 }
00436 
00437 // Utility function
00438 static void split_tag_names( std::string input_names,
00439                              std::string& separator,
00440                              std::vector< std::string >& list_tag_names )
00441 {
00442     size_t pos = 0;
00443     std::string token;
00444     while( ( pos = input_names.find( separator ) ) != std::string::npos )
00445     {
00446         token = input_names.substr( 0, pos );
00447         if( !token.empty() ) list_tag_names.push_back( token );
00448         // std::cout << token << std::endl;
00449         input_names.erase( 0, pos + separator.length() );
00450     }
00451     if( !input_names.empty() )
00452     {
00453         // if something is left over (the string did not end with the separator), keep it as the last name
00454         list_tag_names.push_back( input_names );
00455     }
00456     return;
00457 }
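
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of iMOAB.cpp): with separator ":" the input
// "rho:u:v" yields the three names { "rho", "u", "v" }; empty tokens are
// skipped, so a trailing separator produces the same list. The variable names
// below are hypothetical.
//
//     std::vector< std::string > names;
//     std::string sep( ":" );
//     split_tag_names( "rho:u:v", sep, names );  // names == { "rho", "u", "v" }
// ---------------------------------------------------------------------------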
00458 
00459 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
00460                               int* num_global_vertices,
00461                               int* num_global_elements,
00462                               int* num_dimension,
00463                               int* num_parts )
00464 {
00465     IMOAB_CHECKPOINTER( filename, 1 );
00466     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00467 
00468 #ifdef MOAB_HAVE_HDF5
00469     std::string filen( filename );
00470 
00471     int edges   = 0;
00472     int faces   = 0;
00473     int regions = 0;
00474     if( num_global_vertices ) *num_global_vertices = 0;
00475     if( num_global_elements ) *num_global_elements = 0;
00476     if( num_dimension ) *num_dimension = 0;
00477     if( num_parts ) *num_parts = 0;
00478 
00479     mhdf_FileHandle file;
00480     mhdf_Status status;
00481     unsigned long max_id;
00482     struct mhdf_FileDesc* data;
00483 
00484     file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
00485 
00486     if( mhdf_isError( &status ) )
00487     {
00488         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00489         return moab::MB_FAILURE;
00490     }
00491 
00492     data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
00493                                 1 );  // will use extra set info; will get parallel partition tag info too!
00494 
00495     if( mhdf_isError( &status ) )
00496     {
00497         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00498         return moab::MB_FAILURE;
00499     }
00500 
00501     if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
00502     if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
00503 
00504     for( int i = 0; i < data->num_elem_desc; i++ )
00505     {
00506         struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
00507         struct mhdf_EntDesc* ent_d    = &( el_desc->desc );
00508 
00509         if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
00510         {
00511             edges += ent_d->count;
00512         }
00513 
00514         if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
00515         {
00516             faces += ent_d->count;
00517         }
00518 
00519         if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
00520         {
00521             faces += ent_d->count;
00522         }
00523 
00524         if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
00525         {
00526             faces += ent_d->count;
00527         }
00528 
00529         if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
00530         {
00531             regions += ent_d->count;
00532         }
00533 
00534         if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
00535         {
00536             regions += ent_d->count;
00537         }
00538 
00539         if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
00540         {
00541             regions += ent_d->count;
00542         }
00543 
00544         if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
00545         {
00546             regions += ent_d->count;
00547         }
00548 
00549         if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
00550         {
00551             regions += ent_d->count;
00552         }
00553 
00554         if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
00555         {
00556             regions += ent_d->count;
00557         }
00558 
00559         if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
00560         {
00561             regions += ent_d->count;
00562         }
00563     }
00564 
00565     if( num_parts ) *num_parts = data->numEntSets[0];
00566 
00567     // is this required?
00568     if( edges > 0 )
00569     {
00570         if( num_dimension ) *num_dimension = 1;  // I don't think it will ever return 1
00571         if( num_global_elements ) *num_global_elements = edges;
00572     }
00573 
00574     if( faces > 0 )
00575     {
00576         if( num_dimension ) *num_dimension = 2;
00577         if( num_global_elements ) *num_global_elements = faces;
00578     }
00579 
00580     if( regions > 0 )
00581     {
00582         if( num_dimension ) *num_dimension = 3;
00583         if( num_global_elements ) *num_global_elements = regions;
00584     }
00585 
00586     mhdf_closeFile( file, &status );
00587 
00588     free( data );
00589 
00590 #else
00591     std::cout << filename
00592               << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
00593                  "formats other than a h5m file.\n";
00594     if( num_global_vertices ) *num_global_vertices = 0;
00595     if( num_global_elements ) *num_global_elements = 0;
00596     if( num_dimension ) *num_dimension = 0;
00597     if( num_parts ) *num_parts = 0;
00598 #endif
00599 
00600     return moab::MB_SUCCESS;
00601 }
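
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): querying the header of an
// h5m file before loading it. The file name is hypothetical; any output
// pointer may be passed as NULL if that quantity is not needed.
//
//     int nverts = 0, nelems = 0, ndim = 0, nparts = 0;
//     ErrCode rc = iMOAB_ReadHeaderInfo( "ocean.h5m", &nverts, &nelems, &ndim, &nparts );
// ---------------------------------------------------------------------------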
00602 
00603 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
00604                         const iMOAB_String filename,
00605                         const iMOAB_String read_options,
00606                         int* num_ghost_layers )
00607 {
00608     IMOAB_CHECKPOINTER( filename, 2 );
00609     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00610 
00611     // make sure we use the file set and pcomm associated with the *pid
00612     std::ostringstream newopts;
00613     if( read_options ) newopts << read_options;
00614 
00615 #ifdef MOAB_HAVE_MPI
00616 
00617     if( context.MPI_initialized )
00618     {
00619         if( context.worldprocs > 1 )
00620         {
00621             std::string opts( ( read_options ? read_options : "" ) );
00622             std::string pcid( "PARALLEL_COMM=" );
00623             std::size_t found = opts.find( pcid );
00624 
00625             if( found != std::string::npos )
00626             {
00627                 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00628                 return moab::MB_FAILURE;
00629             }
00630 
00631             // apply the PARALLEL_COMM option only for h5m and nc files; it does not work for .g
00632             // files (used in test_remapping)
00633             std::string filen( filename );
00634             std::string::size_type idx = filen.rfind( '.' );
00635 
00636             if( idx != std::string::npos )
00637             {
00638                 std::string extension = filen.substr( idx + 1 );
00639                 if( (extension == std::string( "h5m" )) || (extension == std::string( "nc" ))) newopts << ";;PARALLEL_COMM=" << *pid;
00640             }
00641 
00642             if( *num_ghost_layers >= 1 )
00643             {
00644                 // if we want ghosts, we also want additional entities: the trailing .3 requests
00645                 // the adjacent edges and faces, which can be part of the neumann sets
00646                 std::string pcid2( "PARALLEL_GHOSTS=" );
00647                 std::size_t found2 = opts.find( pcid2 );
00648 
00649                 if( found2 != std::string::npos )
00650                 {
00651                     std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
00652                                  "number of layers \n";
00653                 }
00654                 else
00655                 {
00656                     // dimension of primary entities is 3 here, but it could be 2 for climate
00657                     // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
00658                     // example:  ";PARALLEL_GHOSTS=2.0.1"
00659                     newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
00660                 }
00661             }
00662         }
00663     }
00664 #else
00665     IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
00666 #endif
00667 
00668     // Now let us actually load the MOAB file with the appropriate read options
00669     ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
00670 
00671 #ifdef VERBOSE
00672     // some debugging stuff
00673     std::ostringstream outfile;
00674 #ifdef MOAB_HAVE_MPI
00675     int rank   = context.pcomms[*pid]->rank();
00676     int nprocs = context.pcomms[*pid]->size();
00677     outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
00678 #else
00679     outfile << "TaskMesh_n1.0.h5m";
00680 #endif
00681     // the mesh contains ghosts too, but they are not part of mat/neumann set
00682     // write the file in serial, to see what tags are missing
00683     rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval );  // everything on current task, written in serial
00684 #endif
00685 
00686     // Update mesh information
00687     return iMOAB_UpdateMeshInfo( pid );
00688 }
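
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): loading a partitioned h5m
// file in parallel with one ghost layer. The file name is hypothetical, and
// the exact read options depend on the file and its partitioning; note that
// PARALLEL_COMM (and PARALLEL_GHOSTS, when requested) are appended above.
//
//     int nghosts = 1;
//     ErrCode rc  = iMOAB_LoadMesh( &appID, "ocean.h5m",
//                                   "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;"
//                                   "PARALLEL_RESOLVE_SHARED_ENTS",
//                                   &nghosts );
// ---------------------------------------------------------------------------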
00689 
00690 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
00691 {
00692     IMOAB_CHECKPOINTER( filename, 2 );
00693     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00694 
00695     appData& data        = context.appDatas[*pid];
00696     EntityHandle fileSet = data.file_set;
00697 
00698     std::ostringstream newopts;
00699 #ifdef MOAB_HAVE_MPI
00700     std::string write_opts( ( write_options ? write_options : "" ) );
00701     std::string pcid( "PARALLEL_COMM=" );
00702     std::size_t found = write_opts.find( pcid );
00703 
00704     if( found != std::string::npos )
00705     {
00706         std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00707         return moab::MB_FAILURE;
00708     }
00709 
00710     // if writing in parallel, add the PARALLEL_COMM option, to be sure which ParallelComm instance is used
00711     std::string pw( "PARALLEL=WRITE_PART" );
00712     found = write_opts.find( pw );
00713 
00714     if( found != std::string::npos )
00715     {
00716         newopts << "PARALLEL_COMM=" << *pid << ";";
00717     }
00718 
00719 #endif
00720 
00721     if( write_options ) newopts << write_options;
00722 
00723     std::vector< Tag > copyTagList = data.tagList;
00724     // append Global ID and Parallel Partition
00725     std::string gid_name_tag( "GLOBAL_ID" );
00726 
00727     // export global id tag, we need it always
00728     if( data.tagMap.find( gid_name_tag ) == data.tagMap.end() )
00729     {
00730         Tag gid = context.MBI->globalId_tag();
00731         copyTagList.push_back( gid );
00732     }
00733     // also Parallel_Partition PARALLEL_PARTITION
00734     std::string pp_name_tag( "PARALLEL_PARTITION" );
00735 
00736     // write parallel part tag too, if it exists
00737     if( data.tagMap.find( pp_name_tag ) == data.tagMap.end() )
00738     {
00739         Tag ptag = NULL;  // initialize, so the check below is well defined even if the tag does not exist
00740         context.MBI->tag_get_handle( pp_name_tag.c_str(), ptag );
00741         if( ptag ) copyTagList.push_back( ptag );
00742     }
00743 
00744     // Now let us actually write the file to disk with appropriate options
00745     ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &fileSet, 1, &copyTagList[0],
00746                                               (int)copyTagList.size() );MB_CHK_ERR( rval );
00747 
00748     return moab::MB_SUCCESS;
00749 }
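
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): writing the application
// mesh in parallel. The output name is hypothetical; when the write options
// contain "PARALLEL=WRITE_PART", the matching PARALLEL_COMM is added above.
//
//     ErrCode rc = iMOAB_WriteMesh( &appID, "out.h5m", "PARALLEL=WRITE_PART" );
// ---------------------------------------------------------------------------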
00750 
00751 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, iMOAB_String prefix )
00752 {
00753     IMOAB_CHECKPOINTER( prefix, 2 );
00754     IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
00755 
00756     std::ostringstream file_name;
00757     int rank = 0, size = 1;
00758 #ifdef MOAB_HAVE_MPI
00759     rank = context.pcomms[*pid]->rank();
00760     size = context.pcomms[*pid]->size();
00761 #endif
00762     file_name << prefix << "_" << size << "_" << rank << ".h5m";
00763     // Now let us actually write the file to disk with appropriate options
00764     ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
00765 
00766     return moab::MB_SUCCESS;
00767 }
00768 
00769 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
00770 {
00771     // this will include ghost elements info
00772     appData& data        = context.appDatas[*pid];
00773     EntityHandle fileSet = data.file_set;
00774 
00775     // first clear all data ranges; this can be called after ghosting
00776     data.all_verts.clear();
00777     data.primary_elems.clear();
00778     data.local_verts.clear();
00779     data.ghost_vertices.clear();
00780     data.owned_elems.clear();
00781     data.ghost_elems.clear();
00782     data.mat_sets.clear();
00783     data.neu_sets.clear();
00784     data.diri_sets.clear();
00785 
00786     // Let us get all the vertex entities
00787     ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval );  // recursive
00788 
00789     // Let us check first entities of dimension = 3
00790     data.dimension = 3;
00791     rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00792 
00793     if( data.primary_elems.empty() )
00794     {
00795         // No 3-D elements. Let us check entities of dimension = 2
00796         data.dimension = 2;
00797         rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00798 
00799         if( data.primary_elems.empty() )
00800         {
00801             // No 3-D or 2-D elements. Let us check entities of dimension = 1
00802             data.dimension = 1;
00803             rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00804 
00805             if( data.primary_elems.empty() )
00806             {
00807                 // no elements of dimension 1 or 2 or 3; it could happen for point clouds
00808                 data.dimension = 0;
00809             }
00810         }
00811     }
00812 
00813     // check if the current mesh is just a point cloud
00814     data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
00815 
00816 #ifdef MOAB_HAVE_MPI
00817 
00818     if( context.MPI_initialized )
00819     {
00820         ParallelComm* pco = context.pcomms[*pid];
00821 
00822         // filter ghost vertices, from local
00823         rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
00824 
00825         // Store handles for all ghosted entities
00826         data.ghost_vertices = subtract( data.all_verts, data.local_verts );
00827 
00828         // filter ghost elements, from local
00829         rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
00830 
00831         data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
00832     }
00833     else
00834     {
00835         data.local_verts = data.all_verts;
00836         data.owned_elems = data.primary_elems;
00837     }
00838 
00839 #else
00840 
00841     data.local_verts = data.all_verts;
00842     data.owned_elems = data.primary_elems;
00843 
00844 #endif
00845 
00846     // Get the references for some standard internal tags such as material blocks, BCs, etc
00847     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00848                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00849 
00850     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00851                                                       data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00852 
00853     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00854                                                       data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00855 
00856     return moab::MB_SUCCESS;
00857 }
00858 
00859 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
00860                            int* num_visible_vertices,
00861                            int* num_visible_elements,
00862                            int* num_visible_blocks,
00863                            int* num_visible_surfaceBC,
00864                            int* num_visible_vertexBC )
00865 {
00866     ErrorCode rval;
00867     appData& data        = context.appDatas[*pid];
00868     EntityHandle fileSet = data.file_set;
00869 
00870     // this will include ghost elements
00871     // first clear all data ranges; this can be called after ghosting
00872     if( num_visible_elements )
00873     {
00874         num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
00875         // separate ghost and local/owned primary elements
00876         num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
00877         num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
00878     }
00879     if( num_visible_vertices )
00880     {
00881         num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
00882         num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
00883         // local are those that are not ghosts
00884         num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
00885     }
00886 
00887     if( num_visible_blocks )
00888     {
00889         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00890                                                           data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00891 
00892         num_visible_blocks[2] = data.mat_sets.size();
00893         num_visible_blocks[0] = num_visible_blocks[2];
00894         num_visible_blocks[1] = 0;
00895     }
00896 
00897     if( num_visible_surfaceBC )
00898     {
00899         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00900                                                           data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00901 
00902         num_visible_surfaceBC[2] = 0;
00903         // count how many faces are in each neu set, and how many regions are
00904         // adjacent to them;
00905         int numNeuSets = (int)data.neu_sets.size();
00906 
00907         for( int i = 0; i < numNeuSets; i++ )
00908         {
00909             Range subents;
00910             EntityHandle nset = data.neu_sets[i];
00911             rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
00912 
00913             for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
00914             {
00915                 EntityHandle subent = *it;
00916                 Range adjPrimaryEnts;
00917                 rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
00918 
00919                 num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
00920             }
00921         }
00922 
00923         num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
00924         num_visible_surfaceBC[1] = 0;
00925     }
00926 
00927     if( num_visible_vertexBC )
00928     {
00929         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00930                                                           data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00931 
00932         num_visible_vertexBC[2] = 0;
00933         int numDiriSets         = (int)data.diri_sets.size();
00934 
00935         for( int i = 0; i < numDiriSets; i++ )
00936         {
00937             Range verts;
00938             EntityHandle diset = data.diri_sets[i];
00939             rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
00940 
00941             num_visible_vertexBC[2] += (int)verts.size();
00942         }
00943 
00944         num_visible_vertexBC[0] = num_visible_vertexBC[2];
00945         num_visible_vertexBC[1] = 0;
00946     }
00947 
00948     return moab::MB_SUCCESS;
00949 }
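
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): each output argument is a
// 3-entry array, filled as [0] = owned/local, [1] = ghost, [2] = total visible
// on this task, as populated above.
//
//     int nverts[3], nelems[3], nblocks[3], nsbc[3], ndbc[3];
//     ErrCode rc = iMOAB_GetMeshInfo( &appID, nverts, nelems, nblocks, nsbc, ndbc );
//     // e.g. nelems[0] owned elements, nelems[1] ghost elements, nelems[2] total
// ---------------------------------------------------------------------------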
00950 
00951 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
00952 {
00953     IMOAB_CHECKPOINTER( vertices_length, 2 );
00954     IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
00955 
00956     const Range& verts = context.appDatas[*pid].all_verts;
00957     // check for problems with array length
00958     IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
00959 
00960     // global id tag is context.globalID_tag
00961     return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
00962 }
00963 
00964 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
00965 {
00966     assert( vertices_length && *vertices_length );
00967     assert( visible_global_rank_ID );
00968 
00969     Range& verts = context.appDatas[*pid].all_verts;
00970     int i        = 0;
00971 #ifdef MOAB_HAVE_MPI
00972     ParallelComm* pco = context.pcomms[*pid];
00973 
00974     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00975     {
00976         ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
00977     }
00978 
00979     if( i != *vertices_length )
00980     {
00981         return moab::MB_FAILURE;
00982     }  // warning array allocation problem
00983 
00984 #else
00985 
00986     /* everything owned by proc 0 */
00987     if( (int)verts.size() != *vertices_length )
00988     {
00989         return moab::MB_FAILURE;
00990     }  // warning array allocation problem
00991 
00992     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00993     {
00994         visible_global_rank_ID[i] = 0;
00995     }  // all vertices are owned by processor 0, as this is serial run
00996 
00997 #endif
00998 
00999     return moab::MB_SUCCESS;
01000 }
01001 
01002 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
01003 {
01004     Range& verts = context.appDatas[*pid].all_verts;
01005 
01006     // interleaved coordinates, so that means deep copy anyway
01007     if( *coords_length != 3 * (int)verts.size() )
01008     {
01009         return moab::MB_FAILURE;
01010     }
01011 
01012     ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
01013 
01014     return moab::MB_SUCCESS;
01015 }
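
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): retrieving global ids and
// interleaved coordinates for all visible vertices, sized with the counts
// returned by iMOAB_GetMeshInfo (nverts[2] is the total visible vertex count).
//
//     int nv = nverts[2];
//     std::vector< iMOAB_GlobalID > gids( nv );
//     std::vector< double > coords( 3 * nv );
//     ErrCode rc = iMOAB_GetVertexID( &appID, &nv, gids.data() );
//     int clen = 3 * nv;
//     rc = iMOAB_GetVisibleVerticesCoordinates( &appID, &clen, coords.data() );
// ---------------------------------------------------------------------------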
01016 
01017 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
01018 {
01019     // local block ids are counted from 0 to the number of visible blocks;
01020     // the global block id returned is actually the material set tag value
01021     Range& matSets = context.appDatas[*pid].mat_sets;
01022 
01023     if( *block_length != (int)matSets.size() )
01024     {
01025         return moab::MB_FAILURE;
01026     }
01027 
01028     // return the material set tag value (context.material_tag is the material set tag)
01029     ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
01030 
01031     // populate map with index
01032     std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
01033     for( unsigned i = 0; i < matSets.size(); i++ )
01034     {
01035         matIdx[global_block_IDs[i]] = i;
01036     }
01037 
01038     return moab::MB_SUCCESS;
01039 }
01040 
01041 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
01042                             iMOAB_GlobalID* global_block_ID,
01043                             int* vertices_per_element,
01044                             int* num_elements_in_block )
01045 {
01046     assert( global_block_ID );
01047 
01048     std::map< int, int >& matMap      = context.appDatas[*pid].matIndex;
01049     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01050 
01051     if( it == matMap.end() )
01052     {
01053         return moab::MB_FAILURE;
01054     }  // error in finding block with id
01055 
01056     int blockIndex          = matMap[*global_block_ID];
01057     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01058     Range blo_elems;
01059     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
01060 
01061     if( MB_SUCCESS != rval || blo_elems.empty() )
01062     {
01063         return moab::MB_FAILURE;
01064     }
01065 
01066     EntityType type = context.MBI->type_from_handle( blo_elems[0] );
01067 
01068     if( !blo_elems.all_of_type( type ) )
01069     {
01070         return moab::MB_FAILURE;
01071     }  // not all of the same type
01072 
01073     const EntityHandle* conn = NULL;
01074     int num_verts            = 0;
01075     rval                     = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
01076 
01077     *vertices_per_element  = num_verts;
01078     *num_elements_in_block = (int)blo_elems.size();
01079 
01080     return moab::MB_SUCCESS;
01081 }
01082 
01083 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
01084                                       int* num_visible_elements,
01085                                       iMOAB_GlobalID* element_global_IDs,
01086                                       int* ranks,
01087                                       iMOAB_GlobalID* block_IDs )
01088 {
01089     appData& data = context.appDatas[*pid];
01090 #ifdef MOAB_HAVE_MPI
01091     ParallelComm* pco = context.pcomms[*pid];
01092 #endif
01093 
01094     ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
01095 
01096     int i = 0;
01097 
01098     for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
01099     {
01100 #ifdef MOAB_HAVE_MPI
01101         rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
01102 
01103 #else
01104         /* everything owned by task 0 */
01105         ranks[i]             = 0;
01106 #endif
01107     }
01108 
01109     for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
01110     {
01111         EntityHandle matMeshSet = *mit;
01112         Range elems;
01113         rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01114 
01115         int valMatTag;
01116         rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
01117 
01118         for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
01119         {
01120             EntityHandle eh = *eit;
01121             int index       = data.primary_elems.index( eh );
01122 
01123             if( -1 == index )
01124             {
01125                 return moab::MB_FAILURE;
01126             }
01127 
01128             if( -1 >= *num_visible_elements )
01129             {
01130                 return moab::MB_FAILURE;
01131             }
01132 
01133             block_IDs[index] = valMatTag;
01134         }
01135     }
01136 
01137     return moab::MB_SUCCESS;
01138 }
01139 
01140 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
01141                                              iMOAB_GlobalID* global_block_ID,
01142                                              int* connectivity_length,
01143                                              int* element_connectivity )
01144 {
01145     assert( global_block_ID );      // ensure global block ID argument is not null
01146     assert( connectivity_length );  // ensure connectivity length argument is not null
01147 
01148     appData& data                     = context.appDatas[*pid];
01149     std::map< int, int >& matMap      = data.matIndex;
01150     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01151 
01152     if( it == matMap.end() )
01153     {
01154         return moab::MB_FAILURE;
01155     }  // error in finding block with id
01156 
01157     int blockIndex          = matMap[*global_block_ID];
01158     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01159     std::vector< EntityHandle > elems;
01160 
01161     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01162 
01163     if( elems.empty() )
01164     {
01165         return moab::MB_FAILURE;
01166     }
01167 
01168     std::vector< EntityHandle > vconnect;
01169     rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
01170 
01171     if( *connectivity_length != (int)vconnect.size() )
01172     {
01173         return moab::MB_FAILURE;
01174     }  // mismatched sizes
01175 
01176     for( int i = 0; i < *connectivity_length; i++ )
01177     {
01178         int inx = data.all_verts.index( vconnect[i] );
01179 
01180         if( -1 == inx )
01181         {
01182             return moab::MB_FAILURE;
01183         }  // error, vertex not in local range
01184 
01185         element_connectivity[i] = inx;
01186     }
01187 
01188     return moab::MB_SUCCESS;
01189 }
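
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): the typical block query
// sequence is iMOAB_GetBlockID, then iMOAB_GetBlockInfo per block, then
// iMOAB_GetBlockElementConnectivities, which returns local vertex indices
// (into the visible vertex range). Counts come from iMOAB_GetMeshInfo.
//
//     int nblks = nblocks[2];
//     std::vector< iMOAB_GlobalID > blockIDs( nblks );
//     ErrCode rc = iMOAB_GetBlockID( &appID, &nblks, blockIDs.data() );
//     int vpe = 0, nelb = 0;
//     rc = iMOAB_GetBlockInfo( &appID, &blockIDs[0], &vpe, &nelb );
//     std::vector< int > conn( vpe * nelb );  // vpe vertex indices per element
//     int clen = (int)conn.size();
//     rc = iMOAB_GetBlockElementConnectivities( &appID, &blockIDs[0], &clen, conn.data() );
// ---------------------------------------------------------------------------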
01190 
01191 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
01192                                       iMOAB_LocalID* elem_index,
01193                                       int* connectivity_length,
01194                                       int* element_connectivity )
01195 {
01196     assert( elem_index );           // ensure element index argument is not null
01197     assert( connectivity_length );  // ensure connectivity length argument is not null
01198 
01199     appData& data = context.appDatas[*pid];
01200     assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
01201 
01202     int num_nodes;
01203     const EntityHandle* conn;
01204 
01205     EntityHandle eh = data.primary_elems[*elem_index];
01206 
01207     ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
01208 
01209     if( *connectivity_length < num_nodes )
01210     {
01211         return moab::MB_FAILURE;
01212     }  // wrong number of vertices
01213 
01214     for( int i = 0; i < num_nodes; i++ )
01215     {
01216         int index = data.all_verts.index( conn[i] );
01217 
01218         if( -1 == index )
01219         {
01220             return moab::MB_FAILURE;
01221         }
01222 
01223         element_connectivity[i] = index;
01224     }
01225 
01226     *connectivity_length = num_nodes;
01227 
01228     return moab::MB_SUCCESS;
01229 }
01230 
01231 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
01232                                    iMOAB_GlobalID* global_block_ID,
01233                                    int* num_elements_in_block,
01234                                    int* element_ownership )
01235 {
01236     assert( global_block_ID );        // ensure global block ID argument is not null
01237     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01238 
01239     std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
01240 
01241     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01242 
01243     if( it == matMap.end() )
01244     {
01245         return moab::MB_FAILURE;
01246     }  // error in finding block with id
01247 
01248     int blockIndex          = matMap[*global_block_ID];
01249     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01250     Range elems;
01251 
01252     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01253 
01254     if( elems.empty() )
01255     {
01256         return moab::MB_FAILURE;
01257     }
01258 
01259     if( *num_elements_in_block != (int)elems.size() )
01260     {
01261         return moab::MB_FAILURE;
01262     }  // bad memory allocation
01263 
01264     int i = 0;
01265 #ifdef MOAB_HAVE_MPI
01266     ParallelComm* pco = context.pcomms[*pid];
01267 #endif
01268 
01269     for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
01270     {
01271 #ifdef MOAB_HAVE_MPI
01272         rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
01273 #else
01274         element_ownership[i] = 0; /* owned by 0 */
01275 #endif
01276     }
01277 
01278     return moab::MB_SUCCESS;
01279 }
01280 
01281 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
01282                             iMOAB_GlobalID* global_block_ID,
01283                             int* num_elements_in_block,
01284                             iMOAB_GlobalID* global_element_ID,
01285                             iMOAB_LocalID* local_element_ID )
01286 {
01287     assert( global_block_ID );        // ensure global block ID argument is not null
01288     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01289 
01290     appData& data                = context.appDatas[*pid];
01291     std::map< int, int >& matMap = data.matIndex;
01292 
01293     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01294 
01295     if( it == matMap.end() )
01296     {
01297         return moab::MB_FAILURE;
01298     }  // error in finding block with id
01299 
01300     int blockIndex          = matMap[*global_block_ID];
01301     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01302     Range elems;
01303     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01304 
01305     if( elems.empty() )
01306     {
01307         return moab::MB_FAILURE;
01308     }
01309 
01310     if( *num_elements_in_block != (int)elems.size() )
01311     {
01312         return moab::MB_FAILURE;
01313     }  // bad memory allocation
01314 
01315     rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
01316 
01317     // check that elems are among primary_elems in data
01318     for( int i = 0; i < *num_elements_in_block; i++ )
01319     {
01320         local_element_ID[i] = data.primary_elems.index( elems[i] );
01321 
01322         if( -1 == local_element_ID[i] )
01323         {
01324             return moab::MB_FAILURE;
01325         }  // error, not in local primary elements
01326     }
01327 
01328     return moab::MB_SUCCESS;
01329 }
01330 
01331 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
01332                                      int* surface_BC_length,
01333                                      iMOAB_LocalID* local_element_ID,
01334                                      int* reference_surface_ID,
01335                                      int* boundary_condition_value )
01336 {
01337     // we have to fill BC data for the neumann sets
01338     ErrorCode rval;
01339 
01340     // it was counted above, in GetMeshInfo
01341     appData& data  = context.appDatas[*pid];
01342     int numNeuSets = (int)data.neu_sets.size();
01343 
01344     int index = 0;  // index [0, surface_BC_length) for the arrays returned
01345 
01346     for( int i = 0; i < numNeuSets; i++ )
01347     {
01348         Range subents;
01349         EntityHandle nset = data.neu_sets[i];
01350         rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
01351 
01352         int neuVal;
01353         rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
01354 
01355         for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
01356         {
01357             EntityHandle subent = *it;
01358             Range adjPrimaryEnts;
01359             rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
01360 
01361             // get global id of the primary ents, and side number of the quad/subentity
01362             // this is moab ordering
01363             for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
01364             {
01365                 EntityHandle primaryEnt = *pit;
01366                 // get global id
01367                 /*int globalID;
01368                 rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
01369                 if (MB_SUCCESS!=rval)
01370                   return moab::MB_FAILURE;
01371                 global_element_ID[index] = globalID;*/
01372 
01373                 // get local element id
01374                 local_element_ID[index] = data.primary_elems.index( primaryEnt );
01375 
01376                 if( -1 == local_element_ID[index] )
01377                 {
01378                     return moab::MB_FAILURE;
01379                 }  // did not find the element locally
01380 
01381                 int side_number, sense, offset;
01382                 rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
01383 
01384                 reference_surface_ID[index]     = side_number + 1;  // moab is from 0 to 5, it needs 1 to 6
01385                 boundary_condition_value[index] = neuVal;
01386                 index++;
01387             }
01388         }
01389     }
01390 
01391     if( index != *surface_BC_length )
01392     {
01393         return moab::MB_FAILURE;
01394     }  // error in array allocations
01395 
01396     return moab::MB_SUCCESS;
01397 }
01398 
01399 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
01400                                     int* vertex_BC_length,
01401                                     iMOAB_LocalID* local_vertex_ID,
01402                                     int* boundary_condition_value )
01403 {
01404     ErrorCode rval;
01405 
01406     // it was counted above, in GetMeshInfo
01407     appData& data   = context.appDatas[*pid];
01408     int numDiriSets = (int)data.diri_sets.size();
01409     int index       = 0;  // index [0, *vertex_BC_length) for the arrays returned
01410 
01411     for( int i = 0; i < numDiriSets; i++ )
01412     {
01413         Range verts;
01414         EntityHandle diset = data.diri_sets[i];
01415         rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
01416 
01417         int diriVal;
01418         rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
01419 
01420         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01421         {
01422             EntityHandle vt = *vit;
01423             /*int vgid;
01424             rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
01425             if (MB_SUCCESS!=rval)
01426               return moab::MB_FAILURE;
01427             global_vertext_ID[index] = vgid;*/
01428             local_vertex_ID[index] = data.all_verts.index( vt );
01429 
01430             if( -1 == local_vertex_ID[index] )
01431             {
01432                 return moab::MB_FAILURE;
01433             }  // vertex was not found
01434 
01435             boundary_condition_value[index] = diriVal;
01436             index++;
01437         }
01438     }
01439 
01440     if( *vertex_BC_length != index )
01441     {
01442         return moab::MB_FAILURE;
01443     }  // array allocation issue
01444 
01445     return moab::MB_SUCCESS;
01446 }
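
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of iMOAB.cpp): retrieving vertex BC data
// sized with the counts from iMOAB_GetMeshInfo (ndbc[2] is the total number of
// Dirichlet vertex entries counted there).
//
//     int nvbc = ndbc[2];
//     std::vector< iMOAB_LocalID > bc_verts( nvbc );
//     std::vector< int > bc_vals( nvbc );
//     ErrCode rc = iMOAB_GetPointerToVertexBC( &appID, &nvbc, bc_verts.data(), bc_vals.data() );
// ---------------------------------------------------------------------------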
01447 
01448 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
01449                                 const iMOAB_String tag_storage_name,
01450                                 int* tag_type,
01451                                 int* components_per_entity,
01452                                 int* tag_index )
01453 {
01454     // see if the tag already exists, and if so, check its type and length
01455     if( *tag_type < 0 || *tag_type > 5 )
01456     {
01457         return moab::MB_FAILURE;
01458     }  // we have 6 types of tags supported so far
01459 
01460     DataType tagDataType;
01461     TagType tagType;
01462     void* defaultVal        = NULL;
01463     int* defInt             = new int[*components_per_entity];
01464     double* defDouble       = new double[*components_per_entity];
01465     EntityHandle* defHandle = new EntityHandle[*components_per_entity];
01466 
01467     for( int i = 0; i < *components_per_entity; i++ )
01468     {
01469         defInt[i]    = 0;
01470         defDouble[i] = -1e+10;
01471         defHandle[i] = (EntityHandle)0;
01472     }
01473 
01474     switch( *tag_type )
01475     {
01476         case 0:
01477             tagDataType = MB_TYPE_INTEGER;
01478             tagType     = MB_TAG_DENSE;
01479             defaultVal  = defInt;
01480             break;
01481 
01482         case 1:
01483             tagDataType = MB_TYPE_DOUBLE;
01484             tagType     = MB_TAG_DENSE;
01485             defaultVal  = defDouble;
01486             break;
01487 
01488         case 2:
01489             tagDataType = MB_TYPE_HANDLE;
01490             tagType     = MB_TAG_DENSE;
01491             defaultVal  = defHandle;
01492             break;
01493 
01494         case 3:
01495             tagDataType = MB_TYPE_INTEGER;
01496             tagType     = MB_TAG_SPARSE;
01497             defaultVal  = defInt;
01498             break;
01499 
01500         case 4:
01501             tagDataType = MB_TYPE_DOUBLE;
01502             tagType     = MB_TAG_SPARSE;
01503             defaultVal  = defDouble;
01504             break;
01505 
01506         case 5:
01507             tagDataType = MB_TYPE_HANDLE;
01508             tagType     = MB_TAG_SPARSE;
01509             defaultVal  = defHandle;
01510             break;
01511 
01512         default: {
01513             delete[] defInt;
01514             delete[] defDouble;
01515             delete[] defHandle;
01516             return moab::MB_FAILURE;
01517         }  // error
01518     }
01519 
01520     Tag tagHandle;
01521     // split the storage names if given as a ":"-separated list
01522 
01523     std::string tag_name( tag_storage_name );
01524 
01525     //  first separate the names of the tags
01526     // we assume that there are separators ":" between the tag names
01527     std::vector< std::string > tagNames;
01528     std::string separator( ":" );
01529     split_tag_names( tag_name, separator, tagNames );
01530 
01531     ErrorCode rval = moab::MB_SUCCESS;  // assume success already :)
01532     appData& data  = context.appDatas[*pid];
01533     int already_defined_tags = 0;
01534 
01535     for( size_t i = 0; i < tagNames.size(); i++ )
01536     {
01537         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), *components_per_entity, tagDataType, tagHandle,
01538                                             tagType | MB_TAG_EXCL | MB_TAG_CREAT, defaultVal );
01539 
01540         if( MB_ALREADY_ALLOCATED == rval )
01541         {
01542             std::map< std::string, Tag >& mTags        = data.tagMap;
01543             std::map< std::string, Tag >::iterator mit = mTags.find( tagNames[i].c_str() );
01544 
01545             if( mit == mTags.end() )
01546             {
01547                 // add it to the map
01548                 mTags[tagNames[i]] = tagHandle;
01549                 // push it to the list of tags, too
01550                 *tag_index = (int)data.tagList.size();
01551                 data.tagList.push_back( tagHandle );
01552             }
01553             rval = MB_SUCCESS;
01554             already_defined_tags++;
01555         }
01556         else if( MB_SUCCESS == rval )
01557         {
01558             data.tagMap[tagNames[i]] = tagHandle;
01559             *tag_index               = (int)data.tagList.size();
01560             data.tagList.push_back( tagHandle );
01561         }
01562         else
01563         {
01564             rval = moab::MB_FAILURE;  // some tags were not created
01565         }
01566     }
01567     // we don't need default values anymore, avoid leaks
01568     int rankHere  = 0;
01569 #ifdef MOAB_HAVE_MPI
01570     ParallelComm* pco = context.pcomms[*pid];
01571     rankHere          = pco->rank();
01572 #endif
01573     if( !rankHere && already_defined_tags )
01574         std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
01575                   << " has "  << already_defined_tags << " already defined tags out of " << tagNames.size() << " tags \n";
01576     delete[] defInt;
01577     delete[] defDouble;
01578     delete[] defHandle;
01579     return rval;
01580 }
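
/* A minimal usage sketch (not part of the implementation): define a dense, double-valued tag
 * with one component per entity; tag_type follows the 0-5 encoding handled above
 * (0/1/2 = dense int/double/handle, 3/4/5 = the sparse variants). "my_field" is an arbitrary
 * example tag name:
 *
 *    int tag_type = 1, num_components = 1, tag_index = -1;
 *    ErrCode ierr = iMOAB_DefineTagStorage( pid, "my_field", &tag_type, &num_components, &tag_index );
 *    // tag_index can later be passed to iMOAB_SynchronizeTags or iMOAB_ReduceTagsMax
 */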
01581 
01582 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
01583                                 const iMOAB_String tag_storage_name,
01584                                 int* num_tag_storage_length,
01585                                 int* ent_type,
01586                                 int* tag_storage_data )
01587 {
01588     std::string tag_name( tag_storage_name );
01589 
01590     // Get the application data
01591     appData& data = context.appDatas[*pid];
01592 
01593     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01594     {
01595         return moab::MB_FAILURE;
01596     }  // tag not defined
01597 
01598     Tag tag = data.tagMap[tag_name];
01599 
01600     int tagLength  = 0;
01601     ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01602 
01603     DataType dtype;
01604     rval = context.MBI->tag_get_data_type( tag, dtype );
01605 
01606     if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
01607     {
01608         return moab::MB_FAILURE;
01609     }
01610 
01611     // set it on a subset of entities, based on type and length
01612     Range* ents_to_set;
01613 
01614     if( *ent_type == 0 )  // vertices
01615     {
01616         ents_to_set = &data.all_verts;
01617     }
01618     else  // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
01619     {
01620         ents_to_set = &data.primary_elems;
01621     }
01622 
01623     int nents_to_be_set = *num_tag_storage_length / tagLength;
01624 
01625     if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
01626     {
01627         return moab::MB_FAILURE;
01628     }  // too many entities to be set, or too few
01629 
01630     // restrict the range; everything is contiguous; or not?
01631 
01632     // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
01633     // 1 ) );
01634     rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
01635 
01636     return moab::MB_SUCCESS;  // no error
01637 }
01638 
01639 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
01640                                 const iMOAB_String tag_storage_name,
01641                                 int* num_tag_storage_length,
01642                                 int* ent_type,
01643                                 int* tag_storage_data )
01644 {
01645     ErrorCode rval;
01646     std::string tag_name( tag_storage_name );
01647 
01648     appData& data = context.appDatas[*pid];
01649 
01650     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01651     {
01652         return moab::MB_FAILURE;
01653     }  // tag not defined
01654 
01655     Tag tag = data.tagMap[tag_name];
01656 
01657     int tagLength = 0;
01658     rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01659 
01660     DataType dtype;
01661     rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01662 
01663     if( dtype != MB_TYPE_INTEGER )
01664     {
01665         return moab::MB_FAILURE;
01666     }
01667 
01668     // set it on a subset of entities, based on type and length
01669     Range* ents_to_get;
01670 
01671     if( *ent_type == 0 )  // vertices
01672     {
01673         ents_to_get = &data.all_verts;
01674     }
01675     else  // if (*ent_type == 1)
01676     {
01677         ents_to_get = &data.primary_elems;
01678     }
01679 
01680     int nents_to_get = *num_tag_storage_length / tagLength;
01681 
01682     if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
01683     {
01684         return moab::MB_FAILURE;
01685     }  // too many entities to get, or too few
01686 
01687     // restrict the range; everything is contiguous; or not?
01688     // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
01689     // ) );
01690 
01691     rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
01692 
01693     return moab::MB_SUCCESS;  // no error
01694 }
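
/* A minimal round-trip sketch for the integer tag routines (not part of the implementation),
 * assuming "my_int_tag" (an arbitrary example name) was defined for this application with
 * tag_type 0 and one component, and that num_local_verts is the local vertex count:
 *
 *    int ent_type = 0;                       // 0 = vertices, 1 = primary elements
 *    int nvals    = num_local_verts;         // one value per vertex for a length-1 tag
 *    std::vector< int > vals( nvals, 7 );
 *    ErrCode ierr = iMOAB_SetIntTagStorage( pid, "my_int_tag", &nvals, &ent_type, vals.data() );
 *    std::vector< int > check( nvals );
 *    ierr = iMOAB_GetIntTagStorage( pid, "my_int_tag", &nvals, &ent_type, check.data() );
 */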
01695 
01696 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
01697                                    const iMOAB_String tag_storage_names,
01698                                    int* num_tag_storage_length,
01699                                    int* ent_type,
01700                                    double* tag_storage_data )
01701 {
01702     ErrorCode rval;
01703     std::string tag_names( tag_storage_names );
01704     // exactly the same code as for the int tag, except that the data type must be MB_TYPE_DOUBLE
01705     std::vector< std::string > tagNames;
01706     std::vector< Tag > tagHandles;
01707     std::string separator( ":" );
01708     split_tag_names( tag_names, separator, tagNames );
01709 
01710     appData& data      = context.appDatas[*pid];
01711     Range* ents_to_set = NULL;
01712 
01713     if( *ent_type == 0 )  // vertices
01714     {
01715         ents_to_set = &data.all_verts;
01716     }
01717     else if( *ent_type == 1 )
01718     {
01719         ents_to_set = &data.primary_elems;
01720     }
01721 
01722     int nents_to_be_set = (int)( *ents_to_set ).size();
01723     int position        = 0;
01724 
01725     for( size_t i = 0; i < tagNames.size(); i++ )
01726     {
01727         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
01728         {
01729             return moab::MB_FAILURE;
01730         }  // some tag not defined yet in the app
01731 
01732         Tag tag = data.tagMap[tagNames[i]];
01733 
01734         int tagLength = 0;
01735         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01736 
01737         DataType dtype;
01738         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01739 
01740         if( dtype != MB_TYPE_DOUBLE )
01741         {
01742             return moab::MB_FAILURE;
01743         }
01744 
01745         // set it on the subset of entities, based on type and length
01746         if( position + tagLength * nents_to_be_set > *num_tag_storage_length )
01747             return moab::MB_FAILURE;  // too many entity values to be set
01748 
01749         rval = context.MBI->tag_set_data( tag, *ents_to_set, &tag_storage_data[position] );MB_CHK_ERR( rval );
01750         // increment position to next tag
01751         position = position + tagLength * nents_to_be_set;
01752     }
01753     return moab::MB_SUCCESS;  // no error
01754 }
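
/* A sketch of the data layout expected above (not part of the implementation): for a
 * ":"-separated list of tags, the values are unrolled tag by tag, each tag occupying
 * tagLength * nents consecutive entries before the next tag starts. For two hypothetical
 * scalar tags "T" and "u" on nv local vertices:
 *
 *    int ent_type  = 0;
 *    int total_len = 2 * nv;                 // nv values for "T", then nv values for "u"
 *    std::vector< double > vals( total_len );
 *    // vals[0 .. nv-1]    -> tag "T", in the traversal order of data.all_verts
 *    // vals[nv .. 2*nv-1] -> tag "u", same order
 *    ErrCode ierr = iMOAB_SetDoubleTagStorage( pid, "T:u", &total_len, &ent_type, vals.data() );
 *    // iMOAB_GetDoubleTagStorage uses the same layout on output
 */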
01755 
01756 ErrCode iMOAB_SetDoubleTagStorageWithGid( iMOAB_AppID pid,
01757                                           const iMOAB_String tag_storage_names,
01758                                           int* num_tag_storage_length,
01759                                           int* ent_type,
01760                                           double* tag_storage_data,
01761                                           int* globalIds )
01762 {
01763     ErrorCode rval;
01764     std::string tag_names( tag_storage_names );
01765     // exactly the same code as for the int tag, except that the data type must be MB_TYPE_DOUBLE
01766     std::vector< std::string > tagNames;
01767     std::vector< Tag > tagHandles;
01768     std::string separator( ":" );
01769     split_tag_names( tag_names, separator, tagNames );
01770 
01771     appData& data      = context.appDatas[*pid];
01772     Range* ents_to_set = NULL;
01773 
01774     if( *ent_type == 0 )  // vertices
01775     {
01776         ents_to_set = &data.all_verts;
01777     }
01778     else if( *ent_type == 1 )
01779     {
01780         ents_to_set = &data.primary_elems;
01781     }
01782 
01783     int nents_to_be_set = (int)( *ents_to_set ).size();
01784 
01785     Tag gidTag = context.MBI->globalId_tag();
01786     std::vector< int > gids;
01787     gids.resize( nents_to_be_set );
01788     rval = context.MBI->tag_get_data( gidTag, *ents_to_set, &gids[0] );MB_CHK_ERR( rval );
01789 
01790     // we will need to set the tags according to the global ids passed in;
01791     // the order in tag_storage_data is the same as the order in globalIds, but the order
01792     // in the local range is given by gids
01793     std::map< int, EntityHandle > eh_by_gid;
01794     int i = 0;
01795     for( Range::iterator it = ents_to_set->begin(); it != ents_to_set->end(); ++it, ++i )
01796     {
01797         eh_by_gid[gids[i]] = *it;
01798     }
01799 
01800     std::vector< int > tagLengths( tagNames.size() );
01801     std::vector< Tag > tagList;
01802     size_t total_tag_len = 0;
01803     for( size_t i = 0; i < tagNames.size(); i++ )
01804     {
01805         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
01806         {
01807             MB_SET_ERR( moab::MB_FAILURE, "tag missing" );
01808         }  // some tag not defined yet in the app
01809 
01810         Tag tag = data.tagMap[tagNames[i]];
01811         tagList.push_back( tag );
01812 
01813         int tagLength = 0;
01814         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01815 
01816         total_tag_len += tagLength;
01817         tagLengths[i] = tagLength;
01818         DataType dtype;
01819         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01820 
01821         if( dtype != MB_TYPE_DOUBLE )
01822         {
01823             MB_SET_ERR( moab::MB_FAILURE, "tag not double type" );
01824         }
01825     }
01826     bool serial = true;
01827 #ifdef MOAB_HAVE_MPI
01828     ParallelComm* pco = context.pcomms[*pid];
01829     unsigned num_procs     = pco->size();
01830     if( num_procs > 1 ) serial = false;
01831 #endif
01832 
01833     if( serial )
01834     {
01835         assert( total_tag_len * nents_to_be_set - *num_tag_storage_length == 0 );
01836         // tags are unrolled, we loop over global ids first, then careful about tags
01837         for( int i = 0; i < nents_to_be_set; i++ )
01838         {
01839             int gid         = globalIds[i];
01840             EntityHandle eh = eh_by_gid[gid];
01841             // now loop over tags
01842             int indexInTagValues = 0;  //
01843             for( size_t j = 0; j < tagList.size(); j++ )
01844             {
01845                 indexInTagValues += i * tagLengths[j];
01846                 rval = context.MBI->tag_set_data( tagList[j], &eh, 1, &tag_storage_data[indexInTagValues] );MB_CHK_ERR( rval );
01847                 // advance the pointer/index
01848                 indexInTagValues += ( nents_to_be_set - i ) * tagLengths[j];  // at the end of tag data
01849             }
01850         }
01851     }
01852 #ifdef MOAB_HAVE_MPI
01853     else  // it can be not serial only if pco->size() > 1, parallel
01854     {
01855         // in this case, we have to use 2 crystal routers, to send data to the processor that needs it
01856         // we will first create a tuple list to the rendezvous points, then from there send to the processor that requested it
01857         // it is a 2-hop global gather-scatter
01858         int nbLocalVals = *num_tag_storage_length / ( (int)tagNames.size() );
01859         assert( nbLocalVals * tagNames.size() - *num_tag_storage_length == 0 );
01860         TupleList TLsend;
01861         TLsend.initialize( 2, 0, 0, total_tag_len, nbLocalVals );  //  to proc, marker(gid), total_tag_len doubles
01862         TLsend.enableWriteAccess();
01863         // the processor id that handles global_id is global_id % num_procs
01864 
01865         int indexInRealLocal = 0;
01866         for( int i = 0; i < nbLocalVals; i++ )
01867         {
01868             // to proc, marker (global id)
01869             int marker              = globalIds[i];
01870             int to_proc             = marker % num_procs;
01871             int n                   = TLsend.get_n();
01872             TLsend.vi_wr[2 * n]     = to_proc;  // send to processor
01873             TLsend.vi_wr[2 * n + 1] = marker;
01874             int indexInTagValues = 0;
01875             // collect the tag data, tag by tag
01876             for( size_t j = 0; j < tagList.size(); j++ )
01877             {
01878                 indexInTagValues += i * tagLengths[j];
01879                 for( int k = 0; k < tagLengths[j]; k++ )
01880                 {
01881                     TLsend.vr_wr[indexInRealLocal++] = tag_storage_data[indexInTagValues + k];
01882                 }
01883                 indexInTagValues += ( nbLocalVals - i ) * tagLengths[j];
01884             }
01885             TLsend.inc_n();
01886         }
01887 
01888         assert( nbLocalVals * total_tag_len - indexInRealLocal == 0 );
01889         // send now requests, basically inform the rendez-vous point who needs a particular global id
01890         // send the data to the other processors:
01891         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLsend, 0 );
01892         TupleList TLreq;
01893         TLreq.initialize( 2, 0, 0, 0, nents_to_be_set );
01894         TLreq.enableWriteAccess();
01895         for( int i = 0; i < nents_to_be_set; i++ )
01896         {
01897             // to proc, marker
01898             int marker             = gids[i];
01899             int to_proc            = marker % num_procs;
01900             int n                  = TLreq.get_n();
01901             TLreq.vi_wr[2 * n]     = to_proc;  // send to processor
01902             TLreq.vi_wr[2 * n + 1] = marker;
01903             // no tag data here; this tuple only requests the marker
01904             TLreq.inc_n();
01905         }
01906         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLreq, 0 );
01907 
01908         // we know now that process TLreq.vi_wr[2 * n] needs tags for gid TLreq.vi_wr[2 * n + 1]
01909         // we should first order by global id, and then build the new TL with send to proc, global id and
01910         // tags for it
01911         // sort by global ids the tuple lists
01912         moab::TupleList::buffer sort_buffer;
01913         sort_buffer.buffer_init( TLreq.get_n() );
01914         TLreq.sort( 1, &sort_buffer );
01915         sort_buffer.reset();
01916         sort_buffer.buffer_init( TLsend.get_n() );
01917         TLsend.sort( 1, &sort_buffer );
01918         sort_buffer.reset();
01919         // now send the tag values to the proc that requested it
01920         // in theory, for a full partition, TLreq and TLsend should have the same size, and
01921         // each dof should have exactly one target proc. Is that true or not in general?
01922         // how do we plan to use this? Is it better to store the comm graph for future use?
01923 
01924         // start copy from comm graph settle
01925         TupleList TLBack;
01926         TLBack.initialize( 3, 0, 0, total_tag_len, 0 );  // to proc, marker, tag from proc , tag values
01927         TLBack.enableWriteAccess();
01928 
01929         int n1 = TLreq.get_n();
01930         int n2 = TLsend.get_n();
01931 
01932         int indexInTLreq  = 0;
01933         int indexInTLsend = 0;  // advance both, according to the marker
01934         if( n1 > 0 && n2 > 0 )
01935         {
01936 
01937             while( indexInTLreq < n1 && indexInTLsend < n2 )  // if any is over, we are done
01938             {
01939                 int currentValue1 = TLreq.vi_rd[2 * indexInTLreq + 1];
01940                 int currentValue2 = TLsend.vi_rd[2 * indexInTLsend + 1];
01941                 if( currentValue1 < currentValue2 )
01942                 {
01943                     // we have a big problem; basically, we are saying that
01944                     // dof currentValue is on one model and not on the other
01945                     // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
01946                     indexInTLreq++;
01947                     continue;
01948                 }
01949                 if( currentValue1 > currentValue2 )
01950                 {
01951                     // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
01952                     indexInTLsend++;
01953                     continue;
01954                 }
01955                 int size1 = 1;
01956                 int size2 = 1;
01957                 while( indexInTLreq + size1 < n1 && currentValue1 == TLreq.vi_rd[2 * ( indexInTLreq + size1 ) + 1] )
01958                     size1++;
01959                 while( indexInTLsend + size2 < n2 && currentValue2 == TLsend.vi_rd[2 * ( indexInTLsend + size2 ) + 1] )
01960                     size2++;
01961                 // must be found in both lists, find the start and end indices
01962                 for( int i1 = 0; i1 < size1; i1++ )
01963                 {
01964                     for( int i2 = 0; i2 < size2; i2++ )
01965                     {
01966                         // send the info back to components
01967                         int n = TLBack.get_n();
01968                         TLBack.reserve();
01969                         TLBack.vi_wr[3 * n] = TLreq.vi_rd[2 * ( indexInTLreq + i1 )];  // send back to the proc marker
01970                                                                                        // came from, info from comp2
01971                         TLBack.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend, just for verif ?)
01972                         TLBack.vi_wr[3 * n + 2] = TLsend.vi_rd[2 * ( indexInTLsend + i2 )];  // from proc on comp2
01973                         // also fill tag values
01974                         for( size_t k = 0; k < total_tag_len; k++ )
01975                         {
01976                             TLBack.vr_rd[total_tag_len * n + k] =
01977                                 TLsend.vr_rd[total_tag_len * indexInTLsend + k];  // deep copy of tag values
01978                         }
01979                     }
01980                 }
01981                 indexInTLreq += size1;
01982                 indexInTLsend += size2;
01983             }
01984         }
01985         ( pco->proc_config().crystal_router() )->gs_transfer( 1, TLBack, 0 );
01986         // end copy from comm graph
01987         // after we are done sending, we need to set those tag values, in a reverse process compared to send
01988         n1             = TLBack.get_n();
01989         double* ptrVal = &TLBack.vr_rd[0];  //
01990         for( int i = 0; i < n1; i++ )
01991         {
01992             int gid         = TLBack.vi_rd[3 * i + 1];  // marker
01993             EntityHandle eh = eh_by_gid[gid];
01994             // now loop over tags
01995 
01996             for( size_t j = 0; j < tagList.size(); j++ )
01997             {
01998                 rval = context.MBI->tag_set_data( tagList[j], &eh, 1, (void*)ptrVal );MB_CHK_ERR( rval );
01999                 // advance the pointer/index
02000                 ptrVal += tagLengths[j];  // at the end of tag data per call
02001             }
02002         }
02003     }
02004 #endif
02005     return MB_SUCCESS;
02006 }
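
/* A small worked example of the unrolling used above (not part of the implementation): with
 * two tags of lengths {1, 2} and nents_to_be_set = 3, the values for entity i live at offset
 * i for tag 0 and at offsets 3 + 2*i and 3 + 2*i + 1 for tag 1, which is exactly what the
 * indexInTagValues bookkeeping (+= i * len, then += (nents - i) * len) computes. In the
 * parallel branch, global id g is routed to rendezvous rank g % num_procs by the crystal
 * router, matched against the local requests there, and sent back together with its tag
 * values. */
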
02007 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
02008                                    const iMOAB_String tag_storage_names,
02009                                    int* num_tag_storage_length,
02010                                    int* ent_type,
02011                                    double* tag_storage_data )
02012 {
02013     ErrorCode rval;
02014     // exactly the same code, except tag type check
02015     std::string tag_names( tag_storage_names );
02016     // exactly the same code as for the int tag, except that the data type must be MB_TYPE_DOUBLE
02017     std::vector< std::string > tagNames;
02018     std::vector< Tag > tagHandles;
02019     std::string separator( ":" );
02020     split_tag_names( tag_names, separator, tagNames );
02021 
02022     appData& data = context.appDatas[*pid];
02023 
02024     // set it on a subset of entities, based on type and length
02025     Range* ents_to_get = NULL;
02026 
02027     if( *ent_type == 0 )  // vertices
02028     {
02029         ents_to_get = &data.all_verts;
02030     }
02031     else if( *ent_type == 1 )
02032     {
02033         ents_to_get = &data.primary_elems;
02034     }
02035     int nents_to_get = (int)ents_to_get->size();
02036     int position     = 0;
02037     for( size_t i = 0; i < tagNames.size(); i++ )
02038     {
02039         if( data.tagMap.find( tagNames[i] ) == data.tagMap.end() )
02040         {
02041             return moab::MB_FAILURE;
02042         }  // tag not defined
02043 
02044         Tag tag = data.tagMap[tagNames[i]];
02045 
02046         int tagLength = 0;
02047         rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
02048 
02049         DataType dtype;
02050         rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
02051 
02052         if( dtype != MB_TYPE_DOUBLE )
02053         {
02054             return moab::MB_FAILURE;
02055         }
02056 
02057         if( position + nents_to_get * tagLength > *num_tag_storage_length )
02058             return moab::MB_FAILURE;  // too many entity values to get
02059 
02060         rval = context.MBI->tag_get_data( tag, *ents_to_get, &tag_storage_data[position] );MB_CHK_ERR( rval );
02061         position = position + nents_to_get * tagLength;
02062     }
02063 
02064     return moab::MB_SUCCESS;  // no error
02065 }
02066 
02067 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
02068 {
02069 #ifdef MOAB_HAVE_MPI
02070     appData& data = context.appDatas[*pid];
02071     Range ent_exchange;
02072     std::vector< Tag > tags;
02073 
02074     for( int i = 0; i < *num_tag; i++ )
02075     {
02076         if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
02077         {
02078             return moab::MB_FAILURE;
02079         }  // error in tag index
02080 
02081         tags.push_back( data.tagList[tag_indices[i]] );
02082     }
02083 
02084     if( *ent_type == 0 )
02085     {
02086         ent_exchange = data.all_verts;
02087     }
02088     else if( *ent_type == 1 )
02089     {
02090         ent_exchange = data.primary_elems;
02091     }
02092     else
02093     {
02094         return moab::MB_FAILURE;
02095     }  // unexpected type
02096 
02097     ParallelComm* pco = context.pcomms[*pid];
02098 
02099     ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
02100 
02101 #else
02102     /* do nothing if serial */
02103     // just silence the warning
02104     // do not call sync tags in serial!
02105     int k = *pid + *num_tag + *tag_indices + *ent_type;
02106     k++;
02107 #endif
02108 
02109     return moab::MB_SUCCESS;
02110 }
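
/* A minimal usage sketch (not part of the implementation): exchange one previously defined
 * tag on vertices across the shared interface, using the index returned by
 * iMOAB_DefineTagStorage:
 *
 *    int num_tag = 1, ent_type = 0;          // one tag, on vertices
 *    int tag_indices[1] = { tag_index };
 *    ErrCode ierr = iMOAB_SynchronizeTags( pid, &num_tag, tag_indices, &ent_type );
 */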
02111 
02112 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
02113 {
02114 #ifdef MOAB_HAVE_MPI
02115     appData& data = context.appDatas[*pid];
02116     Range ent_exchange;
02117 
02118     if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
02119     {
02120         return moab::MB_FAILURE;
02121     }  // error in tag index
02122 
02123     Tag tagh = data.tagList[*tag_index];
02124 
02125     if( *ent_type == 0 )
02126     {
02127         ent_exchange = data.all_verts;
02128     }
02129     else if( *ent_type == 1 )
02130     {
02131         ent_exchange = data.primary_elems;
02132     }
02133     else
02134     {
02135         return moab::MB_FAILURE;
02136     }  // unexpected type
02137 
02138     ParallelComm* pco = context.pcomms[*pid];
02139     // we could do different MPI_Op; do not bother now, we will call from fortran
02140     ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
02141 
02142 #else
02143     /* do nothing if serial */
02144     // just silence the warning
02145     // do not call sync tags in serial!
02146     int k = *pid + *tag_index + *ent_type;
02147     k++;  // just do junk, to avoid complaints
02148 #endif
02149     return moab::MB_SUCCESS;
02150 }
02151 
02152 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
02153                                    iMOAB_LocalID* local_index,
02154                                    int* num_adjacent_elements,
02155                                    iMOAB_LocalID* adjacent_element_IDs )
02156 {
02157     ErrorCode rval;
02158 
02159     // one neighbor for each subentity of dimension-1
02160     MeshTopoUtil mtu( context.MBI );
02161     appData& data   = context.appDatas[*pid];
02162     EntityHandle eh = data.primary_elems[*local_index];
02163     Range adjs;
02164     rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
02165 
02166     if( *num_adjacent_elements < (int)adjs.size() )
02167     {
02168         return moab::MB_FAILURE;
02169     }  // not dimensioned correctly
02170 
02171     *num_adjacent_elements = (int)adjs.size();
02172 
02173     for( int i = 0; i < *num_adjacent_elements; i++ )
02174     {
02175         adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
02176     }
02177 
02178     return moab::MB_SUCCESS;
02179 }
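
/* A minimal usage sketch (not part of the implementation); the output array must be sized for
 * the maximum expected number of face neighbors (num_adjacent_elements is a capacity on input
 * and the actual count on output):
 *
 *    iMOAB_LocalID elem = 0;                 // local index of the element of interest
 *    int num_adj = 6;                        // e.g. at most 6 face neighbors for a hex
 *    iMOAB_LocalID adj_ids[6];
 *    ErrCode ierr = iMOAB_GetNeighborElements( pid, &elem, &num_adj, adj_ids );
 */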
02180 
02181 #if 0
02182 
02183 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
02184 {
02185     return moab::MB_SUCCESS;
02186 }
02187 
02188 #endif
02189 
02190 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
02191 {
02192     ErrorCode rval;
02193     appData& data = context.appDatas[*pid];
02194 
02195     if( !data.local_verts.empty() )  // we should have no vertices in the app
02196     {
02197         return moab::MB_FAILURE;
02198     }
02199 
02200     int nverts = *coords_len / *dim;
02201 
02202     rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
02203 
02204     rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
02205 
02206     // also add the vertices to the all_verts range
02207     data.all_verts.merge( data.local_verts );
02208     return moab::MB_SUCCESS;
02209 }
02210 
02211 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
02212                               int* num_elem,
02213                               int* type,
02214                               int* num_nodes_per_element,
02215                               int* connectivity,
02216                               int* block_ID )
02217 {
02218     // Create elements
02219     appData& data = context.appDatas[*pid];
02220 
02221     ReadUtilIface* read_iface;
02222     ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
02223 
02224     EntityType mbtype = (EntityType)( *type );
02225     EntityHandle actual_start_handle;
02226     EntityHandle* array = NULL;
02227     rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
02228 
02229     // fill up with actual connectivity from input; assume the vertices are in order, and start
02230     // vertex is the first in the current data vertex range
02231     EntityHandle firstVertex = data.local_verts[0];
02232 
02233     for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
02234     {
02235         array[j] = connectivity[j] + firstVertex - 1;
02236     }  // assumes connectivity uses a 1-based array (from fortran, mostly)
02237 
02238     Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
02239 
02240     rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
02241 
02242     data.primary_elems.merge( new_elems );
02243 
02244     // add to adjacency
02245     rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
02246     // organize all new elements in a block, with the given block ID; if the block set does
02247     // not exist, create a new mesh set
02248     Range sets;
02249     int set_no            = *block_ID;
02250     const void* setno_ptr = &set_no;
02251     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
02252                                                       sets );
02253     EntityHandle block_set;
02254 
02255     if( MB_FAILURE == rval || sets.empty() )
02256     {
02257         // create a new set, with this block ID
02258         rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
02259 
02260         rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
02261 
02262         // add the material set to file set
02263         rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
02264     }
02265     else
02266     {
02267         block_set = sets[0];
02268     }  // first set is the one we want
02269 
02270     // add the new ents to the block set
02271     rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
02272 
02273     return moab::MB_SUCCESS;
02274 }
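
/* A minimal sketch (not part of the implementation) that builds one quad from four vertices;
 * the connectivity is 1-based relative to the vertices just created, and block_ID selects
 * (or creates) the material set:
 *
 *    double coords[12] = { 0., 0., 0.,  1., 0., 0.,  1., 1., 0.,  0., 1., 0. };
 *    int coords_len = 12, dim = 3;
 *    ErrCode ierr = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );
 *
 *    int num_elem = 1, type = (int)moab::MBQUAD, nodes_per_elem = 4, block_id = 100;
 *    int conn[4] = { 1, 2, 3, 4 };
 *    ierr = iMOAB_CreateElements( pid, &num_elem, &type, &nodes_per_elem, conn, &block_id );
 */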
02275 
02276 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
02277 {
02278     appData& data            = context.appDatas[*pid];
02279     data.num_global_vertices = *num_global_verts;
02280     data.num_global_elements = *num_global_elems;
02281     return moab::MB_SUCCESS;
02282 }
02283 
02284 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
02285 {
02286     appData& data = context.appDatas[*pid];
02287     if( NULL != num_global_verts )
02288     {
02289         *num_global_verts = data.num_global_vertices;
02290     }
02291     if( NULL != num_global_elems )
02292     {
02293         *num_global_elems = data.num_global_elements;
02294     }
02295 
02296     return moab::MB_SUCCESS;
02297 }
02298 
02299 #ifdef MOAB_HAVE_MPI
02300 
02301 // this makes sense only for parallel runs
02302 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
02303 {
02304     appData& data     = context.appDatas[*pid];
02305     ParallelComm* pco = context.pcomms[*pid];
02306     EntityHandle cset = data.file_set;
02307     int dum_id        = 0;
02308     ErrorCode rval;
02309     if( data.primary_elems.empty() )
02310     {
02311         // skip the actual resolve; assume the vertices are already distributed,
02312         // no need to share them
02313     }
02314     else
02315     {
02316         // create an integer tag for resolving; maybe it can be a long tag in the future
02317         // (for more than 2 billion vertices)
02318 
02319         Tag stag;
02320         rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
02321                                             &dum_id );MB_CHK_ERR( rval );
02322 
02323         if( *num_verts > (int)data.local_verts.size() )
02324         {
02325             return moab::MB_FAILURE;
02326         }  // cannot set more markers than there are local vertices
02327 
02328         rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval );  // assumes integer tag
02329 
02330         rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
02331 
02332         rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
02333     }
02334     // provide partition tag equal to rank
02335     Tag part_tag;
02336     dum_id = -1;
02337     rval   = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02338                                           MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02339 
02340     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02341     {
02342         std::cout << " can't get par part tag.\n";
02343         return moab::MB_FAILURE;
02344     }
02345 
02346     int rank = pco->rank();
02347     rval     = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
02348 
02349     return moab::MB_SUCCESS;
02350 }
02351 
02352 // this assumes that this was not called before
02353 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
02354 {
02355     ErrorCode rval;
02356 
02357     // verify we have valid ghost layers input specified. If invalid, exit quick.
02358     if( *num_ghost_layers <= 0 )
02359     {
02360         return moab::MB_SUCCESS;
02361     }  // nothing to do
02362 
02363     appData& data     = context.appDatas[*pid];
02364     ParallelComm* pco = context.pcomms[*pid];
02365 
02366     int addl_ents =
02367         0;  // maybe we should be passing this too; most of the time we do not need additional ents (collective call)
02368     rval =
02369         pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, *num_ghost_layers, addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
02370 
02371     // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
02372     return iMOAB_UpdateMeshInfo( pid );
02373 }
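
/* A minimal usage sketch (not part of the implementation): request one layer of 2d element
 * ghosts, bridged through vertices; this is a collective call over the application's
 * communicator and finishes by refreshing the mesh info:
 *
 *    int ghost_dim = 2, num_layers = 1, bridge_dim = 0;
 *    ErrCode ierr = iMOAB_DetermineGhostEntities( pid, &ghost_dim, &num_layers, &bridge_dim );
 */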
02374 
02375 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
02376 {
02377     assert( join != nullptr );
02378     assert( receivingGroup != nullptr );
02379     assert( rcompid != nullptr );
02380 
02381     ErrorCode rval;
02382     int ierr;
02383     appData& data     = context.appDatas[*pid];
02384     ParallelComm* pco = context.pcomms[*pid];
02385 
02386     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02387     MPI_Group recvGroup =
02388         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
02389     MPI_Comm sender = pco->comm();  // the sender comm is obtained from parallel comm in moab; no need to pass it along
02390     // first see what are the processors in each group; get the sender group too, from the sender communicator
02391     MPI_Group senderGroup;
02392     ierr = MPI_Comm_group( sender, &senderGroup );
02393     if( ierr != 0 ) return moab::MB_FAILURE;
02394 
02395     // instantiate the par comm graph
02396     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02397     // int coid2)
02398     ParCommGraph* cgraph =
02399         new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
02400     // we should search if we have another pcomm with the same comp ids in the list already
02401     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02402     context.appDatas[*pid].pgraph[*rcompid] = cgraph;
02403 
02404     int sender_rank = -1;
02405     MPI_Comm_rank( sender, &sender_rank );
02406 
02407     // decide how to distribute elements to each processor
02408     // now, get the entities on local processor, and pack them into a buffer for various processors
02409     // we will do trivial partition: first get the total number of elements from "sender"
02410     std::vector< int > number_elems_per_part;
02411     // how do we distribute local elements to the receiving tasks?
02412     // trivial partition: first compute the total number of elements that need to be sent
02413     Range owned = context.appDatas[*pid].owned_elems;
02414     if( owned.size() == 0 )
02415     {
02416         // then it must be vertices that we want to send
02417         owned = context.appDatas[*pid].local_verts;
02418         // we should have some vertices here
02419     }
02420 
02421     if( *method == 0 )  // trivial partitioning, old method
02422     {
02423         int local_owned_elem = (int)owned.size();
02424         int size             = pco->size();
02425         int rank             = pco->rank();
02426         number_elems_per_part.resize( size );  //
02427         number_elems_per_part[rank] = local_owned_elem;
02428 #if( MPI_VERSION >= 2 )
02429         // Use "in place" option
02430         ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
02431 #else
02432         {
02433             std::vector< int > all_tmp( size );
02434             ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
02435             number_elems_per_part = all_tmp;
02436         }
02437 #endif
02438 
02439         if( ierr != 0 )
02440         {
02441             return moab::MB_FAILURE;
02442         }
02443 
02444         // every sender computes the trivial partition; it is cheap, and we need to send it anyway
02445         // to each receiver
02446         rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
02447 
02448         rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
02449     }
02450     else  // *method != 0, so it is either graph or geometric, parallel
02451     {
02452         // owned are the primary elements on this app
02453         rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
02454 
02455         // basically, send the graph to the receiver side, with unblocking send
02456         rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
02457     }
02458     // pco is needed to pack, not for communication
02459     rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
02460 
02461     // mark for deletion
02462     MPI_Group_free( &senderGroup );
02463     return moab::MB_SUCCESS;
02464 }
02465 
02466 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
02467 {
02468     assert( join != nullptr );
02469     assert( sendingGroup != nullptr );
02470     assert( scompid != nullptr );
02471 
02472     ErrorCode rval;
02473     appData& data          = context.appDatas[*pid];
02474     ParallelComm* pco      = context.pcomms[*pid];
02475     MPI_Comm receive       = pco->comm();
02476     EntityHandle local_set = data.file_set;
02477 
02478     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02479     MPI_Group sendGroup =
02480         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
02481 
02482     // first see what are the processors in each group; get the sender group too, from the sender
02483     // communicator
02484     MPI_Group receiverGroup;
02485     int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
02486 
02487     // instantiate the par comm graph
02488     ParCommGraph* cgraph =
02489         new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
02490     // TODO we should search if we have another pcomm with the same comp ids in the list already
02491     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
02492     context.appDatas[*pid].pgraph[*scompid] = cgraph;
02493 
02494     int receiver_rank = -1;
02495     MPI_Comm_rank( receive, &receiver_rank );
02496 
02497     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
02498     // knows what data to expect
02499     std::vector< int > pack_array;
02500     rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
02501 
02502     // find the senders for the current receiver
02503     int current_receiver = cgraph->receiver( receiver_rank );
02504 
02505     std::vector< int > senders_local;
02506     size_t n = 0;
02507 
02508     while( n < pack_array.size() )
02509     {
02510         if( current_receiver == pack_array[n] )
02511         {
02512             for( int j = 0; j < pack_array[n + 1]; j++ )
02513             {
02514                 senders_local.push_back( pack_array[n + 2 + j] );
02515             }
02516 
02517             break;
02518         }
02519 
02520         n = n + 2 + pack_array[n + 1];
02521     }
02522 
02523 #ifdef VERBOSE
02524     std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
02525               << senders_local.size() << " tasks: ";
02526 
02527     for( int k = 0; k < (int)senders_local.size(); k++ )
02528     {
02529         std::cout << " " << senders_local[k];
02530     }
02531 
02532     std::cout << "\n";
02533 #endif
02534 
02535     if( senders_local.empty() )
02536     {
02537         std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
02538     }
02539     rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
02540 
02541     // after we are done, we could merge vertices that come from different senders, but
02542     // have the same global id
02543     Tag idtag;
02544     rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
02545 
02546     //   data.point_cloud = false;
02547     Range local_ents;
02548     rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
02549 
02550     // do not do merge if point cloud
02551     if( !local_ents.all_of_type( MBVERTEX ) )
02552     {
02553         if( (int)senders_local.size() >= 2 )  // need to remove duplicate vertices
02554         // that might come from different senders
02555         {
02556 
02557             Range local_verts = local_ents.subset_by_type( MBVERTEX );
02558             Range local_elems = subtract( local_ents, local_verts );
02559 
02560             // remove from local set the vertices
02561             rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
02562 
02563 #ifdef VERBOSE
02564             std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
02565 #endif
02566             MergeMesh mm( context.MBI );
02567 
02568             rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
02569 
02570             Range new_verts;  // the vertices used by local_elems, after merging
02571             rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
02572 
02573 #ifdef VERBOSE
02574             std::cout << "after merging: new verts: " << new_verts.size() << "\n";
02575 #endif
02576             rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
02577         }
02578     }
02579     else
02580         data.point_cloud = true;
02581 
02582     if( !data.point_cloud )
02583     {
02584         // still need to resolve shared entities (in this case, vertices )
02585         rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
02586     }
02587     else
02588     {
02589         // if partition tag exists, set it to current rank; just to make it visible in VisIt
02590         Tag densePartTag;
02591         rval = context.MBI->tag_get_handle( "partition", densePartTag );
02592         if( NULL != densePartTag && MB_SUCCESS == rval )
02593         {
02594             Range local_verts;
02595             rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
02596             std::vector< int > vals;
02597             int rank = pco->rank();
02598             vals.resize( local_verts.size(), rank );
02599             rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
02600         }
02601     }
02602     // set the parallel partition tag
02603     Tag part_tag;
02604     int dum_id = -1;
02605     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02606                                               MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02607 
02608     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02609     {
02610         std::cout << " can't get par part tag.\n";
02611         return moab::MB_FAILURE;
02612     }
02613 
02614     int rank = pco->rank();
02615     rval     = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
02616 
02617     // populate the mesh with current data info
02618     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
02619 
02620     // mark for deletion
02621     MPI_Group_free( &receiverGroup );
02622 
02623     return moab::MB_SUCCESS;
02624 }
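
/* A minimal pairing sketch (not part of the implementation), assuming a joint communicator
 * join that spans both groups, MPI groups senderGroup / receiverGroup, component ids
 * scompid / rcompid, and application ids pid_send / pid_recv registered on the respective
 * sides (all hypothetical caller variables):
 *
 *    int method = 0;                         // 0 = trivial partition, nonzero = graph/geometric
 *    if( is_sender_task )
 *        ierr = iMOAB_SendMesh( pid_send, &join, &receiverGroup, &rcompid, &method );
 *    else
 *        ierr = iMOAB_ReceiveMesh( pid_recv, &join, &senderGroup, &scompid );
 *    // once the receive side is done, the sender releases its nonblocking send buffers:
 *    if( is_sender_task ) ierr = iMOAB_FreeSenderBuffers( pid_send, &rcompid );
 */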
02625 
02626 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02627 {
02628     appData& data                               = context.appDatas[*pid];
02629     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02630     if( mt == data.pgraph.end() )
02631     {
02632         return moab::MB_FAILURE;
02633     }
02634     ParCommGraph* cgraph = mt->second;
02635     ParallelComm* pco    = context.pcomms[*pid];
02636     Range owned          = data.owned_elems;
02637     ErrorCode rval;
02638     EntityHandle cover_set;
02639 
02640     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02641     if( data.point_cloud )
02642     {
02643         owned = data.local_verts;
02644     }
02645 
02646     // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
02647     // e.g. after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the
02648     // cover set from the intx remapper
02649 #ifdef MOAB_HAVE_TEMPESTREMAP
02650     if( data.tempestData.remapper != NULL )  // this is the case when this app is part of intx
02651     {
02652         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02653         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02654         // should still have only quads ?
02655     }
02656 #else
02657     // now, in case we are sending from intx between ocn and atm, we involve coverage set
02658     // how do I know if this receiver already participated in an intersection driven by coupler?
02659     // also, what if this was the "source" mesh in intx?
02660     // in that case, the elements might have been instantiated in the coverage set locally; the
02661     // "owned" range can be different, since the elements are now in the tempestRemap coverage_set
02662     cover_set = cgraph->get_cover_set();  // this will be non null only for intx app ?
02663 
02664     if( 0 != cover_set )
02665     {
02666         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02667     }
02668 #endif
02669 
02670     std::string tag_name( tag_storage_name );
02671 
02672     // basically, we assume the tags are already defined,
02673     //   and we can get them just by their names
02674     // we assume that there are separators ":" between the tag names
02675     std::vector< std::string > tagNames;
02676     std::vector< Tag > tagHandles;
02677     std::string separator( ":" );
02678     split_tag_names( tag_name, separator, tagNames );
02679     for( size_t i = 0; i < tagNames.size(); i++ )
02680     {
02681         Tag tagHandle;
02682         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02683         if( MB_SUCCESS != rval || NULL == tagHandle )
02684         {
02685             std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
02686         }
02687         tagHandles.push_back( tagHandle );
02688     }
02689 
02690     // pco is needed to pack, and for moab instance, not for communication!
02691     // still use nonblocking communication, over the joint comm
02692     rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02693     // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
02694 
02695     return moab::MB_SUCCESS;
02696 }
02697 
02698 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02699 {
02700     appData& data = context.appDatas[*pid];
02701 
02702     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02703     if( mt == data.pgraph.end() )
02704     {
02705         return moab::MB_FAILURE;
02706     }
02707     ParCommGraph* cgraph = mt->second;
02708 
02709     MPI_Comm global   = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02710     ParallelComm* pco = context.pcomms[*pid];
02711     Range owned       = data.owned_elems;
02712 
02713     // how do I know if this receiver already participated in an intersection driven by coupler?
02714     // also, what if this was the "source" mesh in intx?
02715     // in that case, the elements might have been instantiated in the coverage set locally; the
02716     // "owned" range can be different, since the elements are now in the tempestRemap coverage_set
02717     EntityHandle cover_set = cgraph->get_cover_set();
02718     ErrorCode rval;
02719     if( 0 != cover_set )
02720     {
02721         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02722     }
02723     if( data.point_cloud )
02724     {
02725         owned = data.local_verts;
02726     }
02727     // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
02728     // e.g. after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the
02729     // cover set from the intx remapper
02730 #ifdef MOAB_HAVE_TEMPESTREMAP
02731     if( data.tempestData.remapper != NULL )  // this is the case when this app is part of intx
02732     {
02733         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02734         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02735         // should still have only quads ?
02736     }
02737 #endif
02738     /*
02739      * data_intx.remapper exists only on the intersection application, though;
02740      * how do we get to it from here? ( we know the pid that receives, and the comm graph used by
02741      * migrate mesh )
02742      */
02743 
02744     std::string tag_name( tag_storage_name );
02745 
02746     // we assume that there are separators ":" between the tag names
02747     std::vector< std::string > tagNames;
02748     std::vector< Tag > tagHandles;
02749     std::string separator( ":" );
02750     split_tag_names( tag_name, separator, tagNames );
02751     for( size_t i = 0; i < tagNames.size(); i++ )
02752     {
02753         Tag tagHandle;
02754         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02755         if( MB_SUCCESS != rval || NULL == tagHandle )
02756         {
02757             std::cout << " can't get tag handle for tag named:" << tagNames[i].c_str() << " at index " << i << "\n";MB_CHK_SET_ERR( rval, "can't get tag handle" );
02758         }
02759         tagHandles.push_back( tagHandle );
02760     }
02761 
02762 #ifdef VERBOSE
02763     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
02764               << " and file set = " << ( data.file_set ) << "\n";
02765 #endif
02766     // pco is needed to pack, and for moab instance, not for communication!
02767     // still use nonblocking communication
02768     rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02769 
02770 #ifdef VERBOSE
02771     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
02772 #endif
02773 
02774     return moab::MB_SUCCESS;
02775 }
02776 
02777 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
02778 {
02779     // need first to find the pgraph that holds the information we need
02780     // this will be called on sender side only
02781     appData& data                               = context.appDatas[*pid];
02782     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02783     if( mt == data.pgraph.end() ) return moab::MB_FAILURE;  // error
02784 
02785     mt->second->release_send_buffers();
02786     return moab::MB_SUCCESS;
02787 }
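
/* A minimal tag-migration sketch (not part of the implementation), over a comm graph
 * established earlier (by the mesh send/receive above or by iMOAB_ComputeCommGraph); the
 * context_id is the component id of the other side, and "T_proj:u_proj" stands for arbitrary
 * tag names defined on both applications:
 *
 *    if( is_sender_task )
 *        ierr = iMOAB_SendElementTag( pid_send, "T_proj:u_proj", &join, &other_comp );
 *    else
 *        ierr = iMOAB_ReceiveElementTag( pid_recv, "T_proj:u_proj", &join, &other_comp );
 *    // after the receives complete, free the nonblocking send buffers:
 *    if( is_sender_task ) ierr = iMOAB_FreeSenderBuffers( pid_send, &other_comp );
 */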
02788 
02789 /**
02790 \brief compute a comm graph between 2 moab apps, based on ID matching
02791 <B>Operations:</B> Collective
02792 */
02793 //#define VERBOSE
02794 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
02795                                 iMOAB_AppID pid2,
02796                                 MPI_Comm* join,
02797                                 MPI_Group* group1,
02798                                 MPI_Group* group2,
02799                                 int* type1,
02800                                 int* type2,
02801                                 int* comp1,
02802                                 int* comp2 )
02803 {
02804     assert( join );
02805     assert( group1 );
02806     assert( group2 );
02807     ErrorCode rval = MB_SUCCESS;
02808     int localRank = 0, numProcs = 1;
02809 
02810     bool isFortran = false;
02811     if (*pid1>=0) isFortran = isFortran || context.appDatas[*pid1].is_fortran;
02812     if (*pid2>=0) isFortran = isFortran || context.appDatas[*pid2].is_fortran;
02813 
02814     MPI_Comm global = ( isFortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02815     MPI_Group srcGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
02816     MPI_Group tgtGroup = ( isFortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
02817 
02818     MPI_Comm_rank( global, &localRank );
02819     MPI_Comm_size( global, &numProcs );
02820     // instantiate the par comm graph
02821 
02822     // first check whether a comm graph with the same component ids already exists,
02823     // by searching the existing comm graphs in the map context.appDatas[*pid].pgraph
02824     bool already_exists = false;
02825     if( *pid1 >= 0 )
02826     {
02827         appData& data                               = context.appDatas[*pid1];
02828         std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp2);
02829         if ( mt != data.pgraph.end() )
02830             already_exists = true;
02831     }
02832     if( *pid2 >= 0 )
02833     {
02834         appData& data                               = context.appDatas[*pid2];
02835         std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *comp1);
02836         if ( mt != data.pgraph.end() )
02837             already_exists = true;
02838     }
02839     // nothing to do if it already exists
02840     if (already_exists)
02841     {
02842 #ifdef VERBOSE
02843         if (!localRank)
02844             std::cout << " ParCommGraph already exists between components " <<
02845             *comp1 << " and " << *comp2 << ". Do not compute it again\n";
02846 #endif
02847         return moab::MB_SUCCESS;
02848     }
02849 
02850     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02851     // int coid2)
02852     ParCommGraph* cgraph = NULL;
02853     if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
02854     ParCommGraph* cgraph_rev = NULL;
02855     if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
02856 
02857     if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;      // the context will be the other comp
02858     if( *pid2 >= 0 ) context.appDatas[*pid2].pgraph[*comp1] = cgraph_rev;  // from 2 to 1
02859     // each model has a list of global ids that will need to be sent through the gs (crystal router)
02860     // to rendezvous with the other model on the joint comm
02861     TupleList TLcomp1;
02862     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02863     TupleList TLcomp2;
02864     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02865     // will push_back a new tuple, if needed
02866 
02867     TLcomp1.enableWriteAccess();
02868 
02869     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
02870     Tag gdsTag;
02871     // find the values on first cell
02872     int lenTagType1 = 1;
02873     if( 1 == *type1 || 1 == *type2 )
02874     {
02875         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
02876         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
02877     }
02878     Tag tagType2 = context.MBI->globalId_tag();
02879 
02880     std::vector< int > valuesComp1;
02881     // populate first tuple
02882     if( *pid1 >= 0 )
02883     {
02884         appData& data1     = context.appDatas[*pid1];
02885         EntityHandle fset1 = data1.file_set;
02886         // in case of tempest remap, get the coverage set
02887 #ifdef MOAB_HAVE_TEMPESTREMAP
02888         if( data1.tempestData.remapper != NULL )  // this is the case when this app is part of the intersection
02889         {
02890             fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02891             // should still have only quads ?
02892         }
02893 #endif
02894         Range ents_of_interest;
02895         if( *type1 == 1 )
02896         {
02897             assert( gdsTag );
02898             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
02899             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
02900             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
02901         }
02902         else if( *type1 == 2 )
02903         {
02904             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
02905             valuesComp1.resize( ents_of_interest.size() );
02906             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
02907         }
02908         else if( *type1 == 3 )  // for FV meshes, just get the global id of cell
02909         {
02910             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
02911             valuesComp1.resize( ents_of_interest.size() );
02912             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
02913         }
02914         else
02915         {
02916             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
02917         }
02918         // now fill the tuple list with info and markers
02919         // because we will send only the ids, order and compress the list
02920         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
02921         TLcomp1.resize( uniq.size() );
02922         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02923         {
02924             // to proc, marker, element local index, index in el
02925             int marker               = *sit;
02926             int to_proc              = marker % numProcs;
02927             int n                    = TLcomp1.get_n();
02928             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
02929             TLcomp1.vi_wr[2 * n + 1] = marker;
02930             TLcomp1.inc_n();
02931         }
02932     }
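    // Rendezvous sketch (illustrative): with, say, numProcs = 4 joint tasks, a marker (global id or
    // GLOBAL_DOFS value) of 42 is routed to joint rank 42 % 4 = 2 from both components, so the two
    // sides meet on the same task and the matching loop below can pair them up.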
02933 
02934     ProcConfig pc( global );  // proc config does the crystal router
02935     pc.crystal_router()->gs_transfer( 1, TLcomp1,
02936                                       0 );  // communication towards joint tasks, with markers
02937     // sort by value (key 1)
02938 #ifdef VERBOSE
02939     std::stringstream ff1;
02940     ff1 << "TLcomp1_" << localRank << ".txt";
02941     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02942 #endif
02943     moab::TupleList::buffer sort_buffer;
02944     sort_buffer.buffer_init( TLcomp1.get_n() );
02945     TLcomp1.sort( 1, &sort_buffer );
02946     sort_buffer.reset();
02947 #ifdef VERBOSE
02948     // after sorting
02949     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02950 #endif
02951     // do the same, for the other component, number2, with type2
02952     // start copy
02953     TLcomp2.enableWriteAccess();
02954     // populate second tuple
02955     std::vector< int > valuesComp2;
02956     if( *pid2 >= 0 )
02957     {
02958         appData& data2     = context.appDatas[*pid2];
02959         EntityHandle fset2 = data2.file_set;
02960         // in case of tempest remap, get the coverage set
02961 #ifdef MOAB_HAVE_TEMPESTREMAP
02962         if( data2.tempestData.remapper != NULL )  // this is the case when this app is part of the intersection
02963         {
02964             fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02965             // should still have only quads ?
02966         }
02967 #endif
02968 
02969         Range ents_of_interest;
02970         if( *type2 == 1 )
02971         {
02972             assert( gdsTag );
02973             rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
02974             valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
02975             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
02976         }
02977         else if( *type2 == 2 )
02978         {
02979             rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
02980             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02981             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
02982         }
02983         else if( *type2 == 3 )
02984         {
02985             rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
02986             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02987             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
02988         }
02989         else
02990         {
02991             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
02992         }
02993         // now fill the tuple list with info and markers
02994         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
02995         TLcomp2.resize( uniq.size() );
02996         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02997         {
02998             // to proc, marker, element local index, index in el
02999             int marker               = *sit;
03000             int to_proc              = marker % numProcs;
03001             int n                    = TLcomp2.get_n();
03002             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
03003             TLcomp2.vi_wr[2 * n + 1] = marker;
03004             TLcomp2.inc_n();
03005         }
03006     }
03007     pc.crystal_router()->gs_transfer( 1, TLcomp2,
03008                                       0 );  // communication towards joint tasks, with markers
03009     // sort by value (key 1)
03010 #ifdef VERBOSE
03011     std::stringstream ff2;
03012     ff2 << "TLcomp2_" << localRank << ".txt";
03013     TLcomp2.print_to_file( ff2.str().c_str() );
03014 #endif
03015     sort_buffer.buffer_reserve( TLcomp2.get_n() );
03016     TLcomp2.sort( 1, &sort_buffer );
03017     sort_buffer.reset();
03018     // end copy
03019 #ifdef VERBOSE
03020     TLcomp2.print_to_file( ff2.str().c_str() );
03021 #endif
03022     // need to send back the info, from the rendezvous point, for each of the values
03023     /* go over each value, on the local process in the joint communicator;
03024 
03025     we now have to send back the info needed for communication:
03026      loop in sync over both TLcomp1 and TLcomp2, on the local process;
03027       build new tuple lists to send the information back, and
03028       populate them at the same time, based on the marker, which is indexed
03029     */
03030 
03031     TupleList TLBackToComp1;
03032     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
03033     TLBackToComp1.enableWriteAccess();
03034 
03035     TupleList TLBackToComp2;
03036     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
03037     TLBackToComp2.enableWriteAccess();
03038 
03039     int n1 = TLcomp1.get_n();
03040     int n2 = TLcomp2.get_n();
03041 
03042     int indexInTLComp1 = 0;
03043     int indexInTLComp2 = 0;  // advance both, according to the marker
03044     if( n1 > 0 && n2 > 0 )
03045     {
03046 
03047         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
03048         {
03049             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
03050             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
03051             if( currentValue1 < currentValue2 )
03052             {
03053                 // we have a big problem; basically, we are saying that
03054                 // dof currentValue is on one model and not on the other
03055                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
03056                 indexInTLComp1++;
03057                 continue;
03058             }
03059             if( currentValue1 > currentValue2 )
03060             {
03061                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
03062                 indexInTLComp2++;
03063                 continue;
03064             }
03065             int size1 = 1;
03066             int size2 = 1;
03067             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
03068                 size1++;
03069             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
03070                 size2++;
03071             // must be found in both lists, find the start and end indices
03072             for( int i1 = 0; i1 < size1; i1++ )
03073             {
03074                 for( int i2 = 0; i2 < size2; i2++ )
03075                 {
03076                     // send the info back to components
03077                     int n = TLBackToComp1.get_n();
03078                     TLBackToComp1.reserve();
03079                     TLBackToComp1.vi_wr[3 * n] =
03080                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
03081                                                                      // came from, info from comp2
03082                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03083                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
03084                     n                              = TLBackToComp2.get_n();
03085                     TLBackToComp2.reserve();
03086                     TLBackToComp2.vi_wr[3 * n] =
03087                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
03088                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03089                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
03090                     // what if there are repeated markers in TLcomp2? increase just index2
03091                 }
03092             }
03093             indexInTLComp1 += size1;
03094             indexInTLComp2 += size2;
03095         }
03096     }
03097     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about
03098     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
03099 
03100     if( *pid1 >= 0 )
03101     {
03102         // we are on original comp 1 tasks
03103         // before ordering
03104         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
03105         // processors it communicates with
03106 #ifdef VERBOSE
03107         std::stringstream f1;
03108         f1 << "TLBack1_" << localRank << ".txt";
03109         TLBackToComp1.print_to_file( f1.str().c_str() );
03110 #endif
03111         sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
03112         TLBackToComp1.sort( 1, &sort_buffer );
03113         sort_buffer.reset();
03114 #ifdef VERBOSE
03115         TLBackToComp1.print_to_file( f1.str().c_str() );
03116 #endif
03117         // so we are now on pid1; we now know, for each marker, where it has to go
03118         // add a new method to ParCommGraph, to set up the involved_IDs_map
03119         cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
03120     }
03121     if( *pid2 >= 0 )
03122     {
03123         // we are on original comp 2 tasks
03124         // before ordering
03125         // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
03126         // processors it communicates with
03127 #ifdef VERBOSE
03128         std::stringstream f2;
03129         f2 << "TLBack2_" << localRank << ".txt";
03130         TLBackToComp2.print_to_file( f2.str().c_str() );
03131 #endif
03132         sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
03133         TLBackToComp2.sort( 2, &sort_buffer );
03134         sort_buffer.reset();
03135 #ifdef VERBOSE
03136         TLBackToComp2.print_to_file( f2.str().c_str() );
03137 #endif
03138         cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
03139         //
03140     }
03141     return moab::MB_SUCCESS;
03142 }
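// Illustrative call sketch (hedged; the app handles, groups, and component ids are hypothetical and
// would normally come from iMOAB_RegisterApplication and the driver's MPI setup). For example, a
// graph between a physics-grid atmosphere (type 2, point cloud) and the spectral atm instance on
// the coupler (type 1, GLOBAL_DOFS):
//
//   int typePhys = 2, typeSpectral = 1;
//   ErrCode ierr = iMOAB_ComputeCommGraph( pidPhysAtm, pidCplAtm, &jointComm, &atmGroup, &cplGroup,
//                                          &typePhys, &typeSpectral, &cmpPhysAtmID, &cplAtmID );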
03143 
03144 //#undef VERBOSE
03145 
03146 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
03147 {
03148     appData& data     = context.appDatas[*pid];
03149     ParallelComm* pco = context.pcomms[*pid];
03150     // collapse vertices and transform cells into triangles/quads /polys
03151     // tags we care about: area, frac, global id
03152     std::vector< Tag > tagsList;
03153     Tag tag;
03154     ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
03155     if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;  // fatal error, abort
03156     tagsList.push_back( tag );
03157     rval = context.MBI->tag_get_handle( "area", tag );
03158     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
03159     rval = context.MBI->tag_get_handle( "frac", tag );
03160     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
03161     double tol = 1.0e-9;
03162     rval       = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
03163 
03164     // clean material sets of cells that were deleted
03165     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
03166                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
03167 
03168     if( !data.mat_sets.empty() )
03169     {
03170         EntityHandle matSet = data.mat_sets[0];
03171         Range elems;
03172         rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
03173         rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
03174         // put back only cells from data.file_set
03175         elems.clear();
03176         rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
03177         rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
03178     }
03179     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
03180 
03181     ParallelMergeMesh pmm( pco, tol );
03182     rval = pmm.merge( data.file_set,
03183                       /* do not do local merge*/ false,
03184                       /*  2d cells*/ 2 );MB_CHK_ERR( rval );
03185 
03186     // assign global ids only for vertices, cells have them fine
03187     rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
03188 
03189     // set the partition tag on the file set
03190     Tag part_tag;
03191     int dum_id = -1;
03192     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
03193                                               MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
03194 
03195     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
03196     {
03197         std::cout << " can't get par part tag.\n";
03198         return moab::MB_FAILURE;
03199     }
03200 
03201     int rank = pco->rank();
03202     rval     = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
03203 
03204     return moab::MB_SUCCESS;
03205 }
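// Illustrative usage sketch (hypothetical app handle): after a mesh was migrated to the coupler in
// pieces that still carry duplicated vertices along partition boundaries, a driver could call
//
//   ErrCode ierr = iMOAB_MergeVertices( pidCplOcn );
//
// to collapse coincident vertices (within the 1.0e-9 tolerance used above) and refresh the mesh info.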
03206 
03207 #ifdef MOAB_HAVE_TEMPESTREMAP
03208 
03209 // this call must be collective on the joint communicator
03210 //  intersection tasks on the coupler will need to send to the component tasks the list of
03211 // element ids that are relevant: the source cells that intersected some of the target elements
03212 //  (the target elements themselves are not needed here)
03213 //  during the intersection computation
03214 ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
03215                              iMOAB_AppID pid_src,
03216                              iMOAB_AppID pid_migr,
03217                              iMOAB_AppID pid_intx,
03218                              int* src_id,
03219                              int* migr_id,
03220                              int* context_id )
03221 {
03222     // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
03223     // exchange
03224     ErrorCode rval;
03225     std::vector< int > srcSenders;
03226     std::vector< int > receivers;
03227     ParCommGraph* sendGraph = NULL;
03228     int ierr;
03229     int default_context_id  = -1;
03230     bool is_fortran_context = false;
03231 
03232     // First, find the original graph between PE sets
03233     // And based on this one, we will build the newly modified one for coverage
03234     if( *pid_src >= 0 )
03235     {
03236         default_context_id = *migr_id;  // the other one
03237         assert( context.appDatas[*pid_src].global_id == *src_id );
03238         is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
03239         sendGraph          = context.appDatas[*pid_src].pgraph[default_context_id];  // maybe check if it does not exist
03240 
03241         // report the sender and receiver tasks in the joint comm
03242         srcSenders = sendGraph->senders();
03243         receivers  = sendGraph->receivers();
03244 #ifdef VERBOSE
03245         std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
03246 #endif
03247     }
03248 
03249     ParCommGraph* recvGraph = NULL;  // will be non null on receiver tasks (intx tasks)
03250     if( *pid_migr >= 0 )
03251     {
03252         is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
03253         // find the original one
03254         default_context_id = *src_id;
03255         assert( context.appDatas[*pid_migr].global_id == *migr_id );
03256         recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
03257         // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
03258         srcSenders = recvGraph->senders();
03259         receivers  = recvGraph->receivers();
03260 #ifdef VERBOSE
03261         std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
03262 #endif
03263     }
03264 
03265     if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
03266 
03267     // loop over pid_intx elements, to see what original processors in joint comm have sent the
03268     // coverage mesh;
03269     // If we are on intx tasks, send coverage info towards original component tasks,
03270     // about needed cells
03271     TupleList TLcovIDs;
03272     TLcovIDs.initialize( 2, 0, 0, 0, 0 );  // to proc, GLOBAL ID; estimate about 100 IDs to be sent
03273     // will push_back a new tuple, if needed
03274     TLcovIDs.enableWriteAccess();
03275     // the crystal router will send ID cell to the original source, on the component task
03276     // if we are on intx tasks, loop over all intx elements and
03277 
03278     MPI_Comm global = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
03279     int currentRankInJointComm = -1;
03280     ierr                       = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
03281 
03282     // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
03283     // need to send information towards component tasks
03284     if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
03285         receivers.end() )  // we are on receivers tasks, we can request intx info
03286     {
03287         // find the pcomm for the intx pid
03288         if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
03289 
03290         appData& dataIntx = context.appDatas[*pid_intx];
03291         Tag parentTag, orgSendProcTag;
03292 
03293         rval = context.MBI->tag_get_handle( "SourceParent", parentTag );MB_CHK_ERR( rval );                        // global id of the blue, source element
03294         if( !parentTag ) return moab::MB_FAILURE;  // fatal error, abort
03295 
03296         rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );MB_CHK_ERR( rval );
03297         if( !orgSendProcTag ) return moab::MB_FAILURE;  // fatal error, abort
03298 
03299         // find the file set, source (blue) parents for intx cells, and put them in tuples
03300         EntityHandle intxSet = dataIntx.file_set;
03301         Range cells;
03302         // get all entities from the set, and look at their SourceParent
03303         rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );MB_CHK_ERR( rval );
03304 
03305         std::map< int, std::set< int > > idsFromProcs;  // send that info back to enhance parCommGraph cache
03306         for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
03307         {
03308             EntityHandle intx_cell = *it;
03309             int gidCell, origProc;  // global id of the source parent and its original sending processor
03310 
03311             rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );MB_CHK_ERR( rval );
03312             rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );MB_CHK_ERR( rval );
03313             // we have augmented the overlap set with ghost cells ; in that case, the
03314             // orig_sending_processor is not set so it will be -1;
03315             if( origProc < 0 ) continue;
03316 
03317             std::set< int >& setInts = idsFromProcs[origProc];
03318             setInts.insert( gidCell );
03319         }
03320 
03321         // if we have no intx cells, it means we are on point clouds; quick fix just use all cells
03322         // from coverage set
03323         if( cells.empty() )
03324         {
03325             // get coverage set
03326             assert( *pid_intx >= 0 );
03327             appData& dataIntx      = context.appDatas[*pid_intx];
03328             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03329 
03330             // get all cells from coverage set
03331             Tag gidTag;
03332             rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
03333             rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );MB_CHK_ERR( rval );
03334             // look at their orig_sending_processor
03335             for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
03336             {
03337                 EntityHandle covCell = *it;
03338                 int gidCell, origProc;  // global id and original sending processor
03339 
03340                 rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );MB_CHK_ERR( rval );
03341                 rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );MB_CHK_ERR( rval );
03342                 // we have augmented the overlap set with ghost cells ; in that case, the
03343                 // orig_sending_processor is not set so it will be -1;
03344                 if( origProc < 0 )  // it should not be negative here, but skip it just in case
03345                     continue;
03346                 std::set< int >& setInts = idsFromProcs[origProc];
03347                 setInts.insert( gidCell );
03348             }
03349         }
03350 
03351 #ifdef VERBOSE
03352         std::ofstream dbfile;
03353         std::stringstream outf;
03354         outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
03355         dbfile.open( outf.str().c_str() );
03356         dbfile << "Writing this to a file.\n";
03357 
03358         dbfile << " map size:" << idsFromProcs.size()
03359                << std::endl;  // on the receiver side, these show how much data to receive
03360         // from the sender (how many ids, and how much tag data later; we need to size up the
03361         // receiver buffer) arrange in tuples , use map iterators to send the ids
03362         for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
03363         {
03364             std::set< int >& setIds = mt->second;
03365             dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
03366             int counter = 0;
03367             for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
03368             {
03369                 int valueID = *st;
03370                 dbfile << " " << valueID;
03371                 counter++;
03372                 if( counter % 10 == 0 ) dbfile << "\n";
03373             }
03374             dbfile << "\n";
03375         }
03376         dbfile.close();
03377 #endif
03378         if( NULL != recvGraph )
03379         {
03380             ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph );  // just copy
03381             recvGraph1->set_context_id( *context_id );
03382             recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
03383             // this par comm graph will need to use the coverage set
03384             // so we are for sure on intx pes (the receiver is the coupler mesh)
03385             assert( *pid_intx >= 0 );
03386             appData& dataIntx      = context.appDatas[*pid_intx];
03387             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03388             recvGraph1->set_cover_set( cover_set );
03389             context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1; // possible memory leak if context_id is same
03390         }
03391         for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
03392         {
03393             int procToSendTo       = mit->first;
03394             std::set< int >& idSet = mit->second;
03395             for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
03396             {
03397                 int n = TLcovIDs.get_n();
03398                 TLcovIDs.reserve();
03399                 TLcovIDs.vi_wr[2 * n]     = procToSendTo;  // send to processor
03400                 TLcovIDs.vi_wr[2 * n + 1] = *sit;          // global id needs index in the local_verts range
03401             }
03402         }
03403     }
03404 
03405     ProcConfig pc( global );  // proc config does the crystal router
03406     pc.crystal_router()->gs_transfer( 1, TLcovIDs,
03407                                       0 );  // communication towards component tasks, with what ids are needed
03408     // for each task from receiver
03409 
03410     // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
03411     if( NULL != sendGraph )
03412     {
03413         // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
03414         // receiver task
03415         ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph );  // just copy
03416         sendGraph1->set_context_id( *context_id );
03417         context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
03418         rval                                           = sendGraph1->settle_send_graph( TLcovIDs );MB_CHK_ERR( rval );
03419     }
03420     return moab::MB_SUCCESS;  // success
03421 }
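// Illustrative call sketch (hedged; ids and app handles are hypothetical). This call is collective
// on the joint communicator, typically issued after the source mesh was migrated to the coupler and
// intersected with the target mesh there:
//
//   int context_id = cplOcnID;  // the coverage graph is stored under this context on both sides
//   ErrCode ierr   = iMOAB_CoverageGraph( &jointComm, pidAtm, pidCplAtm, pidIntxAtmOcn,
//                                         &cmpAtmID, &cplAtmID, &context_id );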
03422 
03423 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
03424 {
03425     assert( prefix && strlen( prefix ) );
03426 
03427     ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
03428     std::string prefix_str( prefix );
03429 
03430     if( NULL != cgraph )
03431         cgraph->dump_comm_information( prefix_str, *is_sender );
03432     else
03433     {
03434         std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
03435                   << " context: " << *context_id << "\n";
03436     }
03437     return moab::MB_SUCCESS;
03438 }
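// Illustrative debug usage (hypothetical names): dump the send-side view of the graph stored under
// a given context to text files with a chosen prefix:
//
//   int is_sender = 1, context_id = cplOcnID;
//   ErrCode ierr  = iMOAB_DumpCommGraph( pidAtm, &context_id, &is_sender, "atm_cov" );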
03439 
03440 #endif  // #ifdef MOAB_HAVE_TEMPESTREMAP
03441 
03442 #endif  // #ifdef MOAB_HAVE_MPI
03443 
03444 #ifdef MOAB_HAVE_TEMPESTREMAP
03445 
03446 #ifdef MOAB_HAVE_NETCDF
03447 ErrCode iMOAB_LoadMappingWeightsFromFile(
03448     iMOAB_AppID pid_intersection,
03449     iMOAB_AppID pid_cpl,
03450     int* col_or_row,
03451     int* type,
03452     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03453     const iMOAB_String remap_weights_filename )
03454 {
03455     ErrorCode rval;
03456     bool row_based_partition = true;
03457     if( *col_or_row == 1 ) row_based_partition = false;  // do a column based partition;
03458 
03459     // get the local degrees of freedom, from the pid_cpl and type of mesh
03460 
03461     // Get the source and target data and pcomm objects
03462     appData& data_intx       = context.appDatas[*pid_intersection];
03463     TempestMapAppData& tdata = data_intx.tempestData;
03464 
03465     // Get the handle to the remapper object
03466     if( tdata.remapper == NULL )
03467     {
03468         // Now allocate and initialize the remapper object
03469 #ifdef MOAB_HAVE_MPI
03470         ParallelComm* pco = context.pcomms[*pid_intersection];
03471         tdata.remapper    = new moab::TempestRemapper( context.MBI, pco );
03472 #else
03473         tdata.remapper = new moab::TempestRemapper( context.MBI );
03474 #endif
03475         tdata.remapper->meshValidate     = true;
03476         tdata.remapper->constructEdgeMap = true;
03477         // Do not create new filesets; Use the sets from our respective applications
03478         tdata.remapper->initialize( false );
03479         tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03480     }
03481 
03482     // Setup loading of weights onto TempestOnlineMap
03483     // Set the context for the remapping weights computation
03484     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
03485 
03486     // Now allocate and initialize the remapper object
03487     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03488     assert( weightMap != NULL );
03489 
03490     if( *pid_cpl >= 0 )  // it means we are looking for how to distribute the degrees of freedom, new map reader
03491     {
03492         appData& data1     = context.appDatas[*pid_cpl];
03493         EntityHandle fset1 = data1.file_set;  // this is source or target, depending on direction
03494 
03495         // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03496         Tag gdsTag;
03497 
03498         // find the values on first cell
03499         int lenTagType1 = 1;
03500         if( *type == 1 )
03501         {
03502             rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03503             rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03504         }
03505         Tag tagType2 = context.MBI->globalId_tag();
03506 
03507         std::vector< int > dofValues;
03508 
03509         // populate first tuple
03510         Range
03511             ents_of_interest;  // will be filled with entities on coupler, from which we will get the DOFs, based on type
03512         int ndofPerEl = 1;
03513 
03514         if( *type == 1 )
03515         {
03516             assert( gdsTag );
03517             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03518             dofValues.resize( ents_of_interest.size() * lenTagType1 );
03519             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );
03520             ndofPerEl = lenTagType1;
03521         }
03522         else if( *type == 2 )
03523         {
03524             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03525             dofValues.resize( ents_of_interest.size() );
03526             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );  // just global ids
03527         }
03528         else if( *type == 3 )  // for FV meshes, just get the global id of cell
03529         {
03530             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03531             dofValues.resize( ents_of_interest.size() );
03532             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &dofValues[0] );MB_CHK_ERR( rval );  // just global ids
03533         }
03534         else
03535         {
03536             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
03537         }
03538         // pass ordered dofs, and unique
03539         std::vector< int > orderDofs( dofValues.begin(), dofValues.end() );
03540 
03541         std::sort( orderDofs.begin(), orderDofs.end() );
03542         orderDofs.erase( std::unique( orderDofs.begin(), orderDofs.end() ), orderDofs.end() );  // remove duplicates
03543 
03544         rval = weightMap->ReadParallelMap( remap_weights_filename, orderDofs, row_based_partition );MB_CHK_ERR( rval );
03545 
03546         // if we are on target mesh (row based partition)
03547         if( row_based_partition )
03548         {
03549             tdata.pid_dest = pid_cpl;
03550             tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset1, ents_of_interest );
03551             weightMap->SetDestinationNDofsPerElement( ndofPerEl );
03552             weightMap->set_row_dc_dofs( dofValues );  // will set row_dtoc_dofmap
03553         }
03554         else
03555         {
03556             tdata.pid_src = pid_cpl;
03557             tdata.remapper->SetMeshSet( Remapper::SourceMesh, fset1, ents_of_interest );
03558             weightMap->SetSourceNDofsPerElement( ndofPerEl );
03559             weightMap->set_col_dc_dofs( dofValues );  // will set col_dtoc_dofmap
03560         }
03561     }
03562     else  // old reader, trivial distribution by row
03563     {
03564         std::vector< int > tmp_owned_ids;  // this will do a trivial row distribution
03565         rval = weightMap->ReadParallelMap( remap_weights_filename, tmp_owned_ids, row_based_partition );MB_CHK_ERR( rval );
03566     }
03567 
03568     return moab::MB_SUCCESS;
03569 }
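// Illustrative call sketch (hedged; the app handles and the map file name are hypothetical).
// Loading a previously computed map on the coupler PEs, distributing the dofs by rows (target side)
// according to the coupler ocn mesh (FV cells, type 3):
//
//   int col_or_row = 0;  // anything but 1 selects the row-based (target) partition
//   int type       = 3;  // FV mesh, dofs are the cell GLOBAL_IDs
//   ErrCode ierr   = iMOAB_LoadMappingWeightsFromFile( pidIntxAtmOcn, pidCplOcn, &col_or_row, &type,
//                                                      "scalar", "atm_ocn_map.nc" );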
03570 
03571 ErrCode iMOAB_WriteMappingWeightsToFile(
03572     iMOAB_AppID pid_intersection,
03573     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03574     const iMOAB_String remap_weights_filename )
03575 {
03576     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03577     assert( remap_weights_filename && strlen( remap_weights_filename ) );
03578 
03579     ErrorCode rval;
03580 
03581     // Get the source and target data and pcomm objects
03582     appData& data_intx       = context.appDatas[*pid_intersection];
03583     TempestMapAppData& tdata = data_intx.tempestData;
03584 
03585     // Get the handle to the remapper object
03586     assert( tdata.remapper != NULL );
03587 
03588     // Now get online weights object and ensure it is valid
03589     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03590     assert( weightMap != NULL );
03591 
03592     std::string filename = std::string( remap_weights_filename );
03593 
03594     // Write the map file to disk in parallel using either HDF5 or SCRIP interface
03595     rval = weightMap->WriteParallelMap( filename );MB_CHK_ERR( rval );
03596 
03597     return moab::MB_SUCCESS;
03598 }
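// Illustrative counterpart (hypothetical names): after weights were computed or loaded under the
// "scalar" identifier, they can be written back to disk in parallel:
//
//   ErrCode ierr = iMOAB_WriteMappingWeightsToFile( pidIntxAtmOcn, "scalar", "atm_ocn_map_out.nc" );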
03599 
03600 #ifdef MOAB_HAVE_MPI
03601 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
03602                               iMOAB_AppID pid2,
03603                               iMOAB_AppID pid3,
03604                               MPI_Comm* jointcomm,
03605                               MPI_Group* groupA,
03606                               MPI_Group* groupB,
03607                               int* type,
03608                               int* comp1,
03609                               int* comp2,
03610                               int* direction )
03611 {
03612     assert( jointcomm );
03613     assert( groupA );
03614     assert( groupB );
03615     bool is_fortran = false;
03616     if (*pid1 >=0) is_fortran = context.appDatas[*pid1].is_fortran || is_fortran;
03617     if (*pid2 >=0) is_fortran = context.appDatas[*pid2].is_fortran || is_fortran;
03618     if (*pid3 >=0) is_fortran = context.appDatas[*pid3].is_fortran || is_fortran;
03619 
03620     MPI_Comm joint_communicator =
03621         ( is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) ) : *jointcomm );
03622     MPI_Group group_first =
03623         ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
03624     MPI_Group group_second =
03625         ( is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
03626 
03627     ErrorCode rval = MB_SUCCESS;
03628     int localRank = 0, numProcs = 1;
03629 
03630     // Get the local rank and number of processes in the joint communicator
03631     MPI_Comm_rank( joint_communicator, &localRank );
03632     MPI_Comm_size( joint_communicator, &numProcs );
03633 
03634     // Next instantiate the par comm graph
03635     // here we need to look at direction
03636     // this direction is good for atm source -> ocn target coupler example
03637     ParCommGraph* cgraph = NULL;
03638     if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
03639 
03640     ParCommGraph* cgraph_rev = NULL;
03641     if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
03642 
03643     // we should check whether a comm graph with the same component ids already exists,
03644     // by searching the existing comm graphs in the map context.appDatas[*pid].pgraph
03645     if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;      // the context will be the other comp
03646     if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev;  // from 2 to 1
03647 
03648     // each model has a list of global ids that will need to be sent through the gs (crystal router)
03649     // to rendezvous with the other model on the joint_communicator
03650     TupleList TLcomp1;
03651     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03652     TupleList TLcomp2;
03653     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03654 
03655     // will push_back a new tuple, if needed
03656     TLcomp1.enableWriteAccess();
03657 
03658     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03659     Tag gdsTag;
03660 
03661     // find the values on first cell
03662     int lenTagType1 = 1;
03663     if( *type == 1 )
03664     {
03665         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03666         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03667     }
03668     Tag tagType2 = context.MBI->globalId_tag();
03669 
03670     std::vector< int > valuesComp1;
03671 
03672     // populate first tuple
03673     Range ents_of_interest;  // will be filled with entities on pid1, that need to be distributed,
03674                              // rearranged in split_ranges map
03675     if( *pid1 >= 0 )
03676     {
03677         appData& data1     = context.appDatas[*pid1];
03678         EntityHandle fset1 = data1.file_set;
03679 
03680         if( *type == 1 )
03681         {
03682             assert( gdsTag );
03683             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03684             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
03685             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
03686         }
03687         else if( *type == 2 )
03688         {
03689             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03690             valuesComp1.resize( ents_of_interest.size() );
03691             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03692         }
03693         else if( *type == 3 )  // for FV meshes, just get the global id of cell
03694         {
03695             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03696             valuesComp1.resize( ents_of_interest.size() );
03697             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03698         }
03699         else
03700         {
03701             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
03702         }
03703         // now fill the tuple list with info and markers
03704         // because we will send only the ids, order and compress the list
03705         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
03706         TLcomp1.resize( uniq.size() );
03707         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03708         {
03709             // to proc, marker, element local index, index in el
03710             int marker               = *sit;
03711             int to_proc              = marker % numProcs;
03712             int n                    = TLcomp1.get_n();
03713             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
03714             TLcomp1.vi_wr[2 * n + 1] = marker;
03715             TLcomp1.inc_n();
03716         }
03717     }
03718 
03719     ProcConfig pc( joint_communicator );  // proc config does the crystal router
03720     pc.crystal_router()->gs_transfer( 1, TLcomp1,
03721                                       0 );  // communication towards joint tasks, with markers
03722     // sort by value (key 1)
03723 #ifdef VERBOSE
03724     std::stringstream ff1;
03725     ff1 << "TLcomp1_" << localRank << ".txt";
03726     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03727 #endif
03728     moab::TupleList::buffer sort_buffer;
03729     sort_buffer.buffer_init( TLcomp1.get_n() );
03730     TLcomp1.sort( 1, &sort_buffer );
03731     sort_buffer.reset();
03732 #ifdef VERBOSE
03733     // after sorting
03734     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03735 #endif
03736     // do the same, for the other component, number2, with type2
03737     // start copy
03738     TLcomp2.enableWriteAccess();
03739 
03740     moab::TempestOnlineMap* weightMap = NULL;  // declare it outside, but it is meaningful only when *pid2 >= 0
03741     // (that is, on the coupler PEs, where the read map and the coupler procs exist)
03742     // populate second tuple with ids  from read map: we need row_gdofmap and col_gdofmap
03743     std::vector< int > valuesComp2;
03744     if( *pid2 >= 0 )  // we are now on coupler, map side
03745     {
03746         appData& data2           = context.appDatas[*pid2];
03747         TempestMapAppData& tdata = data2.tempestData;
03748         // should be only one map, read from file
03749         assert( tdata.weightMaps.size() == 1 );
03750         // maybe we need to check it is the map we expect
03751         weightMap = tdata.weightMaps.begin()->second;
03752         // std::vector<int> ids_of_interest;
03753         // do a deep copy of the ids of interest: row ids or col ids, target or source direction
03754         if( *direction == 1 )
03755         {
03756             // we are interested in col ids, source
03757             // new method from moab::TempestOnlineMap
03758             rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
03759         }
03760         else if( *direction == 2 )
03761         {
03762             // we are interested in row ids, for target ids
03763             rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
03764         }
03765         //
03766 
03767         // now fill the tuple list with info and markers
03768         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
03769         TLcomp2.resize( uniq.size() );
03770         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03771         {
03772             // to proc, marker, element local index, index in el
03773             int marker               = *sit;
03774             int to_proc              = marker % numProcs;
03775             int n                    = TLcomp2.get_n();
03776             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
03777             TLcomp2.vi_wr[2 * n + 1] = marker;
03778             TLcomp2.inc_n();
03779         }
03780     }
03781     pc.crystal_router()->gs_transfer( 1, TLcomp2,
03782                                       0 );  // communication towards joint tasks, with markers
03783     // sort by value (key 1)
03784     // in the rendez-vous approach, markers meet at meet point (rendez-vous) processor marker % nprocs
03785 #ifdef VERBOSE
03786     std::stringstream ff2;
03787     ff2 << "TLcomp2_" << localRank << ".txt";
03788     TLcomp2.print_to_file( ff2.str().c_str() );
03789 #endif
03790     sort_buffer.buffer_reserve( TLcomp2.get_n() );
03791     TLcomp2.sort( 1, &sort_buffer );
03792     sort_buffer.reset();
03793     // end copy
03794 #ifdef VERBOSE
03795     TLcomp2.print_to_file( ff2.str().c_str() );
03796 #endif
03797     // need to send back the info, from the rendezvous point, for each of the values
03798     /* go over each value, on the local process in the joint communicator;
03799 
03800     we now have to send back the info needed for communication:
03801      loop in sync over both TLcomp1 and TLcomp2, on the local process;
03802       build new tuple lists to send the information back, and
03803       populate them at the same time, based on the marker, which is indexed
03804     */
03805 
03806     TupleList TLBackToComp1;
03807     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
03808     TLBackToComp1.enableWriteAccess();
03809 
03810     TupleList TLBackToComp2;
03811     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
03812     TLBackToComp2.enableWriteAccess();
03813 
03814     int n1 = TLcomp1.get_n();
03815     int n2 = TLcomp2.get_n();
03816 
03817     int indexInTLComp1 = 0;
03818     int indexInTLComp2 = 0;  // advance both, according to the marker
03819     if( n1 > 0 && n2 > 0 )
03820     {
03821 
03822         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
03823         {
03824             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
03825             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
03826             if( currentValue1 < currentValue2 )
03827             {
03828                 // we have a big problem; basically, we are saying that
03829                 // dof currentValue is on one model and not on the other
03830                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
03831                 indexInTLComp1++;
03832                 continue;
03833             }
03834             if( currentValue1 > currentValue2 )
03835             {
03836                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
03837                 indexInTLComp2++;
03838                 continue;
03839             }
03840             int size1 = 1;
03841             int size2 = 1;
03842             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
03843                 size1++;
03844             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
03845                 size2++;
03846             // must be found in both lists, find the start and end indices
03847             for( int i1 = 0; i1 < size1; i1++ )
03848             {
03849                 for( int i2 = 0; i2 < size2; i2++ )
03850                 {
03851                     // send the info back to components
03852                     int n = TLBackToComp1.get_n();
03853                     TLBackToComp1.reserve();
03854                     TLBackToComp1.vi_wr[3 * n] =
03855                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
03856                                                                      // came from, info from comp2
03857                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03858                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
03859                     n                              = TLBackToComp2.get_n();
03860                     TLBackToComp2.reserve();
03861                     TLBackToComp2.vi_wr[3 * n] =
03862                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
03863                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03864                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
03865                     // what if there are repeated markers in TLcomp2? increase just index2
03866                 }
03867             }
03868             indexInTLComp1 += size1;
03869             indexInTLComp2 += size2;
03870         }
03871     }
03872     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about
03873     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
03874 
03875     TupleList TLv;  // vertices
03876     TupleList TLc;  // cells if needed (not type 2)
03877 
03878     if( *pid1 >= 0 )
03879     {
03880         // we are on original comp 1 tasks
03881         // before ordering
03882         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
03883         // processors it communicates with
03884 #ifdef VERBOSE
03885         std::stringstream f1;
03886         f1 << "TLBack1_" << localRank << ".txt";
03887         TLBackToComp1.print_to_file( f1.str().c_str() );
03888 #endif
03889         // so we are now on pid1; we now know, for each marker, where it has to go
03890         // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map
03891         rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
03892         // we can just send vertices and elements, with crystal routers;
03893         // on the receiving end, make sure they are not duplicated, by looking at the global id
03894         // if *type is 1, also send global_dofs tag in the element tuple
03895         rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
03896     }
03897     else
03898     {
03899         TLv.initialize( 2, 0, 0, 3, 0 ); // no vertices here, for sure
03900         TLv.enableWriteAccess(); // to be able to receive stuff, even if nothing is here yet, on this task
03901         if (*type != 2) // for point cloud, we do not need to initialize TLc (for cells)
03902         {
03903             // we still need to initialize the tuples with the right size, as in form_tuples_to_migrate_mesh
03904             int size_tuple = 2 + ( ( *type != 1 ) ? 0 : lenTagType1 ) + 1 + 10;  // 10 is the max number of vertices in cell; kind of arbitrary
03905             TLc.initialize( size_tuple, 0, 0, 0, 0 );
03906             TLc.enableWriteAccess();
03907         }
03908 
03909     }
03910     pc.crystal_router()->gs_transfer( 1, TLv, 0 );  // communication towards coupler tasks, with mesh vertices
03911     if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );  // those are cells
03912 
03913     if( *pid3 >= 0 )  // will receive the mesh, on coupler pes!
03914     {
03915         appData& data3     = context.appDatas[*pid3];
03916         EntityHandle fset3 = data3.file_set;
03917         Range primary_ents3;                 // vertices for type 2, cells of dim 2 for type 1 or 3
03918         std::vector< int > values_entities;  // will be the size of primary_ents3 * lenTagType1
03919         rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
03920                                                   values_entities );MB_CHK_ERR( rval );
03921         iMOAB_UpdateMeshInfo( pid3 );
03922         int ndofPerEl = 1;
03923         if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
03924         // because we are on the coupler, we know that the read map pid2 exists
03925         assert( *pid2 >= 0 );
03926         appData& dataIntx        = context.appDatas[*pid2];
03927         TempestMapAppData& tdata = dataIntx.tempestData;
03928 
03929         // if we are on source coverage, direction 1, we can set covering mesh, covering cells
03930         if( 1 == *direction )
03931         {
03932             tdata.pid_src = pid3;
03933             tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, primary_ents3 );
03934             weightMap->SetSourceNDofsPerElement( ndofPerEl );
03935             weightMap->set_col_dc_dofs( values_entities );  // will set col_dtoc_dofmap
03936         }
03937         // if we are on target, we can set the target cells
03938         else
03939         {
03940             tdata.pid_dest = pid3;
03941             tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, primary_ents3 );
03942             weightMap->SetDestinationNDofsPerElement( ndofPerEl );
03943             weightMap->set_row_dc_dofs( values_entities );  // will set row_dtoc_dofmap
03944         }
03945     }
03946 
03947     return moab::MB_SUCCESS;
03948 }
03949 
03950 #endif  // #ifdef MOAB_HAVE_MPI
03951 
03952 #endif  // #ifdef MOAB_HAVE_NETCDF
03953 
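The exchange above follows MOAB's crystal-router pattern: each task packs fixed-size integer tuples whose first entry is the destination rank, and gs_transfer then routes every tuple to that rank. A minimal, hedged sketch of this pattern (outside iMOAB; numMarkers, destRank, markerValue and the ParallelComm pointer pco are placeholder names):

    moab::TupleList TL;
    TL.initialize( 2, 0, 0, 0, numMarkers );  // 2 ints per tuple; no longs, handles or reals
    TL.enableWriteAccess();
    for( int k = 0; k < numMarkers; k++ )
    {
        int n = TL.get_n();
        TL.reserve();                            // grow the used count by one tuple
        TL.vi_wr[2 * n]     = destRank( k );     // first int drives the routing
        TL.vi_wr[2 * n + 1] = markerValue( k );  // payload carried along
    }
    pco->crystal_router()->gs_transfer( 1, TL, 0 );  // afterwards TL holds the tuples addressed to this rank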
03954 #define USE_API
03955 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
03956 {
03957     ErrorCode rval;
03958     moab::CartVect pos;
03959 
03960     Range& verts                   = context.appDatas[*pid].all_verts;
03961     moab::EntityHandle firstVertex = ( verts[0] );
03962 
03963     // coordinate data
03964     rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
03965 
03966     // compute the distance from origin
03967     // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
03968     *radius = pos.length();
03969     return moab::MB_SUCCESS;
03970 }
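The TODO above could be addressed by looping over all vertices instead of inspecting only the first one; a hedged sketch (the tolerance is an assumed value, not part of iMOAB):

    std::vector< double > coords( 3 * verts.size() );
    rval = context.MBI->get_coords( verts, &coords[0] );MB_CHK_ERR( rval );
    for( size_t i = 0; i < verts.size(); i++ )
    {
        moab::CartVect p( &coords[3 * i] );
        if( fabs( p.length() - *radius ) > 1.e-10 )  // assumed tolerance
            return moab::MB_FAILURE;                 // mesh is not on a sphere of this radius
    }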
03971 
03972 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
03973 {
03974     ErrorCode rval;
03975     ErrCode ierr;
03976     bool validate = true;
03977 
03978     double radius_source = 1.0;
03979     double radius_target = 1.0;
03980     const double epsrel  = ReferenceTolerance;  // ReferenceTolerance is defined in Defines.h in the tempestremap source
03981     const double boxeps  = 1.e-6;
03982 
03983     // Get the source and target data and pcomm objects
03984     appData& data_src  = context.appDatas[*pid_src];
03985     appData& data_tgt  = context.appDatas[*pid_tgt];
03986     appData& data_intx = context.appDatas[*pid_intx];
03987 #ifdef MOAB_HAVE_MPI
03988     ParallelComm* pco_intx = context.pcomms[*pid_intx];
03989 #endif
03990 
03991     // Mesh intersection has already been computed; Return early.
03992     TempestMapAppData& tdata = data_intx.tempestData;
03993     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
03994 
03995     bool is_parallel = false, is_root = true;
03996     int rank = 0;
03997 #ifdef MOAB_HAVE_MPI
03998     if( pco_intx )
03999     {
04000         rank        = pco_intx->rank();
04001         is_parallel = ( pco_intx->size() > 1 );
04002         is_root     = ( rank == 0 );
04003         rval        = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
04004     }
04005 #endif
04006 
04007     moab::DebugOutput outputFormatter( std::cout, rank, 0 );
04008     outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
04009 
04010     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
04011     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
04012 
04013     // Compute the radii of both meshes; they are rescaled below if they differ
04014     ComputeSphereRadius( pid_src, &radius_source );
04015     ComputeSphereRadius( pid_tgt, &radius_target );
04016 #ifdef VERBOSE
04017     if( is_root )
04018         outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
04019                                 radius_target );
04020 #endif
04021 
04022     /* Let us make sure that the radii match for the source and target meshes. If not, rescale now
04023      * and unscale later. */
04024     double defaultradius = 1.0;
04025     if( fabs( radius_source - radius_target ) > 1e-10 )
04026     { /* the radii are different */
04027         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
04028         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
04029     }
04030 
04031     // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
04032 #ifdef MOAB_HAVE_TEMPESTREMAP
04033     IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
04034 #else
04035     IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
04036 #endif
04037 
04038     // print verbosely about the problem setting
04039     bool use_kdtree_search = false;
04040     double srctgt_areas[2], srctgt_areas_glb[2];
04041     {
04042         moab::Range rintxverts, rintxelems;
04043         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
04044         rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
04045         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
04046         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );MB_CHK_ERR( rval );
04047         srctgt_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
04048 #ifdef VERBOSE
04049         if( is_root )
04050             outputFormatter.printf( 0, "The red set contains %d vertices and %d elements \n",
04051                                     (int)rintxverts.size(), (int)rintxelems.size() );
04052 #endif
04053 
04054         moab::Range bintxverts, bintxelems;
04055         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
04056         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
04057         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
04058         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );MB_CHK_ERR( rval );
04059         srctgt_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
04060 #ifdef VERBOSE
04061         if( is_root )
04062             outputFormatter.printf( 0, "The blue set contains %d vertices and %d elements \n",
04063                                     (int)bintxverts.size(), (int)bintxelems.size() );
04064 #endif
04065 #ifdef MOAB_HAVE_MPI
04066         MPI_Allreduce( &srctgt_areas[0], &srctgt_areas_glb[0], 2, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
04067 #else
04068         srctgt_areas_glb[0] = srctgt_areas[0];
04069         srctgt_areas_glb[1] = srctgt_areas[1];
04070 #endif
04071         use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
04072     }
04073 
04074     data_intx.dimension = data_tgt.dimension;
04075     // set the context for the source and destination applications
04076     tdata.pid_src  = pid_src;
04077     tdata.pid_dest = pid_tgt;
04078 
04079     // Now allocate and initialize the remapper object
04080 #ifdef MOAB_HAVE_MPI
04081     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
04082 #else
04083     tdata.remapper = new moab::TempestRemapper( context.MBI );
04084 #endif
04085     tdata.remapper->meshValidate     = true;
04086     tdata.remapper->constructEdgeMap = true;
04087 
04088     // Do not create new filesets; Use the sets from our respective applications
04089     tdata.remapper->initialize( false );
04090     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
04091     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
04092     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
04093 
04094     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
04095     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
04096 
04097     // First, compute the covering source set.
04098     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
04099 
04100     // Next, compute intersections with MOAB.
04101     rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );MB_CHK_ERR( rval );
04102 
04103     // Mapping computation done
04104     if( validate )
04105     {
04106         double local_area,
04107             global_areas[3];  // initial source and target areas, plus the computed overlap area
04108         local_area = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
04109 
04110         global_areas[0] = srctgt_areas_glb[0];
04111         global_areas[1] = srctgt_areas_glb[1];
04112 
04113 #ifdef MOAB_HAVE_MPI
04114         if( is_parallel )
04115         {
04116             MPI_Reduce( &local_area, &global_areas[2], 1, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
04117         }
04118         else
04119         {
04120             global_areas[2] = local_area;
04121         }
04122 #else
04123         global_areas[2] = local_area;
04124 #endif
04125         if( is_root )
04126         {
04127             outputFormatter.printf( 0,
04128                                     "initial area: source mesh = %12.14f, target mesh = %12.14f, "
04129                                     "overlap mesh = %12.14f\n",
04130                                     global_areas[0], global_areas[1], global_areas[2] );
04131             outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
04132                                     fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
04133                                     fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
04134         }
04135     }
04136 
04137     return moab::MB_SUCCESS;
04138 }
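A hedged caller-side sketch of how this routine is typically invoked on the coupler tasks (the application ids are placeholders for ids previously obtained from iMOAB_RegisterApplication, and the source/target meshes are assumed to be already loaded or migrated):

    int atm_id = 5, ocn_id = 17, intx_id = 22;  // assumed, already registered application ids
    iMOAB_AppID pid_a = &atm_id, pid_o = &ocn_id, pid_i = &intx_id;
    ErrCode ierr = iMOAB_ComputeMeshIntersectionOnSphere( pid_a, pid_o, pid_i );
    if( ierr ) return ierr;  // on success, the overlap mesh lives in the file set of pid_i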
04139 
04140 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
04141 {
04142     ErrorCode rval;
04143     ErrCode ierr;
04144 
04145     double radius_source = 1.0;
04146     double radius_target = 1.0;
04147     const double epsrel  = ReferenceTolerance;  // ReferenceTolerance is defined in Defines.h in the tempestremap source
04148     const double boxeps  = 1.e-8;
04149 
04150     // Get the source and target data and pcomm objects
04151     appData& data_src  = context.appDatas[*pid_src];
04152     appData& data_tgt  = context.appDatas[*pid_tgt];
04153     appData& data_intx = context.appDatas[*pid_intx];
04154 #ifdef MOAB_HAVE_MPI
04155     ParallelComm* pco_intx = context.pcomms[*pid_intx];
04156 #endif
04157 
04158     // Mesh intersection has already been computed; Return early.
04159     TempestMapAppData& tdata = data_intx.tempestData;
04160     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
04161 
04162 #ifdef MOAB_HAVE_MPI
04163     if( pco_intx )
04164     {
04165         rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
04166     }
04167 #endif
04168 
04169     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
04170     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
04171 
04172     // Compute the radii of both meshes; they are rescaled below if they differ
04173     ComputeSphereRadius( pid_src, &radius_source );
04174     ComputeSphereRadius( pid_tgt, &radius_target );
04175 
04176     IntxAreaUtils areaAdaptor;
04177     // print verbosely about the problem setting
04178     {
04179         moab::Range rintxverts, rintxelems;
04180         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
04181         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
04182         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
04183         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
04184 #ifdef VERBOSE
04185         std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
04186                   << " elements \n";
04187 #endif
04188 
04189         moab::Range bintxverts, bintxelems;
04190         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
04191         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
04192         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
04193         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
04194 #ifdef VERBOSE
04195         std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
04196                   << " elements \n";
04197 #endif
04198     }
04199 
04200     data_intx.dimension = data_tgt.dimension;
04201     // set the context for the source and destination applications
04203     tdata.pid_src         = pid_src;
04204     tdata.pid_dest        = pid_tgt;
04205     data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
04206     assert( data_intx.point_cloud == true );
04207 
04208     // Now allocate and initialize the remapper object
04209 #ifdef MOAB_HAVE_MPI
04210     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
04211 #else
04212     tdata.remapper = new moab::TempestRemapper( context.MBI );
04213 #endif
04214     tdata.remapper->meshValidate     = true;
04215     tdata.remapper->constructEdgeMap = true;
04216 
04217     // Do not create new filesets; Use the sets from our respective applications
04218     tdata.remapper->initialize( false );
04219     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
04220     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
04221     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
04222 
04223     /* Let us make sure that the radii match for the source and target meshes. If not, rescale now
04224      * and unscale later. */
04225     if( fabs( radius_source - radius_target ) > 1e-10 )
04226     { /* the radii are different */
04227         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
04228         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
04229     }
04230 
04231     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
04232     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
04233 
04234     // First, compute the covering source set.
04235     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
04236 
04237 #ifdef MOAB_HAVE_MPI
04238     /* VSM: This context should be set on the data_src but it would overwrite the source
04239        covering set context in case it is coupled to another APP as well.
04240        This needs a redesign. */
04241     // data_intx.covering_set = tdata.remapper->GetCoveringSet();
04242     // data_src.covering_set = tdata.remapper->GetCoveringSet();
04243 #endif
04244 
04245     // Now let us re-convert the MOAB mesh back to Tempest representation
04246     rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
04247 
04248     return moab::MB_SUCCESS;
04249 }
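A hedged caller-side sketch (application ids as in the earlier example are placeholders): for point-cloud couplings this routine takes the place of iMOAB_ComputeMeshIntersectionOnSphere in the coupling sequence; no overlap mesh is produced, only the covering set and the global/local DoF maps, and weight generation follows as usual.

    ErrCode ierr = iMOAB_ComputePointDoFIntersection( pid_a, pid_o, pid_i );
    if( ierr ) return ierr;  // next step: iMOAB_ComputeScalarProjectionWeights on pid_i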
04250 
04251 ErrCode iMOAB_ComputeScalarProjectionWeights(
04252     iMOAB_AppID pid_intx,
04253     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
04254     const iMOAB_String disc_method_source,
04255     int* disc_order_source,
04256     const iMOAB_String disc_method_target,
04257     int* disc_order_target,
04258     int* fNoBubble,
04259     int* fMonotoneTypeID,
04260     int* fVolumetric,
04261     int* fInverseDistanceMap,
04262     int* fNoConservation,
04263     int* fValidate,
04264     const iMOAB_String source_solution_tag_dof_name,
04265     const iMOAB_String target_solution_tag_dof_name )
04266 {
04267     moab::ErrorCode rval;
04268 
04269     assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
04270     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
04271     assert( disc_method_source && strlen( disc_method_source ) );
04272     assert( disc_method_target && strlen( disc_method_target ) );
04273     assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
04274     assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
04275 
04276     // Get the source and target data and pcomm objects
04277     appData& data_intx       = context.appDatas[*pid_intx];
04278     TempestMapAppData& tdata = data_intx.tempestData;
04279 
04280     // Setup computation of weights
04281     // Set the context for the remapping weights computation
04282     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
04283 
04284     // Call to generate the remap weights with the tempest meshes
04285     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
04286     assert( weightMap != NULL );
04287 
04288     GenerateOfflineMapAlgorithmOptions mapOptions;
04289     mapOptions.nPin           = *disc_order_source;
04290     mapOptions.nPout          = *disc_order_target;
04291     mapOptions.fSourceConcave = false;
04292     mapOptions.fTargetConcave = false;
04293 
04294     mapOptions.strMethod = "";
04295     if( fMonotoneTypeID )
04296     {
04297         switch( *fMonotoneTypeID )
04298         {
04299             case 0:
04300                 mapOptions.fMonotone = false;
04301                 break;
04302             case 3:
04303                 mapOptions.strMethod += "mono3;";
04304                 break;
04305             case 2:
04306                 mapOptions.strMethod += "mono2;";
04307                 break;
04308             default:
04309                 mapOptions.fMonotone = true;
04310         }
04311     }
04312     else
04313         mapOptions.fMonotone = false;
04314 
04315     mapOptions.fNoBubble       = ( fNoBubble ? *fNoBubble : false );
04316     mapOptions.fNoConservation = ( fNoConservation ? *fNoConservation > 0 : false );
04317     mapOptions.fNoCorrectAreas = false;
04318     mapOptions.fNoCheck        = !( fValidate ? *fValidate : true );
04319     if( fVolumetric && *fVolumetric ) mapOptions.strMethod += "volumetric;";
04320     if( fInverseDistanceMap && *fInverseDistanceMap ) mapOptions.strMethod += "invdist;";
04321 
04322     // Now let us compute the local-global mapping and store it in the context
04323     // We need this mapping when computing matvec products and to do reductions in parallel
04324     // Additionally, the call below will also compute weights with TempestRemap
04325     rval = weightMap->GenerateRemappingWeights(
04326         std::string( disc_method_source ),            // const std::string& strInputType
04327         std::string( disc_method_target ),            // const std::string& strOutputType
04328         mapOptions,                                   // GenerateOfflineMapAlgorithmOptions& mapOptions
04329         std::string( source_solution_tag_dof_name ),  // const std::string& srcDofTagName = "GLOBAL_ID"
04330         std::string( target_solution_tag_dof_name )   // const std::string& tgtDofTagName = "GLOBAL_ID"
04331     );MB_CHK_ERR( rval );
04332 
04333     return moab::MB_SUCCESS;
04334 }
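A hedged usage sketch: generate first-order, conservative FV-to-FV weights identified as "scalar", with GLOBAL_ID as the DoF tag on both sides (the flag values and the "fv" method strings are illustrative assumptions, not the only valid choices):

    int order_src = 1, order_tgt = 1;
    int noBubble = 1, monotone = 0, volumetric = 0, invDist = 0, noConserve = 0, validate = 1;
    ErrCode ierr = iMOAB_ComputeScalarProjectionWeights( pid_i, "scalar", "fv", &order_src, "fv", &order_tgt,
                                                         &noBubble, &monotone, &volumetric, &invDist, &noConserve,
                                                         &validate, "GLOBAL_ID", "GLOBAL_ID" );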
04335 
04336 ErrCode iMOAB_ApplyScalarProjectionWeights(
04337     iMOAB_AppID pid_intersection,
04338     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
04339     const iMOAB_String source_solution_tag_name,
04340     const iMOAB_String target_solution_tag_name )
04341 {
04342     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
04343     assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
04344     assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
04345 
04346     moab::ErrorCode rval;
04347 
04348     // Get the source and target data and pcomm objects
04349     appData& data_intx       = context.appDatas[*pid_intersection];
04350     TempestMapAppData& tdata = data_intx.tempestData;
04351 
04352     // Now allocate and initialize the remapper object
04353     moab::TempestRemapper* remapper   = tdata.remapper;
04354     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
04355 
04356     // we assume that there are separators ":" between the tag names
04357     std::vector< std::string > srcNames;
04358     std::vector< std::string > tgtNames;
04359     std::vector< Tag > srcTagHandles;
04360     std::vector< Tag > tgtTagHandles;
04361     std::string separator( ":" );
04362     std::string src_name( source_solution_tag_name );
04363     std::string tgt_name( target_solution_tag_name );
04364     split_tag_names( src_name, separator, srcNames );
04365     split_tag_names( tgt_name, separator, tgtNames );
04366     if( srcNames.size() != tgtNames.size() )
04367     {
04368         std::cout << " error in parsing source and target tag names. \n";
04369         return moab::MB_FAILURE;
04370     }
04371 
04372     for( size_t i = 0; i < srcNames.size(); i++ )
04373     {
04374         Tag tagHandle;
04375         rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
04376         if( MB_SUCCESS != rval || NULL == tagHandle )
04377         {
04378             return moab::MB_FAILURE;
04379         }
04380         srcTagHandles.push_back( tagHandle );
04381         rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
04382         if( MB_SUCCESS != rval || NULL == tagHandle )
04383         {
04384             return moab::MB_FAILURE;
04385         }
04386         tgtTagHandles.push_back( tagHandle );
04387     }
04388 
04389     std::vector< double > solSTagVals;
04390     std::vector< double > solTTagVals;
04391 
04392     moab::Range sents, tents;
04393     if( data_intx.point_cloud )
04394     {
04395         appData& data_src = context.appDatas[*tdata.pid_src];
04396         appData& data_tgt = context.appDatas[*tdata.pid_dest];
04397         if( data_src.point_cloud )
04398         {
04399             moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
04400             solSTagVals.resize( covSrcEnts.size(), -1.0 );
04401             sents = covSrcEnts;
04402         }
04403         else
04404         {
04405             moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
04406             solSTagVals.resize( covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() *
04407                                     weightMap->GetSourceNDofsPerElement(),
04408                                 -1.0 );
04409             sents = covSrcEnts;
04410         }
04411         if( data_tgt.point_cloud )
04412         {
04413             moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
04414             solTTagVals.resize( tgtEnts.size(), -1.0 );
04415             tents = tgtEnts;
04416         }
04417         else
04418         {
04419             moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
04420             solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
04421                                     weightMap->GetDestinationNDofsPerElement(),
04422                                 -1.0 );
04423             tents = tgtEnts;
04424         }
04425     }
04426     else
04427     {
04428         moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
04429         moab::Range& tgtEnts    = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
04430         solSTagVals.resize(
04431             covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
04432         solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
04433                                 weightMap->GetDestinationNDofsPerElement(),
04434                             -1.0 );
04435 
04436         sents = covSrcEnts;
04437         tents = tgtEnts;
04438     }
04439 
04440     for( size_t i = 0; i < srcTagHandles.size(); i++ )
04441     {
04442         // The tag data is np*np*n_el_src
04443         Tag ssolnTag = srcTagHandles[i];
04444         Tag tsolnTag = tgtTagHandles[i];
04445         rval         = context.MBI->tag_get_data( ssolnTag, sents, &solSTagVals[0] );MB_CHK_ERR( rval );
04446 
04447         // Compute the application of weights on the source solution data and store it in the
04448         // destination solution vector data. Optionally, the transpose application of the weight
04449         // matrix can be performed as well; set the 3rd argument to true if this is needed.
04450         rval = weightMap->ApplyWeights( solSTagVals, solTTagVals, false );MB_CHK_ERR( rval );
04451 
04452         // The tag data is np*np*n_el_dest
04453         rval = context.MBI->tag_set_data( tsolnTag, tents, &solTTagVals[0] );MB_CHK_ERR( rval );
04454     }
04455 
04456 // #define VERBOSE
04457 #ifdef VERBOSE
04458     ParallelComm* pco_intx = context.pcomms[*pid_intersection];
04459 
04460     int ivar = 0;
04461     {
04462         Tag ssolnTag = srcTagHandles[ivar];
04463         std::stringstream sstr;
04464         sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04465         std::ofstream output_file( sstr.str().c_str() );
04466         for( unsigned i = 0; i < sents.size(); ++i )
04467         {
04468             EntityHandle elem = sents[i];
04469             std::vector< double > locsolSTagVals( 16 );
04470             rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
04471             output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
04472             for( unsigned j = 0; j < 16; ++j )
04473                 output_file << locsolSTagVals[j] << " ";
04474         }
04475         output_file.flush();  // required here
04476         output_file.close();
04477     }
04478     {
04479         std::stringstream sstr;
04480         sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04481         EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
04482                                  context.appDatas[*tdata.pid_dest].file_set };
04483         rval                 = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04484     }
04485     {
04486         std::stringstream sstr;
04487         sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04488         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04489         EntityHandle covering_set = remapper->GetCoveringSet();
04490         EntityHandle sets[2]      = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
04491         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04492     }
04493     {
04494         std::stringstream sstr;
04495         sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04496         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04497         EntityHandle covering_set = remapper->GetCoveringSet();
04498         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
04499     }
04500     {
04501         std::stringstream sstr;
04502         sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04503         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04504         EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
04505         rval                    = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
04506     }
04507     {
04508         std::stringstream sstr;
04509         sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04510         std::ofstream output_file( sstr.str().c_str() );
04511         for( unsigned i = 0; i < solSTagVals.size(); ++i )
04512             output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
04513                         << solSTagVals[i] << "\n";
04514         output_file.flush();  // required here
04515         output_file.close();
04516     }
04517 #endif
04518     // #undef VERBOSE
04519 
04520     return moab::MB_SUCCESS;
04521 }
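A hedged usage sketch of applying the weights computed above: several fields can be projected in one call by joining tag names with ':' (the tag names below are placeholders and are assumed to already exist on the source and target applications):

    ErrCode ierr = iMOAB_ApplyScalarProjectionWeights( pid_i, "scalar",
                                                       "a2oTbot:a2oUbot",          /* source tags */
                                                       "a2oTbot_proj:a2oUbot_proj" /* target tags */ );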
04522 
04523 #endif  // MOAB_HAVE_TEMPESTREMAP
04524 
04525 #ifdef __cplusplus
04526 }
04527 #endif