MOAB: Mesh Oriented datABase  (version 5.3.1)
iMOAB.cpp
00001 /** \file iMOAB.cpp
00002  */
00003 
00004 #include "moab/MOABConfig.h"
00005 #include "moab/Core.hpp"
00006 
00007 #ifdef MOAB_HAVE_MPI
00008 #include "moab_mpi.h"
00009 #include "moab/ParallelComm.hpp"
00010 #include "moab/ParCommGraph.hpp"
00011 #include "moab/ParallelMergeMesh.hpp"
00012 #include "moab/IntxMesh/IntxUtils.hpp"
00013 #endif
00014 #include "DebugOutput.hpp"
00015 #include "moab/iMOAB.h"
00016 
00017 /* this is needed because of direct access to hdf5/mhdf */
00018 #ifdef MOAB_HAVE_HDF5
00019 #include "mhdf.h"
00020 #include <H5Tpublic.h>
00021 #endif
00022 
00023 #include "moab/CartVect.hpp"
00024 #include "MBTagConventions.hpp"
00025 #include "moab/MeshTopoUtil.hpp"
00026 #include "moab/ReadUtilIface.hpp"
00027 #include "moab/MergeMesh.hpp"
00028 
00029 #ifdef MOAB_HAVE_TEMPESTREMAP
00030 #include "STLStringHelper.h"
00031 #include "moab/IntxMesh/IntxUtils.hpp"
00032 
00033 #include "moab/Remapping/TempestRemapper.hpp"
00034 #include "moab/Remapping/TempestOnlineMap.hpp"
00035 #endif
00036 
00037 // C++ includes
00038 #include <cassert>
00039 #include <sstream>
00040 #include <iostream>
00041 
00042 using namespace moab;
00043 
00044 // #define VERBOSE
00045 
00046 // global variables; should they be organized in a structure, for easier reference?
00047 // or how do we keep them global?
00048 
00049 #ifdef __cplusplus
00050 extern "C" {
00051 #endif
00052 
00053 #ifdef MOAB_HAVE_TEMPESTREMAP
00054 struct TempestMapAppData
00055 {
00056     moab::TempestRemapper* remapper;
00057     std::map< std::string, moab::TempestOnlineMap* > weightMaps;
00058     iMOAB_AppID pid_src;
00059     iMOAB_AppID pid_dest;
00060 };
00061 #endif
00062 
00063 struct appData
00064 {
00065     EntityHandle file_set;
00066     int global_id;  // external component id, unique for application
00067     std::string name;
00068     Range all_verts;
00069     Range local_verts;  // may include vertices that are shared but not owned at the interface
00070     // these vertices would be all_verts if no ghosting was required
00071     Range ghost_vertices;  // locally ghosted from other processors
00072     Range primary_elems;
00073     Range owned_elems;
00074     Range ghost_elems;
00075     int dimension;             // 2 or 3, dimension of primary elements (redundant?)
00076     long num_global_elements;  // union of all elements in primary_elems; either from hdf5
00077                                // reading or from a reduce
00078     long num_global_vertices;  // union of all nodes, after sharing is resolved; it could be
00079                                // determined from hdf5 reading
00080     Range mat_sets;
00081     std::map< int, int > matIndex;  // map from global block id to index in mat_sets
00082     Range neu_sets;
00083     Range diri_sets;
00084     std::map< std::string, Tag > tagMap;
00085     std::vector< Tag > tagList;
00086     bool point_cloud;
00087     bool is_fortran;
00088 
00089 #ifdef MOAB_HAVE_MPI
00090     // the constructor for a ParCommGraph takes the joint comm and the MPI groups for each
00091     // application
00092     std::map< int, ParCommGraph* > pgraph;  // map from context id to the ParCommGraph*
00093 #endif
00094 
00095 #ifdef MOAB_HAVE_TEMPESTREMAP
00096     TempestMapAppData tempestData;
00097 #endif
00098 };
00099 
00100 struct GlobalContext
00101 {
00102     // are there reasons to have multiple moab inits? Is ref count needed?
00103     Interface* MBI;
00104     // we should also have the default tags stored, initialized
00105     Tag material_tag, neumann_tag, dirichlet_tag,
00106         globalID_tag;  // material, neumann, dirichlet,  globalID
00107     int refCountMB;
00108     int iArgc;
00109     iMOAB_String* iArgv;
00110     int unused_pid;
00111 
00112     std::map< std::string, int > appIdMap;  // from app string (uppercase) to app id
00113     std::map< int, int > appIdCompMap;      // from component id to app id
00114 
00115 #ifdef MOAB_HAVE_MPI
00116     std::vector< ParallelComm* > pcomms;  // created in order of applications, one moab::ParallelComm for each
00117 #endif
00118 
00119     std::vector< appData > appDatas;  // the same order as pcomms
00120     int globalrank, worldprocs;
00121     bool MPI_initialized;
00122 
00123     GlobalContext()
00124     {
00125         MBI        = 0;
00126         refCountMB = 0;
00127         unused_pid = 0;
00128     }
00129 };
00130 
00131 static struct GlobalContext context;
00132 
00133 ErrCode iMOAB_Initialize( int argc, iMOAB_String* argv )
00134 {
00135     if( argc ) IMOAB_CHECKPOINTER( argv, 1 );
00136 
00137     context.iArgc = argc;
00138     context.iArgv = argv;  // shallow copy
00139 
00140     if( 0 == context.refCountMB )
00141     {
00142         context.MBI = new( std::nothrow ) moab::Core;
00143         // retrieve the default tags
00144         const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
00145                                                      DIRICHLET_SET_TAG_NAME, GLOBAL_ID_TAG_NAME };
00146         // blocks, visible surfaceBC(neumann), vertexBC (Dirichlet), global id, parallel partition
00147         Tag gtags[4];
00148 
00149         for( int i = 0; i < 4; i++ )
00150         {
00151 
00152             ErrorCode rval =
00153                 context.MBI->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, gtags[i], MB_TAG_ANY );MB_CHK_ERR( rval );
00154         }
00155 
00156         context.material_tag  = gtags[0];
00157         context.neumann_tag   = gtags[1];
00158         context.dirichlet_tag = gtags[2];
00159         context.globalID_tag  = gtags[3];
00160     }
00161 
00162     context.MPI_initialized = false;
00163 #ifdef MOAB_HAVE_MPI
00164     int flagInit;
00165     MPI_Initialized( &flagInit );
00166 
00167     if( flagInit && !context.MPI_initialized )
00168     {
00169         MPI_Comm_size( MPI_COMM_WORLD, &context.worldprocs );
00170         MPI_Comm_rank( MPI_COMM_WORLD, &context.globalrank );
00171         context.MPI_initialized = true;
00172     }
00173 #endif
00174 
00175     context.refCountMB++;
00176     return moab::MB_SUCCESS;
00177 }
00178 
00179 ErrCode iMOAB_InitializeFortran()
00180 {
00181     return iMOAB_Initialize( 0, 0 );
00182 }
00183 
00184 ErrCode iMOAB_Finalize()
00185 {
00186     context.refCountMB--;
00187 
00188     if( 0 == context.refCountMB )
00189     {
00190         delete context.MBI;
00191     }
00192 
00193     return MB_SUCCESS;
00194 }
00195 
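// Usage sketch (a minimal example, not part of the API implementation above): the
// initialize/finalize pair is reference counted, so nested Initialize/Finalize calls
// are allowed; the moab::Core instance is only deleted when the count drops to zero.
// Error handling is kept deliberately minimal in this sketch.
static ErrCode example_imoab_lifecycle( int argc, iMOAB_String* argv )
{
    ErrCode rc = iMOAB_Initialize( argc, argv );
    if( rc != moab::MB_SUCCESS ) return rc;
    // ... register applications, load meshes, exchange tags ...
    return iMOAB_Finalize();  // releases context.MBI once refCountMB reaches 0
}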
00196 ErrCode iMOAB_RegisterApplication( const iMOAB_String app_name,
00197 #ifdef MOAB_HAVE_MPI
00198                                    MPI_Comm* comm,
00199 #endif
00200                                    int* compid,
00201                                    iMOAB_AppID pid )
00202 {
00203     IMOAB_CHECKPOINTER( app_name, 1 );
00204 #ifdef MOAB_HAVE_MPI
00205     IMOAB_CHECKPOINTER( comm, 2 );
00206     IMOAB_CHECKPOINTER( compid, 3 );
00207 #else
00208     IMOAB_CHECKPOINTER( compid, 2 );
00209 #endif
00210 
00211     // will create a parallel comm for this application too, so there will be a
00212     // mapping from *pid to file set and to parallel comm instances
00213     std::string name( app_name );
00214 
00215     if( context.appIdMap.find( name ) != context.appIdMap.end() )
00216     {
00217         std::cout << " application " << name << " already registered \n";
00218         return moab::MB_FAILURE;
00219     }
00220 
00221     *pid                   = context.unused_pid++;
00222     context.appIdMap[name] = *pid;
00223     int rankHere           = 0;
00224 #ifdef MOAB_HAVE_MPI
00225     MPI_Comm_rank( *comm, &rankHere );
00226 #endif
00227     if( !rankHere ) std::cout << " application " << name << " with ID = " << *pid << " is registered now \n";
00228     if( *compid <= 0 )
00229     {
00230         std::cout << " convention for external application is to have its id positive \n";
00231         return moab::MB_FAILURE;
00232     }
00233 
00234     if( context.appIdCompMap.find( *compid ) != context.appIdCompMap.end() )
00235     {
00236         std::cout << " external application with comp id " << *compid << " is already registered\n";
00237         return moab::MB_FAILURE;
00238     }
00239 
00240     context.appIdCompMap[*compid] = *pid;
00241 
00242     // now create ParallelComm and a file set for this application
00243 #ifdef MOAB_HAVE_MPI
00244     if( comm )
00245     {
00246         ParallelComm* pco = new ParallelComm( context.MBI, *comm );
00247 
00248 #ifndef NDEBUG
00249         int index = pco->get_id();  // it could be useful to get app id from pcomm instance ...
00250         assert( index == *pid );
00251         // here, we assert that the pid is the same as the id of the ParallelComm instance
00252         // useful for writing in parallel
00253 #endif
00254         context.pcomms.push_back( pco );
00255     }
00256     else
00257     {
00258         context.pcomms.push_back( 0 );
00259     }
00260 #endif
00261 
00262     // create now the file set that will be used for loading the model in
00263     EntityHandle file_set;
00264     ErrorCode rval = context.MBI->create_meshset( MESHSET_SET, file_set );MB_CHK_ERR( rval );
00265 
00266     appData app_data;
00267     app_data.file_set  = file_set;
00268     app_data.global_id = *compid;  // will be used mostly for par comm graph
00269     app_data.name      = name;     // save the name of application
00270 
00271 #ifdef MOAB_HAVE_TEMPESTREMAP
00272     app_data.tempestData.remapper = NULL;  // Only allocate as needed
00273 #endif
00274 
00275     app_data.point_cloud = false;
00276     app_data.is_fortran  = false;
00277 
00278     context.appDatas.push_back(
00279         app_data );  // context.appDatas[*pid].file_set will be the file set of interest
00280     return moab::MB_SUCCESS;
00281 }
00282 
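// Usage sketch: registering an application on MPI_COMM_WORLD, assuming an MPI-enabled
// build. The component id must be positive and unique; the returned pid indexes the
// per-application data (file set, ParallelComm). The name "OCEAN" and the id 100 are
// illustrative placeholders, not values prescribed by the API.
#ifdef MOAB_HAVE_MPI
static ErrCode example_register_application( iMOAB_AppID pid )
{
    char app_name[] = "OCEAN";            // placeholder application name
    int compid      = 100;                // external component id, chosen by the caller
    MPI_Comm comm   = MPI_COMM_WORLD;     // real drivers may pass a restricted communicator
    return iMOAB_RegisterApplication( app_name, &comm, &compid, pid );
}
#endif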
00283 ErrCode iMOAB_RegisterApplicationFortran( const iMOAB_String app_name,
00284 #ifdef MOAB_HAVE_MPI
00285                                           int* comm,
00286 #endif
00287                                           int* compid,
00288                                           iMOAB_AppID pid )
00289 {
00290     IMOAB_CHECKPOINTER( app_name, 1 );
00291 #ifdef MOAB_HAVE_MPI
00292     IMOAB_CHECKPOINTER( comm, 2 );
00293     IMOAB_CHECKPOINTER( compid, 3 );
00294 #else
00295     IMOAB_CHECKPOINTER( compid, 2 );
00296 #endif
00297 
00298     ErrCode err;
00299     assert( app_name != nullptr );
00300     std::string name( app_name );
00301 
00302 #ifdef MOAB_HAVE_MPI
00303     MPI_Comm ccomm;
00304     if( comm )
00305     {
00306         // convert from Fortran communicator to a C communicator
00307         // see transfer of handles
00308         // http://www.mpi-forum.org/docs/mpi-2.2/mpi22-report/node361.htm
00309         ccomm = MPI_Comm_f2c( (MPI_Fint)*comm );
00310     }
00311 #endif
00312 
00313     // now call C style registration function:
00314     err = iMOAB_RegisterApplication( app_name,
00315 #ifdef MOAB_HAVE_MPI
00316                                      &ccomm,
00317 #endif
00318                                      compid, pid );
00319 
00320     // Now that we have created the application context, store that
00321     // the application being registered is from a Fortran context
00322     context.appDatas[*pid].is_fortran = true;
00323 
00324     return err;
00325 }
00326 
00327 ErrCode iMOAB_DeregisterApplication( iMOAB_AppID pid )
00328 {
00329     // the file set and parallel comm are stored in vectors indexed by *pid
00330     // assume we did not delete anything yet
00331     // *pid will not be reused if we register another application
00332     appData& data = context.appDatas[*pid];
00333     int rankHere  = 0;
00334 #ifdef MOAB_HAVE_MPI
00335     ParallelComm* pco = context.pcomms[*pid];
00336     rankHere          = pco->rank();
00337 #endif
00338     if( !rankHere )
00339         std::cout << " application with ID: " << *pid << " global id: " << data.global_id << " name: " << data.name
00340                   << " is de-registered now \n";
00341 
00342     EntityHandle fileSet = data.file_set;
00343     // get all entities part of the file set
00344     Range fileents;
00345     ErrorCode rval = context.MBI->get_entities_by_handle( fileSet, fileents, /*recursive */ true );MB_CHK_ERR( rval );
00346 
00347     fileents.insert( fileSet );
00348 
00349     rval = context.MBI->get_entities_by_type( fileSet, MBENTITYSET, fileents );MB_CHK_ERR( rval );  // append all mesh sets
00350 
00351 #ifdef MOAB_HAVE_TEMPESTREMAP
00352     if( data.tempestData.remapper ) delete data.tempestData.remapper;
00353     if( data.tempestData.weightMaps.size() ) data.tempestData.weightMaps.clear();
00354 #endif
00355 
00356 #ifdef MOAB_HAVE_MPI
00357 
00358     // we could get the pco also with
00359     // ParallelComm * pcomm = ParallelComm::get_pcomm(context.MBI, *pid);
00360 
00361     std::map< int, ParCommGraph* >& pargs = data.pgraph;
00362 
00363     // free the parallel comm graphs associated with this app
00364     for( std::map< int, ParCommGraph* >::iterator mt = pargs.begin(); mt != pargs.end(); mt++ )
00365     {
00366         ParCommGraph* pgr = mt->second;
00367         delete pgr;
00368         pgr = NULL;
00369     }
00370     if( pco ) delete pco;
00371 #endif
00372 
00373     // delete first all except vertices
00374     Range vertices = fileents.subset_by_type( MBVERTEX );
00375     Range noverts  = subtract( fileents, vertices );
00376 
00377     rval = context.MBI->delete_entities( noverts );MB_CHK_ERR( rval );
00378     // now retrieve connected elements that still exist (maybe in other sets, pids?)
00379     Range adj_ents_left;
00380     rval = context.MBI->get_adjacencies( vertices, 1, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00381     rval = context.MBI->get_adjacencies( vertices, 2, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00382     rval = context.MBI->get_adjacencies( vertices, 3, false, adj_ents_left, Interface::UNION );MB_CHK_ERR( rval );
00383 
00384     if( !adj_ents_left.empty() )
00385     {
00386         Range conn_verts;
00387         rval = context.MBI->get_connectivity( adj_ents_left, conn_verts );MB_CHK_ERR( rval );
00388         vertices = subtract( vertices, conn_verts );
00389     }
00390 
00391     rval = context.MBI->delete_entities( vertices );MB_CHK_ERR( rval );
00392 
00393     std::map< std::string, int >::iterator mit;
00394 
00395     for( mit = context.appIdMap.begin(); mit != context.appIdMap.end(); mit++ )
00396     {
00397         int pidx = mit->second;
00398 
00399         if( *pid == pidx )
00400         {
00401             break;
00402         }
00403     }
00404 
00405     context.appIdMap.erase( mit );
00406     std::map< int, int >::iterator mit1;
00407 
00408     for( mit1 = context.appIdCompMap.begin(); mit1 != context.appIdCompMap.end(); mit1++ )
00409     {
00410         int pidx = mit1->second;
00411 
00412         if( *pid == pidx )
00413         {
00414             break;
00415         }
00416     }
00417 
00418     context.appIdCompMap.erase( mit1 );
00419 
00420     context.unused_pid--;  // applications must be de-registered in reverse order of registration TODO
00421     context.appDatas.pop_back();
00422 #ifdef MOAB_HAVE_MPI
00423     context.pcomms.pop_back();
00424 #endif
00425     return moab::MB_SUCCESS;
00426 }
00427 
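// Usage sketch: because DeregisterApplication pops the last appData/pcomm entries,
// applications should be released in the reverse order of registration. The two pid
// arguments here are placeholders for handles obtained from RegisterApplication.
static ErrCode example_deregister_in_reverse( iMOAB_AppID pid_first, iMOAB_AppID pid_second )
{
    // pid_second was registered after pid_first, so it is released first
    ErrCode rc = iMOAB_DeregisterApplication( pid_second );
    if( rc != moab::MB_SUCCESS ) return rc;
    return iMOAB_DeregisterApplication( pid_first );
}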
00428 ErrCode iMOAB_DeregisterApplicationFortran( iMOAB_AppID pid )
00429 {
00430     // release any Fortran specific allocations here before we pass it on
00431     context.appDatas[*pid].is_fortran = false;
00432 
00433     // release all datastructure allocations
00434     return iMOAB_DeregisterApplication( pid );
00435 }
00436 
00437 // Utility function
00438 static void split_tag_names( std::string input_names,
00439                              std::string& separator,
00440                              std::vector< std::string >& list_tag_names )
00441 {
00442     size_t pos = 0;
00443     std::string token;
00444     while( ( pos = input_names.find( separator ) ) != std::string::npos )
00445     {
00446         token = input_names.substr( 0, pos );
00447         list_tag_names.push_back( token );
00448         // std::cout << token << std::endl;
00449         input_names.erase( 0, pos + separator.length() );
00450     }
00451     if( !input_names.empty() )
00452     {
00453         // handle any leftover token, i.e., when the input does not end with the delimiter
00454         list_tag_names.push_back( input_names );
00455     }
00456     return;
00457 }
00458 
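// Sketch of the utility above: split_tag_names turns a separator-delimited list into
// individual tag names. The tag names used here are only illustrative.
static void example_split_tag_names()
{
    std::vector< std::string > names;
    std::string separator( ":" );
    split_tag_names( "T_ph:u_ph:v_ph", separator, names );  // yields {"T_ph", "u_ph", "v_ph"}
}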
00459 ErrCode iMOAB_ReadHeaderInfo( const iMOAB_String filename,
00460                               int* num_global_vertices,
00461                               int* num_global_elements,
00462                               int* num_dimension,
00463                               int* num_parts )
00464 {
00465     IMOAB_CHECKPOINTER( filename, 1 );
00466     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00467 
00468 #ifdef MOAB_HAVE_HDF5
00469     std::string filen( filename );
00470 
00471     int edges   = 0;
00472     int faces   = 0;
00473     int regions = 0;
00474     if( num_global_vertices ) *num_global_vertices = 0;
00475     if( num_global_elements ) *num_global_elements = 0;
00476     if( num_dimension ) *num_dimension = 0;
00477     if( num_parts ) *num_parts = 0;
00478 
00479     mhdf_FileHandle file;
00480     mhdf_Status status;
00481     unsigned long max_id;
00482     struct mhdf_FileDesc* data;
00483 
00484     file = mhdf_openFile( filen.c_str(), 0, &max_id, -1, &status );
00485 
00486     if( mhdf_isError( &status ) )
00487     {
00488         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00489         return moab::MB_FAILURE;
00490     }
00491 
00492     data = mhdf_getFileSummary( file, H5T_NATIVE_ULONG, &status,
00493                                 1 );  // will use extra set info; will get parallel partition tag info too!
00494 
00495     if( mhdf_isError( &status ) )
00496     {
00497         fprintf( stderr, "%s: %s\n", filename, mhdf_message( &status ) );
00498         return moab::MB_FAILURE;
00499     }
00500 
00501     if( num_dimension ) *num_dimension = data->nodes.vals_per_ent;
00502     if( num_global_vertices ) *num_global_vertices = (int)data->nodes.count;
00503 
00504     for( int i = 0; i < data->num_elem_desc; i++ )
00505     {
00506         struct mhdf_ElemDesc* el_desc = &( data->elems[i] );
00507         struct mhdf_EntDesc* ent_d    = &( el_desc->desc );
00508 
00509         if( 0 == strcmp( el_desc->type, mhdf_EDGE_TYPE_NAME ) )
00510         {
00511             edges += ent_d->count;
00512         }
00513 
00514         if( 0 == strcmp( el_desc->type, mhdf_TRI_TYPE_NAME ) )
00515         {
00516             faces += ent_d->count;
00517         }
00518 
00519         if( 0 == strcmp( el_desc->type, mhdf_QUAD_TYPE_NAME ) )
00520         {
00521             faces += ent_d->count;
00522         }
00523 
00524         if( 0 == strcmp( el_desc->type, mhdf_POLYGON_TYPE_NAME ) )
00525         {
00526             faces += ent_d->count;
00527         }
00528 
00529         if( 0 == strcmp( el_desc->type, mhdf_TET_TYPE_NAME ) )
00530         {
00531             regions += ent_d->count;
00532         }
00533 
00534         if( 0 == strcmp( el_desc->type, mhdf_PYRAMID_TYPE_NAME ) )
00535         {
00536             regions += ent_d->count;
00537         }
00538 
00539         if( 0 == strcmp( el_desc->type, mhdf_PRISM_TYPE_NAME ) )
00540         {
00541             regions += ent_d->count;
00542         }
00543 
00544         if( 0 == strcmp( el_desc->type, mdhf_KNIFE_TYPE_NAME ) )
00545         {
00546             regions += ent_d->count;
00547         }
00548 
00549         if( 0 == strcmp( el_desc->type, mdhf_HEX_TYPE_NAME ) )
00550         {
00551             regions += ent_d->count;
00552         }
00553 
00554         if( 0 == strcmp( el_desc->type, mhdf_POLYHEDRON_TYPE_NAME ) )
00555         {
00556             regions += ent_d->count;
00557         }
00558 
00559         if( 0 == strcmp( el_desc->type, mhdf_SEPTAHEDRON_TYPE_NAME ) )
00560         {
00561             regions += ent_d->count;
00562         }
00563     }
00564 
00565     if( num_parts ) *num_parts = data->numEntSets[0];
00566 
00567     // is this required?
00568     if( edges > 0 )
00569     {
00570         if( num_dimension ) *num_dimension = 1;  // I don't think it will ever return 1
00571         if( num_global_elements ) *num_global_elements = edges;
00572     }
00573 
00574     if( faces > 0 )
00575     {
00576         if( num_dimension ) *num_dimension = 2;
00577         if( num_global_elements ) *num_global_elements = faces;
00578     }
00579 
00580     if( regions > 0 )
00581     {
00582         if( num_dimension ) *num_dimension = 3;
00583         if( num_global_elements ) *num_global_elements = regions;
00584     }
00585 
00586     mhdf_closeFile( file, &status );
00587 
00588     free( data );
00589 
00590 #else
00591     std::cout << filename
00592               << ": Please reconfigure with HDF5. Cannot retrieve header information for file "
00593                  "formats other than a h5m file.\n";
00594     if( num_global_vertices ) *num_global_vertices = 0;
00595     if( num_global_elements ) *num_global_elements = 0;
00596     if( num_dimension ) *num_dimension = 0;
00597     if( num_parts ) *num_parts = 0;
00598 #endif
00599 
00600     return moab::MB_SUCCESS;
00601 }
00602 
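// Usage sketch: query the header of an h5m file without loading it; this requires an
// HDF5-enabled build, and "mesh.h5m" is a placeholder path. Any of the output pointers
// may be NULL if that count is not needed.
static ErrCode example_read_header_info()
{
    char filename[] = "mesh.h5m";
    int nverts = 0, nelems = 0, ndim = 0, nparts = 0;
    return iMOAB_ReadHeaderInfo( filename, &nverts, &nelems, &ndim, &nparts );
}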
00603 ErrCode iMOAB_LoadMesh( iMOAB_AppID pid,
00604                         const iMOAB_String filename,
00605                         const iMOAB_String read_options,
00606                         int* num_ghost_layers )
00607 {
00608     IMOAB_CHECKPOINTER( filename, 2 );
00609     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00610 
00611     // make sure we use the file set and pcomm associated with the *pid
00612     std::ostringstream newopts;
00613     if( read_options ) newopts << read_options;
00614 
00615 #ifdef MOAB_HAVE_MPI
00616 
00617     if( context.MPI_initialized )
00618     {
00619         if( context.worldprocs > 1 )
00620         {
00621             std::string opts( ( read_options ? read_options : "" ) );
00622             std::string pcid( "PARALLEL_COMM=" );
00623             std::size_t found = opts.find( pcid );
00624 
00625             if( found != std::string::npos )
00626             {
00627                 std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00628                 return moab::MB_FAILURE;
00629             }
00630 
00631             // apply the PARALLEL_COMM option only for h5m files; it does not work for .g
00632             // files (used in test_remapping)
00633             std::string filen( filename );
00634             std::string::size_type idx = filen.rfind( '.' );
00635 
00636             if( idx != std::string::npos )
00637             {
00638                 std::string extension = filen.substr( idx + 1 );
00639                 if( extension == std::string( "h5m" ) ) newopts << ";;PARALLEL_COMM=" << *pid;
00640             }
00641 
00642             if( *num_ghost_layers >= 1 )
00643             {
00644                 // if we want ghosts, we also request additional entities (the trailing value below),
00645                 // because the additional entities can be edges or faces that are part of the Neumann sets
00646                 std::string pcid2( "PARALLEL_GHOSTS=" );
00647                 std::size_t found2 = opts.find( pcid2 );
00648 
00649                 if( found2 != std::string::npos )
00650                 {
00651                     std::cout << " PARALLEL_GHOSTS option is already specified, ignore passed "
00652                                  "number of layers \n";
00653                 }
00654                 else
00655                 {
00656                     // dimension of primary entities is 3 here, but it could be 2 for climate
00657                     // meshes; we would need to pass PARALLEL_GHOSTS explicitly for 2d meshes, for
00658                     // example:  ";PARALLEL_GHOSTS=2.0.1"
00659                     newopts << ";PARALLEL_GHOSTS=3.0." << *num_ghost_layers << ".3";
00660                 }
00661             }
00662         }
00663     }
00664 #else
00665     IMOAB_ASSERT( *num_ghost_layers == 0, "Cannot provide ghost layers in serial." );
00666 #endif
00667 
00668     // Now let us actually load the MOAB file with the appropriate read options
00669     ErrorCode rval = context.MBI->load_file( filename, &context.appDatas[*pid].file_set, newopts.str().c_str() );MB_CHK_ERR( rval );
00670 
00671 #ifdef VERBOSE
00672     // some debugging stuff
00673     std::ostringstream outfile;
00674 #ifdef MOAB_HAVE_MPI
00675     int rank   = context.pcomms[*pid]->rank();
00676     int nprocs = context.pcomms[*pid]->size();
00677     outfile << "TaskMesh_n" << nprocs << "." << rank << ".h5m";
00678 #else
00679     outfile << "TaskMesh_n1.0.h5m";
00680 #endif
00681     // the mesh contains ghosts too, but they are not part of mat/neumann set
00682     // write the file in serial, to see what tags are missing
00683     rval = context.MBI->write_file( outfile.str().c_str() );MB_CHK_ERR( rval );  // everything on current task, written in serial
00684 #endif
00685 
00686     // Update mesh information
00687     return iMOAB_UpdateMeshInfo( pid );
00688 }
00689 
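// Usage sketch: load an h5m file in parallel with one layer of ghost cells. The
// PARALLEL_COMM option is appended internally, so it must not appear in read_options.
// The file name and the particular read options shown are placeholders for a typical
// parallel read.
static ErrCode example_load_mesh( iMOAB_AppID pid )
{
    char filename[]  = "mesh.h5m";
    char read_opts[] = "PARALLEL=READ_PART;PARTITION=PARALLEL_PARTITION;PARALLEL_RESOLVE_SHARED_ENTS";
    int num_ghost_layers = 1;
    return iMOAB_LoadMesh( pid, filename, read_opts, &num_ghost_layers );
}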
00690 ErrCode iMOAB_WriteMesh( iMOAB_AppID pid, const iMOAB_String filename, const iMOAB_String write_options )
00691 {
00692     IMOAB_CHECKPOINTER( filename, 2 );
00693     IMOAB_ASSERT( strlen( filename ), "Invalid filename length." );
00694 
00695     std::ostringstream newopts;
00696 #ifdef MOAB_HAVE_MPI
00697     std::string write_opts( ( write_options ? write_options : "" ) );
00698     std::string pcid( "PARALLEL_COMM=" );
00699     std::size_t found = write_opts.find( pcid );
00700 
00701     if( found != std::string::npos )
00702     {
00703         std::cerr << " cannot specify PARALLEL_COMM option, it is implicit \n";
00704         return moab::MB_FAILURE;
00705     }
00706 
00707     // if writing in parallel, add the PARALLEL_COMM option, to be sure which ParallelComm instance is used
00708     std::string pw( "PARALLEL=WRITE_PART" );
00709     found = write_opts.find( pw );
00710 
00711     if( found != std::string::npos )
00712     {
00713         newopts << "PARALLEL_COMM=" << *pid << ";";
00714     }
00715 
00716 #endif
00717 
00718     if( write_options ) newopts << write_options;
00719 
00720     // Now let us actually write the file to disk with appropriate options
00721     ErrorCode rval = context.MBI->write_file( filename, 0, newopts.str().c_str(), &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
00722 
00723     return moab::MB_SUCCESS;
00724 }
00725 
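// Usage sketch: write the application's file set in parallel; when the write options
// contain "PARALLEL=WRITE_PART", the matching PARALLEL_COMM option is added
// automatically above. The output file name is a placeholder.
static ErrCode example_write_mesh( iMOAB_AppID pid )
{
    char filename[]   = "out.h5m";
    char write_opts[] = "PARALLEL=WRITE_PART";
    return iMOAB_WriteMesh( pid, filename, write_opts );
}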
00726 ErrCode iMOAB_WriteLocalMesh( iMOAB_AppID pid, iMOAB_String prefix )
00727 {
00728     IMOAB_CHECKPOINTER( prefix, 2 );
00729     IMOAB_ASSERT( strlen( prefix ), "Invalid prefix string length." );
00730 
00731     std::ostringstream file_name;
00732     int rank = 0, size = 1;
00733 #ifdef MOAB_HAVE_MPI
00734     rank = context.pcomms[*pid]->rank();
00735     size = context.pcomms[*pid]->size();
00736 #endif
00737     file_name << prefix << "_" << size << "_" << rank << ".h5m";
00738     // Now let us actually write the file to disk with appropriate options
00739     ErrorCode rval = context.MBI->write_file( file_name.str().c_str(), 0, 0, &context.appDatas[*pid].file_set, 1 );MB_CHK_ERR( rval );
00740 
00741     return moab::MB_SUCCESS;
00742 }
00743 
00744 ErrCode iMOAB_UpdateMeshInfo( iMOAB_AppID pid )
00745 {
00746     // this will include ghost elements info
00747     appData& data        = context.appDatas[*pid];
00748     EntityHandle fileSet = data.file_set;
00749 
00750     // first clear all data ranges; this can be called after ghosting
00751     data.all_verts.clear();
00752     data.primary_elems.clear();
00753     data.local_verts.clear();
00754     data.ghost_vertices.clear();
00755     data.owned_elems.clear();
00756     data.ghost_elems.clear();
00757     data.mat_sets.clear();
00758     data.neu_sets.clear();
00759     data.diri_sets.clear();
00760 
00761     // Let us get all the vertex entities
00762     ErrorCode rval = context.MBI->get_entities_by_type( fileSet, MBVERTEX, data.all_verts, true );MB_CHK_ERR( rval );  // recursive
00763 
00764     // Let us first check entities of dimension = 3
00765     data.dimension = 3;
00766     rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00767 
00768     if( data.primary_elems.empty() )
00769     {
00770         // No 3-D elements. Let us check entities of dimension = 2
00771         data.dimension = 2;
00772         rval           = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00773 
00774         if( data.primary_elems.empty() )
00775         {
00776             // No 3-D/2-D elements. Let us check entities of dimension = 1
00777             data.dimension = 1;
00778             rval = context.MBI->get_entities_by_dimension( fileSet, data.dimension, data.primary_elems, true );MB_CHK_ERR( rval );  // recursive
00779 
00780             if( data.primary_elems.empty() )
00781             {
00782                 // no elements of dimension 1 or 2 or 3; it could happen for point clouds
00783                 data.dimension = 0;
00784             }
00785         }
00786     }
00787 
00788     // check if the current mesh is just a point cloud
00789     data.point_cloud = ( ( data.primary_elems.size() == 0 && data.all_verts.size() > 0 ) || data.dimension == 0 );
00790 
00791 #ifdef MOAB_HAVE_MPI
00792 
00793     if( context.MPI_initialized )
00794     {
00795         ParallelComm* pco = context.pcomms[*pid];
00796 
00797         // filter out ghost vertices; the remaining ones are the local vertices
00798         rval = pco->filter_pstatus( data.all_verts, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.local_verts );MB_CHK_ERR( rval );
00799 
00800         // Store handles for all ghosted entities
00801         data.ghost_vertices = subtract( data.all_verts, data.local_verts );
00802 
00803         // filter out ghost elements; the remaining ones are the owned elements
00804         rval = pco->filter_pstatus( data.primary_elems, PSTATUS_GHOST, PSTATUS_NOT, -1, &data.owned_elems );MB_CHK_ERR( rval );
00805 
00806         data.ghost_elems = subtract( data.primary_elems, data.owned_elems );
00807     }
00808     else
00809     {
00810         data.local_verts = data.all_verts;
00811         data.owned_elems = data.primary_elems;
00812     }
00813 
00814 #else
00815 
00816     data.local_verts = data.all_verts;
00817     data.owned_elems = data.primary_elems;
00818 
00819 #endif
00820 
00821     // Get the references for some standard internal tags such as material blocks, BCs, etc
00822     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00823                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00824 
00825     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00826                                                       data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00827 
00828     rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00829                                                       data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00830 
00831     return moab::MB_SUCCESS;
00832 }
00833 
00834 ErrCode iMOAB_GetMeshInfo( iMOAB_AppID pid,
00835                            int* num_visible_vertices,
00836                            int* num_visible_elements,
00837                            int* num_visible_blocks,
00838                            int* num_visible_surfaceBC,
00839                            int* num_visible_vertexBC )
00840 {
00841     ErrorCode rval;
00842     appData& data        = context.appDatas[*pid];
00843     EntityHandle fileSet = data.file_set;
00844 
00845     // this will include ghost elements
00846     // each output array is filled as [local/owned, ghost, total]
00847     if( num_visible_elements )
00848     {
00849         num_visible_elements[2] = static_cast< int >( data.primary_elems.size() );
00850         // separate ghost and local/owned primary elements
00851         num_visible_elements[0] = static_cast< int >( data.owned_elems.size() );
00852         num_visible_elements[1] = static_cast< int >( data.ghost_elems.size() );
00853     }
00854     if( num_visible_vertices )
00855     {
00856         num_visible_vertices[2] = static_cast< int >( data.all_verts.size() );
00857         num_visible_vertices[1] = static_cast< int >( data.ghost_vertices.size() );
00858         // local are those that are not ghosts
00859         num_visible_vertices[0] = num_visible_vertices[2] - num_visible_vertices[1];
00860     }
00861 
00862     if( num_visible_blocks )
00863     {
00864         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.material_tag ), 0, 1,
00865                                                           data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
00866 
00867         num_visible_blocks[2] = data.mat_sets.size();
00868         num_visible_blocks[0] = num_visible_blocks[2];
00869         num_visible_blocks[1] = 0;
00870     }
00871 
00872     if( num_visible_surfaceBC )
00873     {
00874         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.neumann_tag ), 0, 1,
00875                                                           data.neu_sets, Interface::UNION );MB_CHK_ERR( rval );
00876 
00877         num_visible_surfaceBC[2] = 0;
00878         // count how many faces are in each neu set, and how many regions are
00879         // adjacent to them;
00880         int numNeuSets = (int)data.neu_sets.size();
00881 
00882         for( int i = 0; i < numNeuSets; i++ )
00883         {
00884             Range subents;
00885             EntityHandle nset = data.neu_sets[i];
00886             rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
00887 
00888             for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
00889             {
00890                 EntityHandle subent = *it;
00891                 Range adjPrimaryEnts;
00892                 rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
00893 
00894                 num_visible_surfaceBC[2] += (int)adjPrimaryEnts.size();
00895             }
00896         }
00897 
00898         num_visible_surfaceBC[0] = num_visible_surfaceBC[2];
00899         num_visible_surfaceBC[1] = 0;
00900     }
00901 
00902     if( num_visible_vertexBC )
00903     {
00904         rval = context.MBI->get_entities_by_type_and_tag( fileSet, MBENTITYSET, &( context.dirichlet_tag ), 0, 1,
00905                                                           data.diri_sets, Interface::UNION );MB_CHK_ERR( rval );
00906 
00907         num_visible_vertexBC[2] = 0;
00908         int numDiriSets         = (int)data.diri_sets.size();
00909 
00910         for( int i = 0; i < numDiriSets; i++ )
00911         {
00912             Range verts;
00913             EntityHandle diset = data.diri_sets[i];
00914             rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
00915 
00916             num_visible_vertexBC[2] += (int)verts.size();
00917         }
00918 
00919         num_visible_vertexBC[0] = num_visible_vertexBC[2];
00920         num_visible_vertexBC[1] = 0;
00921     }
00922 
00923     return moab::MB_SUCCESS;
00924 }
00925 
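// Usage sketch: each query argument of iMOAB_GetMeshInfo is a 3-entry array filled as
// [local/owned, ghost, total]; NULL may be passed for counts that are not needed, as
// done here for the surface and vertex BC counts.
static ErrCode example_get_mesh_info( iMOAB_AppID pid )
{
    int nverts[3], nelems[3], nblocks[3];
    return iMOAB_GetMeshInfo( pid, nverts, nelems, nblocks, NULL, NULL );
}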
00926 ErrCode iMOAB_GetVertexID( iMOAB_AppID pid, int* vertices_length, iMOAB_GlobalID* global_vertex_ID )
00927 {
00928     IMOAB_CHECKPOINTER( vertices_length, 2 );
00929     IMOAB_CHECKPOINTER( global_vertex_ID, 3 );
00930 
00931     const Range& verts = context.appDatas[*pid].all_verts;
00932     // check for problems with array length
00933     IMOAB_ASSERT( *vertices_length == static_cast< int >( verts.size() ), "Invalid vertices length provided" );
00934 
00935     // global id tag is context.globalID_tag
00936     return context.MBI->tag_get_data( context.globalID_tag, verts, global_vertex_ID );
00937 }
00938 
00939 ErrCode iMOAB_GetVertexOwnership( iMOAB_AppID pid, int* vertices_length, int* visible_global_rank_ID )
00940 {
00941     assert( vertices_length && *vertices_length );
00942     assert( visible_global_rank_ID );
00943 
00944     Range& verts = context.appDatas[*pid].all_verts;
00945     int i        = 0;
00946 #ifdef MOAB_HAVE_MPI
00947     ParallelComm* pco = context.pcomms[*pid];
00948 
00949     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00950     {
00951         ErrorCode rval = pco->get_owner( *vit, visible_global_rank_ID[i] );MB_CHK_ERR( rval );
00952     }
00953 
00954     if( i != *vertices_length )
00955     {
00956         return moab::MB_FAILURE;
00957     }  // warning array allocation problem
00958 
00959 #else
00960 
00961     /* everything owned by proc 0 */
00962     if( (int)verts.size() != *vertices_length )
00963     {
00964         return moab::MB_FAILURE;
00965     }  // warning array allocation problem
00966 
00967     for( Range::iterator vit = verts.begin(); vit != verts.end(); vit++, i++ )
00968     {
00969         visible_global_rank_ID[i] = 0;
00970     }  // all vertices are owned by processor 0, as this is serial run
00971 
00972 #endif
00973 
00974     return moab::MB_SUCCESS;
00975 }
00976 
00977 ErrCode iMOAB_GetVisibleVerticesCoordinates( iMOAB_AppID pid, int* coords_length, double* coordinates )
00978 {
00979     Range& verts = context.appDatas[*pid].all_verts;
00980 
00981     // interleaved coordinates, so that means deep copy anyway
00982     if( *coords_length != 3 * (int)verts.size() )
00983     {
00984         return moab::MB_FAILURE;
00985     }
00986 
00987     ErrorCode rval = context.MBI->get_coords( verts, coordinates );MB_CHK_ERR( rval );
00988 
00989     return moab::MB_SUCCESS;
00990 }
00991 
00992 ErrCode iMOAB_GetBlockID( iMOAB_AppID pid, int* block_length, iMOAB_GlobalID* global_block_IDs )
00993 {
00994     // local block ids are counted from 0 to the number of visible blocks;
00995     // the material set tag value is returned as the global block id
00996     Range& matSets = context.appDatas[*pid].mat_sets;
00997 
00998     if( *block_length != (int)matSets.size() )
00999     {
01000         return moab::MB_FAILURE;
01001     }
01002 
01003     // return the material set tag value (context.material_tag)
01004     ErrorCode rval = context.MBI->tag_get_data( context.material_tag, matSets, global_block_IDs );MB_CHK_ERR( rval );
01005 
01006     // populate map with index
01007     std::map< int, int >& matIdx = context.appDatas[*pid].matIndex;
01008     for( unsigned i = 0; i < matSets.size(); i++ )
01009     {
01010         matIdx[global_block_IDs[i]] = i;
01011     }
01012 
01013     return moab::MB_SUCCESS;
01014 }
01015 
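// Usage sketch: the caller sizes global_block_IDs using the block count reported by
// iMOAB_GetMeshInfo; num_blocks is assumed here to have been obtained that way and to
// be positive.
static ErrCode example_get_block_ids( iMOAB_AppID pid, int num_blocks )
{
    std::vector< iMOAB_GlobalID > block_ids( num_blocks );
    return iMOAB_GetBlockID( pid, &num_blocks, &block_ids[0] );
}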
01016 ErrCode iMOAB_GetBlockInfo( iMOAB_AppID pid,
01017                             iMOAB_GlobalID* global_block_ID,
01018                             int* vertices_per_element,
01019                             int* num_elements_in_block )
01020 {
01021     assert( global_block_ID );
01022 
01023     std::map< int, int >& matMap      = context.appDatas[*pid].matIndex;
01024     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01025 
01026     if( it == matMap.end() )
01027     {
01028         return moab::MB_FAILURE;
01029     }  // error in finding block with id
01030 
01031     int blockIndex          = matMap[*global_block_ID];
01032     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01033     Range blo_elems;
01034     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, blo_elems );
01035 
01036     if( MB_SUCCESS != rval || blo_elems.empty() )
01037     {
01038         return moab::MB_FAILURE;
01039     }
01040 
01041     EntityType type = context.MBI->type_from_handle( blo_elems[0] );
01042 
01043     if( !blo_elems.all_of_type( type ) )
01044     {
01045         return moab::MB_FAILURE;
01046     }  // not all of the same type
01047 
01048     const EntityHandle* conn = NULL;
01049     int num_verts            = 0;
01050     rval                     = context.MBI->get_connectivity( blo_elems[0], conn, num_verts );MB_CHK_ERR( rval );
01051 
01052     *vertices_per_element  = num_verts;
01053     *num_elements_in_block = (int)blo_elems.size();
01054 
01055     return moab::MB_SUCCESS;
01056 }
01057 
01058 ErrCode iMOAB_GetVisibleElementsInfo( iMOAB_AppID pid,
01059                                       int* num_visible_elements,
01060                                       iMOAB_GlobalID* element_global_IDs,
01061                                       int* ranks,
01062                                       iMOAB_GlobalID* block_IDs )
01063 {
01064     appData& data = context.appDatas[*pid];
01065 #ifdef MOAB_HAVE_MPI
01066     ParallelComm* pco = context.pcomms[*pid];
01067 #endif
01068 
01069     ErrorCode rval = context.MBI->tag_get_data( context.globalID_tag, data.primary_elems, element_global_IDs );MB_CHK_ERR( rval );
01070 
01071     int i = 0;
01072 
01073     for( Range::iterator eit = data.primary_elems.begin(); eit != data.primary_elems.end(); ++eit, ++i )
01074     {
01075 #ifdef MOAB_HAVE_MPI
01076         rval = pco->get_owner( *eit, ranks[i] );MB_CHK_ERR( rval );
01077 
01078 #else
01079         /* everything owned by task 0 */
01080         ranks[i]             = 0;
01081 #endif
01082     }
01083 
01084     for( Range::iterator mit = data.mat_sets.begin(); mit != data.mat_sets.end(); ++mit )
01085     {
01086         EntityHandle matMeshSet = *mit;
01087         Range elems;
01088         rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01089 
01090         int valMatTag;
01091         rval = context.MBI->tag_get_data( context.material_tag, &matMeshSet, 1, &valMatTag );MB_CHK_ERR( rval );
01092 
01093         for( Range::iterator eit = elems.begin(); eit != elems.end(); ++eit )
01094         {
01095             EntityHandle eh = *eit;
01096             int index       = data.primary_elems.index( eh );
01097 
01098             if( -1 == index )
01099             {
01100                 return moab::MB_FAILURE;
01101             }
01102 
01103             if( -1 >= *num_visible_elements )
01104             {
01105                 return moab::MB_FAILURE;
01106             }
01107 
01108             block_IDs[index] = valMatTag;
01109         }
01110     }
01111 
01112     return moab::MB_SUCCESS;
01113 }
01114 
01115 ErrCode iMOAB_GetBlockElementConnectivities( iMOAB_AppID pid,
01116                                              iMOAB_GlobalID* global_block_ID,
01117                                              int* connectivity_length,
01118                                              int* element_connectivity )
01119 {
01120     assert( global_block_ID );      // ensure global block ID argument is not null
01121     assert( connectivity_length );  // ensure connectivity length argument is not null
01122 
01123     appData& data                     = context.appDatas[*pid];
01124     std::map< int, int >& matMap      = data.matIndex;
01125     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01126 
01127     if( it == matMap.end() )
01128     {
01129         return moab::MB_FAILURE;
01130     }  // error in finding block with id
01131 
01132     int blockIndex          = matMap[*global_block_ID];
01133     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01134     std::vector< EntityHandle > elems;
01135 
01136     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01137 
01138     if( elems.empty() )
01139     {
01140         return moab::MB_FAILURE;
01141     }
01142 
01143     std::vector< EntityHandle > vconnect;
01144     rval = context.MBI->get_connectivity( &elems[0], elems.size(), vconnect );MB_CHK_ERR( rval );
01145 
01146     if( *connectivity_length != (int)vconnect.size() )
01147     {
01148         return moab::MB_FAILURE;
01149     }  // mismatched sizes
01150 
01151     for( int i = 0; i < *connectivity_length; i++ )
01152     {
01153         int inx = data.all_verts.index( vconnect[i] );
01154 
01155         if( -1 == inx )
01156         {
01157             return moab::MB_FAILURE;
01158         }  // error, vertex not in local range
01159 
01160         element_connectivity[i] = inx;
01161     }
01162 
01163     return moab::MB_SUCCESS;
01164 }
01165 
01166 ErrCode iMOAB_GetElementConnectivity( iMOAB_AppID pid,
01167                                       iMOAB_LocalID* elem_index,
01168                                       int* connectivity_length,
01169                                       int* element_connectivity )
01170 {
01171     assert( elem_index );           // ensure element index argument is not null
01172     assert( connectivity_length );  // ensure connectivity length argument is not null
01173 
01174     appData& data = context.appDatas[*pid];
01175     assert( ( *elem_index >= 0 ) && ( *elem_index < (int)data.primary_elems.size() ) );
01176 
01177     int num_nodes;
01178     const EntityHandle* conn;
01179 
01180     EntityHandle eh = data.primary_elems[*elem_index];
01181 
01182     ErrorCode rval = context.MBI->get_connectivity( eh, conn, num_nodes );MB_CHK_ERR( rval );
01183 
01184     if( *connectivity_length < num_nodes )
01185     {
01186         return moab::MB_FAILURE;
01187     }  // wrong number of vertices
01188 
01189     for( int i = 0; i < num_nodes; i++ )
01190     {
01191         int index = data.all_verts.index( conn[i] );
01192 
01193         if( -1 == index )
01194         {
01195             return moab::MB_FAILURE;
01196         }
01197 
01198         element_connectivity[i] = index;
01199     }
01200 
01201     *connectivity_length = num_nodes;
01202 
01203     return moab::MB_SUCCESS;
01204 }
01205 
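// Usage sketch: retrieve the local vertex indices of one element. connectivity_length
// is in/out: the buffer capacity on input and the actual number of nodes on output.
// The capacity of 27 is just a generous bound chosen for illustration.
static ErrCode example_get_element_connectivity( iMOAB_AppID pid, iMOAB_LocalID elem_index )
{
    int conn[27];
    int conn_length = 27;
    return iMOAB_GetElementConnectivity( pid, &elem_index, &conn_length, conn );
}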
01206 ErrCode iMOAB_GetElementOwnership( iMOAB_AppID pid,
01207                                    iMOAB_GlobalID* global_block_ID,
01208                                    int* num_elements_in_block,
01209                                    int* element_ownership )
01210 {
01211     assert( global_block_ID );        // ensure global block ID argument is not null
01212     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01213 
01214     std::map< int, int >& matMap = context.appDatas[*pid].matIndex;
01215 
01216     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01217 
01218     if( it == matMap.end() )
01219     {
01220         return moab::MB_FAILURE;
01221     }  // error in finding block with id
01222 
01223     int blockIndex          = matMap[*global_block_ID];
01224     EntityHandle matMeshSet = context.appDatas[*pid].mat_sets[blockIndex];
01225     Range elems;
01226 
01227     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01228 
01229     if( elems.empty() )
01230     {
01231         return moab::MB_FAILURE;
01232     }
01233 
01234     if( *num_elements_in_block != (int)elems.size() )
01235     {
01236         return moab::MB_FAILURE;
01237     }  // bad memory allocation
01238 
01239     int i = 0;
01240 #ifdef MOAB_HAVE_MPI
01241     ParallelComm* pco = context.pcomms[*pid];
01242 #endif
01243 
01244     for( Range::iterator vit = elems.begin(); vit != elems.end(); vit++, i++ )
01245     {
01246 #ifdef MOAB_HAVE_MPI
01247         rval = pco->get_owner( *vit, element_ownership[i] );MB_CHK_ERR( rval );
01248 #else
01249         element_ownership[i] = 0; /* owned by 0 */
01250 #endif
01251     }
01252 
01253     return moab::MB_SUCCESS;
01254 }
01255 
01256 ErrCode iMOAB_GetElementID( iMOAB_AppID pid,
01257                             iMOAB_GlobalID* global_block_ID,
01258                             int* num_elements_in_block,
01259                             iMOAB_GlobalID* global_element_ID,
01260                             iMOAB_LocalID* local_element_ID )
01261 {
01262     assert( global_block_ID );        // ensure global block ID argument is not null
01263     assert( num_elements_in_block );  // ensure number of elements in block argument is not null
01264 
01265     appData& data                = context.appDatas[*pid];
01266     std::map< int, int >& matMap = data.matIndex;
01267 
01268     std::map< int, int >::iterator it = matMap.find( *global_block_ID );
01269 
01270     if( it == matMap.end() )
01271     {
01272         return moab::MB_FAILURE;
01273     }  // error in finding block with id
01274 
01275     int blockIndex          = matMap[*global_block_ID];
01276     EntityHandle matMeshSet = data.mat_sets[blockIndex];
01277     Range elems;
01278     ErrorCode rval = context.MBI->get_entities_by_handle( matMeshSet, elems );MB_CHK_ERR( rval );
01279 
01280     if( elems.empty() )
01281     {
01282         return moab::MB_FAILURE;
01283     }
01284 
01285     if( *num_elements_in_block != (int)elems.size() )
01286     {
01287         return moab::MB_FAILURE;
01288     }  // bad memory allocation
01289 
01290     rval = context.MBI->tag_get_data( context.globalID_tag, elems, global_element_ID );MB_CHK_ERR( rval );
01291 
01292     // check that elems are among primary_elems in data
01293     for( int i = 0; i < *num_elements_in_block; i++ )
01294     {
01295         local_element_ID[i] = data.primary_elems.index( elems[i] );
01296 
01297         if( -1 == local_element_ID[i] )
01298         {
01299             return moab::MB_FAILURE;
01300         }  // error, not in local primary elements
01301     }
01302 
01303     return moab::MB_SUCCESS;
01304 }
01305 
01306 ErrCode iMOAB_GetPointerToSurfaceBC( iMOAB_AppID pid,
01307                                      int* surface_BC_length,
01308                                      iMOAB_LocalID* local_element_ID,
01309                                      int* reference_surface_ID,
01310                                      int* boundary_condition_value )
01311 {
01312     // we have to fill BC data for Neumann sets
01313     ErrorCode rval;
01314 
01315     // it was counted above, in GetMeshInfo
01316     appData& data  = context.appDatas[*pid];
01317     int numNeuSets = (int)data.neu_sets.size();
01318 
01319     int index = 0;  // index [0, surface_BC_length) for the arrays returned
01320 
01321     for( int i = 0; i < numNeuSets; i++ )
01322     {
01323         Range subents;
01324         EntityHandle nset = data.neu_sets[i];
01325         rval              = context.MBI->get_entities_by_dimension( nset, data.dimension - 1, subents );MB_CHK_ERR( rval );
01326 
01327         int neuVal;
01328         rval = context.MBI->tag_get_data( context.neumann_tag, &nset, 1, &neuVal );MB_CHK_ERR( rval );
01329 
01330         for( Range::iterator it = subents.begin(); it != subents.end(); ++it )
01331         {
01332             EntityHandle subent = *it;
01333             Range adjPrimaryEnts;
01334             rval = context.MBI->get_adjacencies( &subent, 1, data.dimension, false, adjPrimaryEnts );MB_CHK_ERR( rval );
01335 
01336             // get global id of the primary ents, and side number of the quad/subentity
01337             // this is moab ordering
01338             for( Range::iterator pit = adjPrimaryEnts.begin(); pit != adjPrimaryEnts.end(); pit++ )
01339             {
01340                 EntityHandle primaryEnt = *pit;
01341                 // get global id
01342                 /*int globalID;
01343                 rval = context.MBI->tag_get_data(gtags[3], &primaryEnt, 1, &globalID);
01344                 if (MB_SUCCESS!=rval)
01345                   return moab::MB_FAILURE;
01346                 global_element_ID[index] = globalID;*/
01347 
01348                 // get local element id
01349                 local_element_ID[index] = data.primary_elems.index( primaryEnt );
01350 
01351                 if( -1 == local_element_ID[index] )
01352                 {
01353                     return moab::MB_FAILURE;
01354                 }  // did not find the element locally
01355 
01356                 int side_number, sense, offset;
01357                 rval = context.MBI->side_number( primaryEnt, subent, side_number, sense, offset );MB_CHK_ERR( rval );
01358 
01359                 reference_surface_ID[index]     = side_number + 1;  // moab is from 0 to 5, it needs 1 to 6
01360                 boundary_condition_value[index] = neuVal;
01361                 index++;
01362             }
01363         }
01364     }
01365 
01366     if( index != *surface_BC_length )
01367     {
01368         return moab::MB_FAILURE;
01369     }  // error in array allocations
01370 
01371     return moab::MB_SUCCESS;
01372 }
01373 
01374 ErrCode iMOAB_GetPointerToVertexBC( iMOAB_AppID pid,
01375                                     int* vertex_BC_length,
01376                                     iMOAB_LocalID* local_vertex_ID,
01377                                     int* boundary_condition_value )
01378 {
01379     ErrorCode rval;
01380 
01381     // it was counted above, in GetMeshInfo
01382     appData& data   = context.appDatas[*pid];
01383     int numDiriSets = (int)data.diri_sets.size();
01384     int index       = 0;  // index [0, *vertex_BC_length) for the arrays returned
01385 
01386     for( int i = 0; i < numDiriSets; i++ )
01387     {
01388         Range verts;
01389         EntityHandle diset = data.diri_sets[i];
01390         rval               = context.MBI->get_entities_by_dimension( diset, 0, verts );MB_CHK_ERR( rval );
01391 
01392         int diriVal;
01393         rval = context.MBI->tag_get_data( context.dirichlet_tag, &diset, 1, &diriVal );MB_CHK_ERR( rval );
01394 
01395         for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
01396         {
01397             EntityHandle vt = *vit;
01398             /*int vgid;
01399             rval = context.MBI->tag_get_data(gtags[3], &vt, 1, &vgid);
01400             if (MB_SUCCESS!=rval)
01401               return moab::MB_FAILURE;
01402             global_vertext_ID[index] = vgid;*/
01403             local_vertex_ID[index] = data.all_verts.index( vt );
01404 
01405             if( -1 == local_vertex_ID[index] )
01406             {
01407                 return moab::MB_FAILURE;
01408             }  // vertex was not found
01409 
01410             boundary_condition_value[index] = diriVal;
01411             index++;
01412         }
01413     }
01414 
01415     if( *vertex_BC_length != index )
01416     {
01417         return moab::MB_FAILURE;
01418     }  // array allocation issue
01419 
01420     return moab::MB_SUCCESS;
01421 }
01422 
01423 ErrCode iMOAB_DefineTagStorage( iMOAB_AppID pid,
01424                                 const iMOAB_String tag_storage_name,
01425                                 int* tag_type,
01426                                 int* components_per_entity,
01427                                 int* tag_index )
01428 {
01429     // see if the tag already exists, and if so, check its type and length
01430     if( *tag_type < 0 || *tag_type > 5 )
01431     {
01432         return moab::MB_FAILURE;
01433     }  // we have 6 types of tags supported so far
01434 
01435     DataType tagDataType;
01436     TagType tagType;
01437     void* defaultVal        = NULL;
01438     int* defInt             = new int[*components_per_entity];
01439     double* defDouble       = new double[*components_per_entity];
01440     EntityHandle* defHandle = new EntityHandle[*components_per_entity];
01441 
01442     for( int i = 0; i < *components_per_entity; i++ )
01443     {
01444         defInt[i]    = 0;
01445         defDouble[i] = -1e+10;
01446         defHandle[i] = (EntityHandle)0;
01447     }
01448 
01449     switch( *tag_type )
01450     {
01451         case 0:
01452             tagDataType = MB_TYPE_INTEGER;
01453             tagType     = MB_TAG_DENSE;
01454             defaultVal  = defInt;
01455             break;
01456 
01457         case 1:
01458             tagDataType = MB_TYPE_DOUBLE;
01459             tagType     = MB_TAG_DENSE;
01460             defaultVal  = defDouble;
01461             break;
01462 
01463         case 2:
01464             tagDataType = MB_TYPE_HANDLE;
01465             tagType     = MB_TAG_DENSE;
01466             defaultVal  = defHandle;
01467             break;
01468 
01469         case 3:
01470             tagDataType = MB_TYPE_INTEGER;
01471             tagType     = MB_TAG_SPARSE;
01472             defaultVal  = defInt;
01473             break;
01474 
01475         case 4:
01476             tagDataType = MB_TYPE_DOUBLE;
01477             tagType     = MB_TAG_SPARSE;
01478             defaultVal  = defDouble;
01479             break;
01480 
01481         case 5:
01482             tagDataType = MB_TYPE_HANDLE;
01483             tagType     = MB_TAG_SPARSE;
01484             defaultVal  = defHandle;
01485             break;
01486 
01487         default: {
01488             delete[] defInt;
01489             delete[] defDouble;
01490             delete[] defHandle;
01491             return moab::MB_FAILURE;
01492         }  // error
01493     }
01494 
01495     Tag tagHandle;
01496     std::string tag_name( tag_storage_name );
01497     ErrorCode rval = context.MBI->tag_get_handle( tag_name.c_str(), *components_per_entity, tagDataType, tagHandle,
01498                                                   tagType, defaultVal );
01499 
01500     if( MB_TAG_NOT_FOUND == rval )
01501     {
01502         rval = context.MBI->tag_get_handle( tag_name.c_str(), *components_per_entity, tagDataType, tagHandle,
01503                                             tagType | MB_TAG_CREAT, defaultVal );
01504     }
01505 
01506     // we don't need default values anymore, avoid leaks
01507     delete[] defInt;
01508     delete[] defDouble;
01509     delete[] defHandle;
01510 
01511     appData& data = context.appDatas[*pid];
01512 
01513     if( MB_ALREADY_ALLOCATED == rval )
01514     {
01515         std::map< std::string, Tag >& mTags        = data.tagMap;
01516         std::map< std::string, Tag >::iterator mit = mTags.find( tag_name );
01517 
01518         if( mit == mTags.end() )
01519         {
01520             // add it to the map
01521             mTags[tag_name] = tagHandle;
01522             // push it to the list of tags, too
01523             *tag_index = (int)data.tagList.size();
01524             data.tagList.push_back( tagHandle );
01525         }
01526 
01527         return moab::MB_SUCCESS;  // OK, we found it, and we have it stored in the tag map
01528     }
01529     else if( MB_SUCCESS == rval )
01530     {
01531         data.tagMap[tag_name] = tagHandle;
01532         *tag_index            = (int)data.tagList.size();
01533         data.tagList.push_back( tagHandle );
01534         return moab::MB_SUCCESS;
01535     }
01536     else
01537         return moab::MB_FAILURE;  // some error, maybe the tag was not created
01538 }
01539 
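/* Usage sketch for iMOAB_DefineTagStorage, assuming "pid" is a valid application id and the
 * tag name is a caller-chosen placeholder:
 *
 *   int tag_type = 1;                   // 1 = dense, double-valued (see the switch above)
 *   int components_per_entity = 1;
 *   int tag_index = -1;
 *   ErrCode rc = iMOAB_DefineTagStorage( pid, "example_density", &tag_type,
 *                                        &components_per_entity, &tag_index );
 *   // on success, tag_index can be passed later to iMOAB_SynchronizeTags or iMOAB_ReduceTagsMax
 */
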
01540 ErrCode iMOAB_SetIntTagStorage( iMOAB_AppID pid,
01541                                 const iMOAB_String tag_storage_name,
01542                                 int* num_tag_storage_length,
01543                                 int* ent_type,
01544                                 int* tag_storage_data )
01545 {
01546     std::string tag_name( tag_storage_name );
01547 
01548     // Get the application data
01549     appData& data = context.appDatas[*pid];
01550 
01551     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01552     {
01553         return moab::MB_FAILURE;
01554     }  // tag not defined
01555 
01556     Tag tag = data.tagMap[tag_name];
01557 
01558     int tagLength  = 0;
01559     ErrorCode rval = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01560 
01561     DataType dtype;
01562     rval = context.MBI->tag_get_data_type( tag, dtype );
01563 
01564     if( MB_SUCCESS != rval || dtype != MB_TYPE_INTEGER )
01565     {
01566         return moab::MB_FAILURE;
01567     }
01568 
01569     // set it on a subset of entities, based on type and length
01570     Range* ents_to_set;
01571 
01572     if( *ent_type == 0 )  // vertices
01573     {
01574         ents_to_set = &data.all_verts;
01575     }
01576     else  // if (*ent_type == 1) // *ent_type can be 0 (vertices) or 1 (elements)
01577     {
01578         ents_to_set = &data.primary_elems;
01579     }
01580 
01581     int nents_to_be_set = *num_tag_storage_length / tagLength;
01582 
01583     if( nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
01584     {
01585         return moab::MB_FAILURE;
01586     }  // too many entities to be set, or too few
01587 
01588     // restrict the range; everything is contiguous; or not?
01589 
01590     // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
01591     // 1 ) );
01592     rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
01593 
01594     return moab::MB_SUCCESS;  // no error
01595 }
01596 
01597 ErrCode iMOAB_GetIntTagStorage( iMOAB_AppID pid,
01598                                 const iMOAB_String tag_storage_name,
01599                                 int* num_tag_storage_length,
01600                                 int* ent_type,
01601                                 int* tag_storage_data )
01602 {
01603     ErrorCode rval;
01604     std::string tag_name( tag_storage_name );
01605 
01606     appData& data = context.appDatas[*pid];
01607 
01608     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01609     {
01610         return moab::MB_FAILURE;
01611     }  // tag not defined
01612 
01613     Tag tag = data.tagMap[tag_name];
01614 
01615     int tagLength = 0;
01616     rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01617 
01618     DataType dtype;
01619     rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01620 
01621     if( dtype != MB_TYPE_INTEGER )
01622     {
01623         return moab::MB_FAILURE;
01624     }
01625 
01626     // get it from a subset of entities, based on type and length
01627     Range* ents_to_get;
01628 
01629     if( *ent_type == 0 )  // vertices
01630     {
01631         ents_to_get = &data.all_verts;
01632     }
01633     else  // if (*ent_type == 1)
01634     {
01635         ents_to_get = &data.primary_elems;
01636     }
01637 
01638     int nents_to_get = *num_tag_storage_length / tagLength;
01639 
01640     if( nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
01641     {
01642         return moab::MB_FAILURE;
01643     }  // too many entities to get, or too few
01644 
01645     // restrict the range; everything is contiguous; or not?
01646     // Range contig_range ( * ( ents_to_get->begin() ), * ( ents_to_get->begin() + nents_to_get - 1
01647     // ) );
01648 
01649     rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
01650 
01651     return moab::MB_SUCCESS;  // no error
01652 }
01653 
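/* Usage sketch for the integer tag storage routines, assuming a one-component integer tag
 * (tag_type 0 or 3) named "example_int_field" was defined beforehand; the length argument is
 * the number of entities written times the tag length:
 *
 *   int ent_type = 1;                              // 0 = vertices, 1 = primary elements
 *   int length   = (int)values.size();             // nents * components_per_entity
 *   ErrCode rc = iMOAB_SetIntTagStorage( pid, "example_int_field", &length, &ent_type, values.data() );
 *   rc = iMOAB_GetIntTagStorage( pid, "example_int_field", &length, &ent_type, values.data() );
 */
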
01654 ErrCode iMOAB_SetDoubleTagStorage( iMOAB_AppID pid,
01655                                    const iMOAB_String tag_storage_name,
01656                                    int* num_tag_storage_length,
01657                                    int* ent_type,
01658                                    double* tag_storage_data )
01659 {
01660     ErrorCode rval;
01661     // exactly the same code as for the int tag, except that the data type check is for MB_TYPE_DOUBLE
01662     std::string tag_name( tag_storage_name );
01663 
01664     appData& data = context.appDatas[*pid];
01665 
01666     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01667     {
01668         return moab::MB_FAILURE;
01669     }  // tag not defined
01670 
01671     Tag tag = data.tagMap[tag_name];
01672 
01673     int tagLength = 0;
01674     rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01675 
01676     DataType dtype;
01677     rval = context.MBI->tag_get_data_type( tag, dtype );
01678 
01679     if( MB_SUCCESS != rval || dtype != MB_TYPE_DOUBLE )
01680     {
01681         return moab::MB_FAILURE;
01682     }
01683 
01684     // set it on a subset of entities, based on type and length
01685     Range* ents_to_set = NULL;
01686 
01687     if( *ent_type == 0 )  // vertices
01688     {
01689         ents_to_set = &data.all_verts;
01690     }
01691     else if( *ent_type == 1 )
01692     {
01693         ents_to_set = &data.primary_elems;
01694     }
01695 
01696     int nents_to_be_set = *num_tag_storage_length / tagLength;
01697 
01698     if( NULL == ents_to_set || nents_to_be_set > (int)ents_to_set->size() || nents_to_be_set < 1 )
01699     {
01700         return moab::MB_FAILURE;
01701     }  // invalid entity type, or too many/too few entities to be set
01702 
01703     // restrict the range; everything is contiguous; or not?
01704     // Range contig_range ( * ( ents_to_set->begin() ), * ( ents_to_set->begin() + nents_to_be_set -
01705     // 1 ) );
01706 
01707     rval = context.MBI->tag_set_data( tag, *ents_to_set, tag_storage_data );MB_CHK_ERR( rval );
01708 
01709     return moab::MB_SUCCESS;  // no error
01710 }
01711 
01712 ErrCode iMOAB_GetDoubleTagStorage( iMOAB_AppID pid,
01713                                    const iMOAB_String tag_storage_name,
01714                                    int* num_tag_storage_length,
01715                                    int* ent_type,
01716                                    double* tag_storage_data )
01717 {
01718     ErrorCode rval;
01719     // exactly the same code, except tag type check
01720     std::string tag_name( tag_storage_name );
01721 
01722     appData& data = context.appDatas[*pid];
01723 
01724     if( data.tagMap.find( tag_name ) == data.tagMap.end() )
01725     {
01726         return moab::MB_FAILURE;
01727     }  // tag not defined
01728 
01729     Tag tag = data.tagMap[tag_name];
01730 
01731     int tagLength = 0;
01732     rval          = context.MBI->tag_get_length( tag, tagLength );MB_CHK_ERR( rval );
01733 
01734     DataType dtype;
01735     rval = context.MBI->tag_get_data_type( tag, dtype );MB_CHK_ERR( rval );
01736 
01737     if( dtype != MB_TYPE_DOUBLE )
01738     {
01739         return moab::MB_FAILURE;
01740     }
01741 
01742     // get it from a subset of entities, based on type and length
01743     Range* ents_to_get = NULL;
01744 
01745     if( *ent_type == 0 )  // vertices
01746     {
01747         ents_to_get = &data.all_verts;
01748     }
01749     else if( *ent_type == 1 )
01750     {
01751         ents_to_get = &data.primary_elems;
01752     }
01753 
01754     int nents_to_get = *num_tag_storage_length / tagLength;
01755 
01756     if( NULL == ents_to_get || nents_to_get > (int)ents_to_get->size() || nents_to_get < 1 )
01757     {
01758         return moab::MB_FAILURE;
01759     }  // invalid entity type, or too many/too few entities to get
01760 
01761     // restrict the range; everything is contiguous; or not?
01762     // Range contig_range( *( ents_to_get->begin() ), *( ents_to_get->begin() + nents_to_get - 1 ) );
01763     rval = context.MBI->tag_get_data( tag, *ents_to_get, tag_storage_data );MB_CHK_ERR( rval );
01764 
01765     return moab::MB_SUCCESS;  // no error
01766 }
01767 
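/* Usage sketch for the double tag storage routines, assuming a dense double tag with
 * "components_per_entity" components was defined earlier on this application (sizes are
 * placeholders):
 *
 *   int ent_type = 0;                                           // vertices
 *   int length   = num_local_vertices * components_per_entity;  // total number of values
 *   std::vector< double > vals( length, 0.0 );
 *   ErrCode rc = iMOAB_SetDoubleTagStorage( pid, "example_density", &length, &ent_type, vals.data() );
 *   rc = iMOAB_GetDoubleTagStorage( pid, "example_density", &length, &ent_type, vals.data() );
 */
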
01768 ErrCode iMOAB_SynchronizeTags( iMOAB_AppID pid, int* num_tag, int* tag_indices, int* ent_type )
01769 {
01770 #ifdef MOAB_HAVE_MPI
01771     appData& data = context.appDatas[*pid];
01772     Range ent_exchange;
01773     std::vector< Tag > tags;
01774 
01775     for( int i = 0; i < *num_tag; i++ )
01776     {
01777         if( tag_indices[i] < 0 || tag_indices[i] >= (int)data.tagList.size() )
01778         {
01779             return moab::MB_FAILURE;
01780         }  // error in tag index
01781 
01782         tags.push_back( data.tagList[tag_indices[i]] );
01783     }
01784 
01785     if( *ent_type == 0 )
01786     {
01787         ent_exchange = data.all_verts;
01788     }
01789     else if( *ent_type == 1 )
01790     {
01791         ent_exchange = data.primary_elems;
01792     }
01793     else
01794     {
01795         return moab::MB_FAILURE;
01796     }  // unexpected type
01797 
01798     ParallelComm* pco = context.pcomms[*pid];
01799 
01800     ErrorCode rval = pco->exchange_tags( tags, tags, ent_exchange );MB_CHK_ERR( rval );
01801 
01802 #else
01803     /* do nothing if serial */
01804     // just silence the warning
01805     // do not call sync tags in serial!
01806     int k = *pid + *num_tag + *tag_indices + *ent_type;
01807     k++;
01808 #endif
01809 
01810     return moab::MB_SUCCESS;
01811 }
01812 
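/* Usage sketch for iMOAB_SynchronizeTags (meaningful only in MPI builds), assuming tag_index
 * was returned by iMOAB_DefineTagStorage for this application:
 *
 *   int num_tag = 1;
 *   int tag_indices[1] = { tag_index };
 *   int ent_type = 0;                    // exchange values on shared/ghost vertices
 *   ErrCode rc = iMOAB_SynchronizeTags( pid, &num_tag, tag_indices, &ent_type );
 */
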
01813 ErrCode iMOAB_ReduceTagsMax( iMOAB_AppID pid, int* tag_index, int* ent_type )
01814 {
01815 #ifdef MOAB_HAVE_MPI
01816     appData& data = context.appDatas[*pid];
01817     Range ent_exchange;
01818 
01819     if( *tag_index < 0 || *tag_index >= (int)data.tagList.size() )
01820     {
01821         return moab::MB_FAILURE;
01822     }  // error in tag index
01823 
01824     Tag tagh = data.tagList[*tag_index];
01825 
01826     if( *ent_type == 0 )
01827     {
01828         ent_exchange = data.all_verts;
01829     }
01830     else if( *ent_type == 1 )
01831     {
01832         ent_exchange = data.primary_elems;
01833     }
01834     else
01835     {
01836         return moab::MB_FAILURE;
01837     }  // unexpected type
01838 
01839     ParallelComm* pco = context.pcomms[*pid];
01840     // we could do different MPI_Op; do not bother now, we will call from fortran
01841     ErrorCode rval = pco->reduce_tags( tagh, MPI_MAX, ent_exchange );MB_CHK_ERR( rval );
01842 
01843 #else
01844     /* do nothing if serial */
01845     // just silence the warning
01846     // do not call sync tags in serial!
01847     int k = *pid + *tag_index + *ent_type;
01848     k++;  // just do junk, to avoid complaints
01849 #endif
01850     return moab::MB_SUCCESS;
01851 }
01852 
01853 ErrCode iMOAB_GetNeighborElements( iMOAB_AppID pid,
01854                                    iMOAB_LocalID* local_index,
01855                                    int* num_adjacent_elements,
01856                                    iMOAB_LocalID* adjacent_element_IDs )
01857 {
01858     ErrorCode rval;
01859 
01860     // one neighbor for each subentity of dimension-1
01861     MeshTopoUtil mtu( context.MBI );
01862     appData& data   = context.appDatas[*pid];
01863     EntityHandle eh = data.primary_elems[*local_index];
01864     Range adjs;
01865     rval = mtu.get_bridge_adjacencies( eh, data.dimension - 1, data.dimension, adjs );MB_CHK_ERR( rval );
01866 
01867     if( *num_adjacent_elements < (int)adjs.size() )
01868     {
01869         return moab::MB_FAILURE;
01870     }  // not dimensioned correctly
01871 
01872     *num_adjacent_elements = (int)adjs.size();
01873 
01874     for( int i = 0; i < *num_adjacent_elements; i++ )
01875     {
01876         adjacent_element_IDs[i] = data.primary_elems.index( adjs[i] );
01877     }
01878 
01879     return moab::MB_SUCCESS;
01880 }
01881 
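/* Usage sketch for iMOAB_GetNeighborElements; the output buffer capacity below is a
 * caller-chosen bound on the number of face neighbors:
 *
 *   iMOAB_LocalID elem = 0;              // local index into the primary elements
 *   int num_adj = 6;                     // capacity on input, actual count on output
 *   iMOAB_LocalID adj_ids[6];
 *   ErrCode rc = iMOAB_GetNeighborElements( pid, &elem, &num_adj, adj_ids );
 */
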
01882 #if 0
01883 
01884 ErrCode iMOAB_GetNeighborVertices ( iMOAB_AppID pid, iMOAB_LocalID* local_vertex_ID, int* num_adjacent_vertices, iMOAB_LocalID* adjacent_vertex_IDs )
01885 {
01886     return moab::MB_SUCCESS;
01887 }
01888 
01889 #endif
01890 
01891 ErrCode iMOAB_CreateVertices( iMOAB_AppID pid, int* coords_len, int* dim, double* coordinates )
01892 {
01893     ErrorCode rval;
01894     appData& data = context.appDatas[*pid];
01895 
01896     if( !data.local_verts.empty() )  // we should have no vertices in the app
01897     {
01898         return moab::MB_FAILURE;
01899     }
01900 
01901     int nverts = *coords_len / *dim;
01902 
01903     rval = context.MBI->create_vertices( coordinates, nverts, data.local_verts );MB_CHK_ERR( rval );
01904 
01905     rval = context.MBI->add_entities( data.file_set, data.local_verts );MB_CHK_ERR( rval );
01906 
01907     // also add the vertices to the all_verts range
01908     data.all_verts.merge( data.local_verts );
01909     return moab::MB_SUCCESS;
01910 }
01911 
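/* Usage sketch for iMOAB_CreateVertices, with interleaved (x, y, z) coordinates for the four
 * corners of a unit square (illustrative data only):
 *
 *   double coords[] = { 0., 0., 0.,   1., 0., 0.,   1., 1., 0.,   0., 1., 0. };
 *   int coords_len = 12;                 // 4 vertices * 3 coordinates each
 *   int dim = 3;
 *   ErrCode rc = iMOAB_CreateVertices( pid, &coords_len, &dim, coords );
 */
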
01912 ErrCode iMOAB_CreateElements( iMOAB_AppID pid,
01913                               int* num_elem,
01914                               int* type,
01915                               int* num_nodes_per_element,
01916                               int* connectivity,
01917                               int* block_ID )
01918 {
01919     // Create elements
01920     appData& data = context.appDatas[*pid];
01921 
01922     ReadUtilIface* read_iface;
01923     ErrorCode rval = context.MBI->query_interface( read_iface );MB_CHK_ERR( rval );
01924 
01925     EntityType mbtype = ( EntityType )( *type );
01926     EntityHandle actual_start_handle;
01927     EntityHandle* array = NULL;
01928     rval = read_iface->get_element_connect( *num_elem, *num_nodes_per_element, mbtype, 1, actual_start_handle, array );MB_CHK_ERR( rval );
01929 
01930     // fill up with actual connectivity from input; assume the vertices are in order, and start
01931     // vertex is the first in the current data vertex range
01932     EntityHandle firstVertex = data.local_verts[0];
01933 
01934     for( int j = 0; j < *num_elem * ( *num_nodes_per_element ); j++ )
01935     {
01936         array[j] = connectivity[j] + firstVertex - 1;
01937     }  // assumes connectivity uses 1-based indexing (coming from Fortran, mostly)
01938 
01939     Range new_elems( actual_start_handle, actual_start_handle + *num_elem - 1 );
01940 
01941     rval = context.MBI->add_entities( data.file_set, new_elems );MB_CHK_ERR( rval );
01942 
01943     data.primary_elems.merge( new_elems );
01944 
01945     // add to adjacency
01946     rval = read_iface->update_adjacencies( actual_start_handle, *num_elem, *num_nodes_per_element, array );MB_CHK_ERR( rval );
01947     // organize all new elements in a block, with the given block ID; if the block set does
01948     // not exist yet, create a new mesh set
01949     Range sets;
01950     int set_no            = *block_ID;
01951     const void* setno_ptr = &set_no;
01952     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &context.material_tag, &setno_ptr, 1,
01953                                                       sets );
01954     EntityHandle block_set;
01955 
01956     if( MB_FAILURE == rval || sets.empty() )
01957     {
01958         // create a new set, with this block ID
01959         rval = context.MBI->create_meshset( MESHSET_SET, block_set );MB_CHK_ERR( rval );
01960 
01961         rval = context.MBI->tag_set_data( context.material_tag, &block_set, 1, &set_no );MB_CHK_ERR( rval );
01962 
01963         // add the material set to file set
01964         rval = context.MBI->add_entities( data.file_set, &block_set, 1 );MB_CHK_ERR( rval );
01965     }
01966     else
01967     {
01968         block_set = sets[0];
01969     }  // first set is the one we want
01970 
01971     // add the new ents to the block set
01972     rval = context.MBI->add_entities( block_set, new_elems );MB_CHK_ERR( rval );
01973 
01974     return moab::MB_SUCCESS;
01975 }
01976 
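/* Usage sketch for iMOAB_CreateElements, continuing the unit-square example above; the
 * connectivity is 1-based and "type" is the integer value of a moab::EntityType:
 *
 *   int num_elem = 1;
 *   int type = (int)moab::MBQUAD;
 *   int nodes_per_elem = 4;
 *   int conn[] = { 1, 2, 3, 4 };         // 1-based indices into the vertices created earlier
 *   int block_id = 100;                  // placeholder material block id
 *   ErrCode rc = iMOAB_CreateElements( pid, &num_elem, &type, &nodes_per_elem, conn, &block_id );
 */
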
01977 ErrCode iMOAB_SetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
01978 {
01979     appData& data            = context.appDatas[*pid];
01980     data.num_global_vertices = *num_global_verts;
01981     data.num_global_elements = *num_global_elems;
01982     return moab::MB_SUCCESS;
01983 }
01984 
01985 ErrCode iMOAB_GetGlobalInfo( iMOAB_AppID pid, int* num_global_verts, int* num_global_elems )
01986 {
01987     appData& data = context.appDatas[*pid];
01988     if( NULL != num_global_verts )
01989     {
01990         *num_global_verts = data.num_global_vertices;
01991     }
01992     if( NULL != num_global_elems )
01993     {
01994         *num_global_elems = data.num_global_elements;
01995     }
01996 
01997     return moab::MB_SUCCESS;
01998 }
01999 
02000 #ifdef MOAB_HAVE_MPI
02001 
02002 // this makes sense only for parallel runs
02003 ErrCode iMOAB_ResolveSharedEntities( iMOAB_AppID pid, int* num_verts, int* marker )
02004 {
02005     appData& data     = context.appDatas[*pid];
02006     ParallelComm* pco = context.pcomms[*pid];
02007     EntityHandle cset = data.file_set;
02008     int dum_id        = 0;
02009     ErrorCode rval;
02010     if( data.primary_elems.empty() )
02011     {
02012         // skip the actual resolve; assume the vertices are already distributed,
02013         // so there is no need to share them
02014     }
02015     else
02016     {
02017         // create an integer tag for resolving; maybe it can be a long tag in the future
02018         // (for more than 2 billion vertices)
02019 
02020         Tag stag;
02021         rval = context.MBI->tag_get_handle( "__sharedmarker", 1, MB_TYPE_INTEGER, stag, MB_TAG_CREAT | MB_TAG_DENSE,
02022                                             &dum_id );MB_CHK_ERR( rval );
02023 
02024         if( *num_verts > (int)data.local_verts.size() )
02025         {
02026             return moab::MB_FAILURE;
02027         }  // we are not setting the size
02028 
02029         rval = context.MBI->tag_set_data( stag, data.local_verts, (void*)marker );MB_CHK_ERR( rval );  // assumes integer tag
02030 
02031         rval = pco->resolve_shared_ents( cset, -1, -1, &stag );MB_CHK_ERR( rval );
02032 
02033         rval = context.MBI->tag_delete( stag );MB_CHK_ERR( rval );
02034     }
02035     // provide partition tag equal to rank
02036     Tag part_tag;
02037     dum_id = -1;
02038     rval   = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02039                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02040 
02041     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02042     {
02043         std::cout << " can't get par part tag.\n";
02044         return moab::MB_FAILURE;
02045     }
02046 
02047     int rank = pco->rank();
02048     rval     = context.MBI->tag_set_data( part_tag, &cset, 1, &rank );MB_CHK_ERR( rval );
02049 
02050     return moab::MB_SUCCESS;
02051 }
02052 
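/* Usage sketch for iMOAB_ResolveSharedEntities, assuming "vertex_gids" holds one global id per
 * locally created vertex, in the same order as the local vertex range:
 *
 *   int num_verts = (int)vertex_gids.size();
 *   ErrCode rc = iMOAB_ResolveSharedEntities( pid, &num_verts, vertex_gids.data() );
 */
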
02053 // this assumes that this was not called before
02054 ErrCode iMOAB_DetermineGhostEntities( iMOAB_AppID pid, int* ghost_dim, int* num_ghost_layers, int* bridge_dim )
02055 {
02056     ErrorCode rval;
02057 
02058     // verify we have valid ghost layers input specified. If invalid, exit quick.
02059     if( *num_ghost_layers <= 0 )
02060     {
02061         return moab::MB_SUCCESS;
02062     }  // nothing to do
02063 
02064     appData& data     = context.appDatas[*pid];
02065     ParallelComm* pco = context.pcomms[*pid];
02066 
02067     int addl_ents =
02068         0;  // maybe we should be passing this too; most of the time we do not need additional entities (this is a collective call)
02069     rval =
02070         pco->exchange_ghost_cells( *ghost_dim, *bridge_dim, *num_ghost_layers, addl_ents, true, true, &data.file_set );MB_CHK_ERR( rval );
02071 
02072     // now re-establish all mesh info; will reconstruct mesh info, based solely on what is in the file set
02073     return iMOAB_UpdateMeshInfo( pid );
02074 }
02075 
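/* Usage sketch for iMOAB_DetermineGhostEntities: one layer of 3d cell ghosts, bridged through
 * faces (the values are illustrative):
 *
 *   int ghost_dim = 3, bridge_dim = 2, num_layers = 1;
 *   ErrCode rc = iMOAB_DetermineGhostEntities( pid, &ghost_dim, &num_layers, &bridge_dim );
 */
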
02076 ErrCode iMOAB_SendMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* receivingGroup, int* rcompid, int* method )
02077 {
02078     assert( join != nullptr );
02079     assert( receivingGroup != nullptr );
02080     assert( rcompid != nullptr );
02081 
02082     ErrorCode rval;
02083     int ierr;
02084     appData& data     = context.appDatas[*pid];
02085     ParallelComm* pco = context.pcomms[*pid];
02086 
02087     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02088     MPI_Group recvGroup =
02089         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( receivingGroup ) ) : *receivingGroup );
02090     MPI_Comm sender = pco->comm();  // the sender comm is obtained from parallel comm in moab; no need to pass it along
02091     // first see which processors are in each group; get the sender group too, from the sender communicator
02092     MPI_Group senderGroup;
02093     ierr = MPI_Comm_group( sender, &senderGroup );
02094     if( ierr != 0 ) return moab::MB_FAILURE;
02095 
02096     // instantiate the par comm graph
02097     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02098     // int coid2)
02099     ParCommGraph* cgraph =
02100         new ParCommGraph( global, senderGroup, recvGroup, context.appDatas[*pid].global_id, *rcompid );
02101     // TODO: we should check whether we already have another ParCommGraph with the same component
02102     // ids in the list, by searching the existing comm graphs in the map context.appDatas[*pid].pgraph
02103     context.appDatas[*pid].pgraph[*rcompid] = cgraph;
02104 
02105     int sender_rank = -1;
02106     MPI_Comm_rank( sender, &sender_rank );
02107 
02108     // decide how to distribute elements to each processor
02109     // now, get the entities on local processor, and pack them into a buffer for various processors
02110     // we will do trivial partition: first get the total number of elements from "sender"
02111     std::vector< int > number_elems_per_part;
02112     // how do we distribute local elements to the receiving tasks?
02113     // trivial partition: first compute the total number of elements that need to be sent
02114     Range owned = context.appDatas[*pid].owned_elems;
02115     if( owned.size() == 0 )
02116     {
02117         // then it must be vertices that we want to send
02118         owned = context.appDatas[*pid].local_verts;
02119         // we should have some vertices here
02120     }
02121 
02122     if( *method == 0 )  // trivial partitioning, old method
02123     {
02124         int local_owned_elem = (int)owned.size();
02125         int size             = pco->size();
02126         int rank             = pco->rank();
02127         number_elems_per_part.resize( size );  //
02128         number_elems_per_part[rank] = local_owned_elem;
02129 #if( MPI_VERSION >= 2 )
02130         // Use "in place" option
02131         ierr = MPI_Allgather( MPI_IN_PLACE, 0, MPI_DATATYPE_NULL, &number_elems_per_part[0], 1, MPI_INT, sender );
02132 #else
02133         {
02134             std::vector< int > all_tmp( size );
02135             ierr = MPI_Allgather( &number_elems_per_part[rank], 1, MPI_INT, &all_tmp[0], 1, MPI_INT, sender );
02136             number_elems_per_part = all_tmp;
02137         }
02138 #endif
02139 
02140         if( ierr != 0 )
02141         {
02142             return moab::MB_FAILURE;
02143         }
02144 
02145         // every sender computes the trivial partition; it is cheap, and we need to send it
02146         // to the receivers anyway
02147         rval = cgraph->compute_trivial_partition( number_elems_per_part );MB_CHK_ERR( rval );
02148 
02149         rval = cgraph->send_graph( global );MB_CHK_ERR( rval );
02150     }
02151     else  // *method != 0, so it is either graph or geometric, parallel
02152     {
02153         // owned are the primary elements on this app
02154         rval = cgraph->compute_partition( pco, owned, *method );MB_CHK_ERR( rval );
02155 
02156         // basically, send the graph to the receiver side, with unblocking send
02157         rval = cgraph->send_graph_partition( pco, global );MB_CHK_ERR( rval );
02158     }
02159     // pco is needed to pack, not for communication
02160     rval = cgraph->send_mesh_parts( global, pco, owned );MB_CHK_ERR( rval );
02161 
02162     // mark for deletion
02163     MPI_Group_free( &senderGroup );
02164     return moab::MB_SUCCESS;
02165 }
02166 
02167 ErrCode iMOAB_ReceiveMesh( iMOAB_AppID pid, MPI_Comm* join, MPI_Group* sendingGroup, int* scompid )
02168 {
02169     assert( join != nullptr );
02170     assert( sendingGroup != nullptr );
02171     assert( scompid != nullptr );
02172 
02173     ErrorCode rval;
02174     appData& data          = context.appDatas[*pid];
02175     ParallelComm* pco      = context.pcomms[*pid];
02176     MPI_Comm receive       = pco->comm();
02177     EntityHandle local_set = data.file_set;
02178 
02179     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02180     MPI_Group sendGroup =
02181         ( data.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( sendingGroup ) ) : *sendingGroup );
02182 
02183     // first see which processors are in each group; get the receiver group from the receive
02184     // communicator
02185     MPI_Group receiverGroup;
02186     int ierr = MPI_Comm_group( receive, &receiverGroup );CHK_MPI_ERR( ierr );
02187 
02188     // instantiate the par comm graph
02189     ParCommGraph* cgraph =
02190         new ParCommGraph( global, sendGroup, receiverGroup, *scompid, context.appDatas[*pid].global_id );
02191     // TODO: we should check whether we already have another ParCommGraph with the same component
02192     // ids in the list, by searching the existing comm graphs in the map context.appDatas[*pid].pgraph
02193     context.appDatas[*pid].pgraph[*scompid] = cgraph;
02194 
02195     int receiver_rank = -1;
02196     MPI_Comm_rank( receive, &receiver_rank );
02197 
02198     // first, receive from sender_rank 0, the communication graph (matrix), so each receiver
02199     // knows what data to expect
02200     std::vector< int > pack_array;
02201     rval = cgraph->receive_comm_graph( global, pco, pack_array );MB_CHK_ERR( rval );
02202 
02203     // find the senders for the current receiver
02204     int current_receiver = cgraph->receiver( receiver_rank );
02205 
02206     std::vector< int > senders_local;
02207     size_t n = 0;
02208 
02209     while( n < pack_array.size() )
02210     {
02211         if( current_receiver == pack_array[n] )
02212         {
02213             for( int j = 0; j < pack_array[n + 1]; j++ )
02214             {
02215                 senders_local.push_back( pack_array[n + 2 + j] );
02216             }
02217 
02218             break;
02219         }
02220 
02221         n = n + 2 + pack_array[n + 1];
02222     }
02223 
02224 #ifdef VERBOSE
02225     std::cout << " receiver " << current_receiver << " at rank " << receiver_rank << " will receive from "
02226               << senders_local.size() << " tasks: ";
02227 
02228     for( int k = 0; k < (int)senders_local.size(); k++ )
02229     {
02230         std::cout << " " << senders_local[k];
02231     }
02232 
02233     std::cout << "\n";
02234 #endif
02235 
02236     if( senders_local.empty() )
02237     {
02238         std::cout << " we do not have any senders for receiver rank " << receiver_rank << "\n";
02239     }
02240     rval = cgraph->receive_mesh( global, pco, local_set, senders_local );MB_CHK_ERR( rval );
02241 
02242     // after we are done, we could merge vertices that come from different senders, but
02243     // have the same global id
02244     Tag idtag;
02245     rval = context.MBI->tag_get_handle( "GLOBAL_ID", idtag );MB_CHK_ERR( rval );
02246 
02247     //   data.point_cloud = false;
02248     Range local_ents;
02249     rval = context.MBI->get_entities_by_handle( local_set, local_ents );MB_CHK_ERR( rval );
02250 
02251     // do not do merge if point cloud
02252     if( !local_ents.all_of_type( MBVERTEX ) )
02253     {
02254         if( (int)senders_local.size() >= 2 )  // need to remove duplicate vertices
02255         // that might come from different senders
02256         {
02257 
02258             Range local_verts = local_ents.subset_by_type( MBVERTEX );
02259             Range local_elems = subtract( local_ents, local_verts );
02260 
02261             // remove from local set the vertices
02262             rval = context.MBI->remove_entities( local_set, local_verts );MB_CHK_ERR( rval );
02263 
02264 #ifdef VERBOSE
02265             std::cout << "current_receiver " << current_receiver << " local verts: " << local_verts.size() << "\n";
02266 #endif
02267             MergeMesh mm( context.MBI );
02268 
02269             rval = mm.merge_using_integer_tag( local_verts, idtag );MB_CHK_ERR( rval );
02270 
02271             Range new_verts;  // vertices used by the remaining local elements, after the merge
02272             rval = context.MBI->get_connectivity( local_elems, new_verts );MB_CHK_ERR( rval );
02273 
02274 #ifdef VERBOSE
02275             std::cout << "after merging: new verts: " << new_verts.size() << "\n";
02276 #endif
02277             rval = context.MBI->add_entities( local_set, new_verts );MB_CHK_ERR( rval );
02278         }
02279     }
02280     else
02281         data.point_cloud = true;
02282 
02283     if( !data.point_cloud )
02284     {
02285         // still need to resolve shared entities (in this case, vertices )
02286         rval = pco->resolve_shared_ents( local_set, -1, -1, &idtag );MB_CHK_ERR( rval );
02287     }
02288     else
02289     {
02290         // if partition tag exists, set it to current rank; just to make it visible in VisIt
02291         Tag densePartTag;
02292         rval = context.MBI->tag_get_handle( "partition", densePartTag );
02293         if( NULL != densePartTag && MB_SUCCESS == rval )
02294         {
02295             Range local_verts;
02296             rval = context.MBI->get_entities_by_dimension( local_set, 0, local_verts );MB_CHK_ERR( rval );
02297             std::vector< int > vals;
02298             int rank = pco->rank();
02299             vals.resize( local_verts.size(), rank );
02300             rval = context.MBI->tag_set_data( densePartTag, local_verts, &vals[0] );MB_CHK_ERR( rval );
02301         }
02302     }
02303     // set the parallel partition tag
02304     Tag part_tag;
02305     int dum_id = -1;
02306     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02307                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02308 
02309     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02310     {
02311         std::cout << " can't get par part tag.\n";
02312         return moab::MB_FAILURE;
02313     }
02314 
02315     int rank = pco->rank();
02316     rval     = context.MBI->tag_set_data( part_tag, &local_set, 1, &rank );MB_CHK_ERR( rval );
02317 
02318     // populate the mesh with current data info
02319     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
02320 
02321     // mark for deletion
02322     MPI_Group_free( &receiverGroup );
02323 
02324     return moab::MB_SUCCESS;
02325 }
02326 
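/* Usage sketch for the mesh migration pair: sender tasks call iMOAB_SendMesh while receiver
 * tasks call iMOAB_ReceiveMesh over the same joint communicator; the application ids, groups
 * and component ids below are placeholders:
 *
 *   int method = 0;                                        // 0 = trivial partition
 *   ErrCode rc = iMOAB_SendMesh( pid_send, &joint_comm, &recv_group, &rcompid, &method );
 *   // ... and on the receiving tasks:
 *   rc = iMOAB_ReceiveMesh( pid_recv, &joint_comm, &send_group, &scompid );
 */
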
02327 ErrCode iMOAB_SendElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02328 {
02329     appData& data                               = context.appDatas[*pid];
02330     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02331     if( mt == data.pgraph.end() )
02332     {
02333         return moab::MB_FAILURE;
02334     }
02335     ParCommGraph* cgraph = mt->second;
02336     ParallelComm* pco    = context.pcomms[*pid];
02337     Range owned          = data.owned_elems;
02338     ErrorCode rval;
02339     EntityHandle cover_set;
02340 
02341     MPI_Comm global = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02342     if( data.point_cloud )
02343     {
02344         owned = data.local_verts;
02345     }
02346 
02347     // another possibility is for the par comm graph to be computed from iMOAB_ComputeCommGraph,
02348     // for after atm-ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp); then get the
02349     // cover set from the intx remapper
02350 #ifdef MOAB_HAVE_TEMPESTREMAP
02351     if( data.tempestData.remapper != NULL )  // this is the case when this is part of intx
02352     {
02353         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02354         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02355         // should still have only quads ?
02356     }
02357 #else
02358     // now, in case we are sending from intx between ocn and atm, we involve coverage set
02359     // how do I know if this receiver already participated in an intersection driven by coupler?
02360     // also, what if this was the "source" mesh in intx?
02361     // in that case, the elements might have been instantiated in the coverage set locally; the
02362     // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
02363     cover_set = cgraph->get_cover_set();  // this will be non null only for intx app ?
02364 
02365     if( 0 != cover_set )
02366     {
02367         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02368     }
02369 #endif
02370 
02371     std::string tag_name( tag_storage_name );
02372 
02373     // basically, we assume everything is defined already on the tag,
02374     //   and we can get the tags just by its name
02375     // we assume that there are separators ";" between the tag names
02376     std::vector< std::string > tagNames;
02377     std::vector< Tag > tagHandles;
02378     std::string separator( ";" );
02379     split_tag_names( tag_name, separator, tagNames );
02380     for( size_t i = 0; i < tagNames.size(); i++ )
02381     {
02382         Tag tagHandle;
02383         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02384         if( MB_SUCCESS != rval || NULL == tagHandle )
02385         {
02386             return moab::MB_FAILURE;
02387         }
02388         tagHandles.push_back( tagHandle );
02389     }
02390 
02391     // pco is needed to pack, and for moab instance, not for communication!
02392     // still use nonblocking communication, over the joint comm
02393     rval = cgraph->send_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02394     // now, send to each corr_tasks[i] tag data for corr_sizes[i] primary entities
02395 
02396     return moab::MB_SUCCESS;
02397 }
02398 
02399 ErrCode iMOAB_ReceiveElementTag( iMOAB_AppID pid, const iMOAB_String tag_storage_name, MPI_Comm* join, int* context_id )
02400 {
02401     appData& data = context.appDatas[*pid];
02402 
02403     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02404     if( mt == data.pgraph.end() )
02405     {
02406         return moab::MB_FAILURE;
02407     }
02408     ParCommGraph* cgraph = mt->second;
02409 
02410     MPI_Comm global   = ( data.is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02411     ParallelComm* pco = context.pcomms[*pid];
02412     Range owned       = data.owned_elems;
02413 
02414     // how do I know if this receiver already participated in an intersection driven by coupler?
02415     // also, what if this was the "source" mesh in intx?
02416     // in that case, the elements might have been instantiated in the coverage set locally; the
02417     // "owned" range can be different, because the elements are now in the tempestRemap coverage_set
02418     EntityHandle cover_set = cgraph->get_cover_set();
02419     ErrorCode rval;
02420     if( 0 != cover_set )
02421     {
02422         rval = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02423     }
02424     if( data.point_cloud )
02425     {
02426         owned = data.local_verts;
02427     }
02428     // another possibility is for par comm graph to be computed from iMOAB_ComputeCommGraph, for
02429     // after atm ocn intx, from phys (case from imoab_phatm_ocn_coupler.cpp) get then the cover set
02430     // from ints remapper
02431 #ifdef MOAB_HAVE_TEMPESTREMAP
02432     if( data.tempestData.remapper != NULL )  // this is the case when this is part of intx
02433     {
02434         cover_set = data.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02435         rval      = context.MBI->get_entities_by_dimension( cover_set, 2, owned );MB_CHK_ERR( rval );
02436         // should still have only quads ?
02437     }
02438 #endif
02439     /*
02440      * data_intx.remapper exists only on the intersection application, though;
02441      * how do we get to it from here? ( we know the pid that receives, and the comm graph used to
02442      * migrate the mesh )
02443      */
02444 
02445     std::string tag_name( tag_storage_name );
02446 
02447     // we assume that there are separators ";" between the tag names
02448     std::vector< std::string > tagNames;
02449     std::vector< Tag > tagHandles;
02450     std::string separator( ";" );
02451     split_tag_names( tag_name, separator, tagNames );
02452     for( size_t i = 0; i < tagNames.size(); i++ )
02453     {
02454         Tag tagHandle;
02455         rval = context.MBI->tag_get_handle( tagNames[i].c_str(), tagHandle );
02456         if( MB_SUCCESS != rval || NULL == tagHandle )
02457         {
02458             return moab::MB_FAILURE;
02459         }
02460         tagHandles.push_back( tagHandle );
02461     }
02462 
02463 #ifdef VERBOSE
02464     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name
02465               << " and file set = " << ( data.file_set ) << "\n";
02466 #endif
02467     // pco is needed to pack, and for moab instance, not for communication!
02468     // still use nonblocking communication
02469     rval = cgraph->receive_tag_values( global, pco, owned, tagHandles );MB_CHK_ERR( rval );
02470 
02471 #ifdef VERBOSE
02472     std::cout << pco->rank() << ". Looking to receive data for tags: " << tag_name << "\n";
02473 #endif
02474 
02475     return moab::MB_SUCCESS;
02476 }
02477 
02478 ErrCode iMOAB_FreeSenderBuffers( iMOAB_AppID pid, int* context_id )
02479 {
02480     // need first to find the pgraph that holds the information we need
02481     // this will be called on sender side only
02482     appData& data                               = context.appDatas[*pid];
02483     std::map< int, ParCommGraph* >::iterator mt = data.pgraph.find( *context_id );
02484     if( mt == data.pgraph.end() ) return moab::MB_FAILURE;  // error
02485 
02486     mt->second->release_send_buffers();
02487     return moab::MB_SUCCESS;
02488 }
02489 
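/* Usage sketch for migrating tag data along an existing comm graph; several tag names can be
 * concatenated with ';', and context_id is the component id used when the graph was created
 * (all names below are placeholders):
 *
 *   ErrCode rc = iMOAB_SendElementTag( pid_send, "fieldA;fieldB", &joint_comm, &context_id );
 *   // ... on the receiving tasks:
 *   rc = iMOAB_ReceiveElementTag( pid_recv, "fieldA;fieldB", &joint_comm, &context_id );
 *   // once the receives complete, the sender releases its nonblocking buffers:
 *   rc = iMOAB_FreeSenderBuffers( pid_send, &context_id );
 */
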
02490 /**
02491 \brief compute a comm graph between 2 moab apps, based on ID matching
02492 <B>Operations:</B> Collective
02493 */
02494 //#define VERBOSE
02495 ErrCode iMOAB_ComputeCommGraph( iMOAB_AppID pid1,
02496                                 iMOAB_AppID pid2,
02497                                 MPI_Comm* join,
02498                                 MPI_Group* group1,
02499                                 MPI_Group* group2,
02500                                 int* type1,
02501                                 int* type2,
02502                                 int* comp1,
02503                                 int* comp2 )
02504 {
02505     assert( join );
02506     assert( group1 );
02507     assert( group2 );
02508     ErrorCode rval = MB_SUCCESS;
02509     int localRank = 0, numProcs = 1;
02510 
02511     appData& sData = context.appDatas[*pid1];
02512     appData& tData = context.appDatas[*pid2];
02513 
02514     MPI_Comm global =
02515         ( ( sData.is_fortran || tData.is_fortran ) ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02516     MPI_Group srcGroup = ( sData.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group1 ) ) : *group1 );
02517     MPI_Group tgtGroup = ( tData.is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( group2 ) ) : *group2 );
02518 
02519     MPI_Comm_rank( global, &localRank );
02520     MPI_Comm_size( global, &numProcs );
02521     // instantiate the par comm graph
02522     // ParCommGraph::ParCommGraph(MPI_Comm joincomm, MPI_Group group1, MPI_Group group2, int coid1,
02523     // int coid2)
02524     ParCommGraph* cgraph = NULL;
02525     if( *pid1 >= 0 ) cgraph = new ParCommGraph( global, srcGroup, tgtGroup, *comp1, *comp2 );
02526     ParCommGraph* cgraph_rev = NULL;
02527     if( *pid2 >= 0 ) cgraph_rev = new ParCommGraph( global, tgtGroup, srcGroup, *comp2, *comp1 );
02528     // TODO: we should check whether we already have another ParCommGraph with the same component
02529     // ids in the list, by searching the existing comm graphs in the map context.appDatas[*pid].pgraph
02530     if( *pid1 >= 0 ) sData.pgraph[*comp2] = cgraph;      // the context will be the other comp
02531     if( *pid2 >= 0 ) tData.pgraph[*comp1] = cgraph_rev;  // from 2 to 1
02532     // each model has a list of global ids that will need to be sent by gs to rendezvous with the
02533     // other model on the joint comm
02534     TupleList TLcomp1;
02535     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02536     TupleList TLcomp2;
02537     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
02538     // will push_back a new tuple, if needed
02539 
02540     TLcomp1.enableWriteAccess();
02541 
02542     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
02543     Tag gdsTag;
02544     // find the values on first cell
02545     int lenTagType1 = 1;
02546     if( 1 == *type1 || 1 == *type2 )
02547     {
02548         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
02549         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
02550     }
02551     Tag tagType2 = context.MBI->globalId_tag();
02552 
02553     std::vector< int > valuesComp1;
02554     // populate first tuple
02555     if( *pid1 >= 0 )
02556     {
02557         appData& data1     = context.appDatas[*pid1];
02558         EntityHandle fset1 = data1.file_set;
02559         // in case of tempest remap, get the coverage set
02560 #ifdef MOAB_HAVE_TEMPESTREMAP
02561         if( data1.tempestData.remapper != NULL )  // this is the case when this is part of intx
02562         {
02563             fset1 = data1.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02564             // should still have only quads ?
02565         }
02566 #endif
02567         Range ents_of_interest;
02568         if( *type1 == 1 )
02569         {
02570             assert( gdsTag );
02571             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
02572             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
02573             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
02574         }
02575         else if( *type1 == 2 )
02576         {
02577             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
02578             valuesComp1.resize( ents_of_interest.size() );
02579             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
02580         }
02581         else if( *type1 == 3 )  // for FV meshes, just get the global id of cell
02582         {
02583             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
02584             valuesComp1.resize( ents_of_interest.size() );
02585             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
02586         }
02587         else
02588         {
02589             MB_CHK_ERR( MB_FAILURE );  // we know only type 1 or 2 or 3
02590         }
02591         // now fill the tuple list with info and markers
02592         // because we will send only the ids, order and compress the list
02593         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
02594         TLcomp1.resize( uniq.size() );
02595         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02596         {
02597             // to proc, marker
02598             int marker               = *sit;
02599             int to_proc              = marker % numProcs;
02600             int n                    = TLcomp1.get_n();
02601             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
02602             TLcomp1.vi_wr[2 * n + 1] = marker;
02603             TLcomp1.inc_n();
02604         }
02605     }
02606 
02607     ProcConfig pc( global );  // proc config does the crystal router
02608     pc.crystal_router()->gs_transfer( 1, TLcomp1,
02609                                       0 );  // communication towards joint tasks, with markers
02610     // sort by value (key 1)
02611 #ifdef VERBOSE
02612     std::stringstream ff1;
02613     ff1 << "TLcomp1_" << localRank << ".txt";
02614     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02615 #endif
02616     moab::TupleList::buffer sort_buffer;
02617     sort_buffer.buffer_init( TLcomp1.get_n() );
02618     TLcomp1.sort( 1, &sort_buffer );
02619     sort_buffer.reset();
02620 #ifdef VERBOSE
02621     // after sorting
02622     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
02623 #endif
02624     // do the same, for the other component, number2, with type2
02625     // start copy
02626     TLcomp2.enableWriteAccess();
02627     // populate second tuple
02628     std::vector< int > valuesComp2;
02629     if( *pid2 >= 0 )
02630     {
02631         appData& data2     = context.appDatas[*pid2];
02632         EntityHandle fset2 = data2.file_set;
02633         // in case of tempest remap, get the coverage set
02634 #ifdef MOAB_HAVE_TEMPESTREMAP
02635         if( data2.tempestData.remapper != NULL )  // this is the case when this is part of intx
02636         {
02637             fset2 = data2.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
02638             // should still have only quads ?
02639         }
02640 #endif
02641 
02642         Range ents_of_interest;
02643         if( *type2 == 1 )
02644         {
02645             assert( gdsTag );
02646             rval = context.MBI->get_entities_by_type( fset2, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
02647             valuesComp2.resize( ents_of_interest.size() * lenTagType1 );
02648             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );
02649         }
02650         else if( *type2 == 2 )
02651         {
02652             rval = context.MBI->get_entities_by_type( fset2, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
02653             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02654             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
02655         }
02656         else if( *type2 == 3 )
02657         {
02658             rval = context.MBI->get_entities_by_dimension( fset2, 2, ents_of_interest );MB_CHK_ERR( rval );
02659             valuesComp2.resize( ents_of_interest.size() );  // stride is 1 here
02660             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp2[0] );MB_CHK_ERR( rval );  // just global ids
02661         }
02662         else
02663         {
02664             MB_CHK_ERR( MB_FAILURE );  // we know only type 1, 2, or 3
02665         }
02666         // now fill the tuple list with info and markers
02667         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
02668         TLcomp2.resize( uniq.size() );
02669         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
02670         {
02671             // to proc, marker
02672             int marker               = *sit;
02673             int to_proc              = marker % numProcs;
02674             int n                    = TLcomp2.get_n();
02675             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
02676             TLcomp2.vi_wr[2 * n + 1] = marker;
02677             TLcomp2.inc_n();
02678         }
02679     }
02680     pc.crystal_router()->gs_transfer( 1, TLcomp2,
02681                                       0 );  // communication towards joint tasks, with markers
02682     // sort by value (key 1)
02683 #ifdef VERBOSE
02684     std::stringstream ff2;
02685     ff2 << "TLcomp2_" << localRank << ".txt";
02686     TLcomp2.print_to_file( ff2.str().c_str() );
02687 #endif
02688     sort_buffer.buffer_reserve( TLcomp2.get_n() );
02689     TLcomp2.sort( 1, &sort_buffer );
02690     sort_buffer.reset();
02691     // end copy
02692 #ifdef VERBOSE
02693     TLcomp2.print_to_file( ff2.str().c_str() );
02694 #endif
02695     // need to send back the info, from the rendezvous point, for each of the values
02696     /* so go over each value, on local process in joint communicator
02697 
02698     we now have to send back the info needed for communication;
02699      loop in sync over both TLcomp1 and TLcomp2, on the local process;
02700       so, build new tuple lists to send back, and
02701       populate them at the same time, based on the marker, which is indexed
02702     */
02703 
02704     TupleList TLBackToComp1;
02705     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
02706     TLBackToComp1.enableWriteAccess();
02707 
02708     TupleList TLBackToComp2;
02709     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
02710     TLBackToComp2.enableWriteAccess();
02711 
02712     int n1 = TLcomp1.get_n();
02713     int n2 = TLcomp2.get_n();
02714 
02715     int indexInTLComp1 = 0;
02716     int indexInTLComp2 = 0;  // advance both, according to the marker
02717     if( n1 > 0 && n2 > 0 )
02718     {
02719 
02720         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
02721         {
02722             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
02723             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
02724             if( currentValue1 < currentValue2 )
02725             {
02726                 // we have a big problem; basically, we are saying that
02727                 // dof currentValue is on one model and not on the other
02728                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
02729                 indexInTLComp1++;
02730                 continue;
02731             }
02732             if( currentValue1 > currentValue2 )
02733             {
02734                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
02735                 indexInTLComp2++;
02736                 continue;
02737             }
02738             int size1 = 1;
02739             int size2 = 1;
02740             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
02741                 size1++;
02742             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
02743                 size2++;
02744             // must be found in both lists, find the start and end indices
02745             for( int i1 = 0; i1 < size1; i1++ )
02746             {
02747                 for( int i2 = 0; i2 < size2; i2++ )
02748                 {
02749                     // send the info back to components
02750                     int n = TLBackToComp1.get_n();
02751                     TLBackToComp1.reserve();
02752                     TLBackToComp1.vi_wr[3 * n] =
02753                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
02754                                                                      // came from, info from comp2
02755                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
02756                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
02757                     n                              = TLBackToComp2.get_n();
02758                     TLBackToComp2.reserve();
02759                     TLBackToComp2.vi_wr[3 * n] =
02760                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
02761                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
02762                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
02763                     // what if there are repeated markers in TLcomp2? increase just index2
02764                 }
02765             }
02766             indexInTLComp1 += size1;
02767             indexInTLComp2 += size2;
02768         }
02769     }
02770     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about
02771     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
02772 
02773     if( *pid1 >= 0 )
02774     {
02775         // we are on original comp 1 tasks
02776         // before ordering
02777         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
02778         // processors it communicates with
02779 #ifdef VERBOSE
02780         std::stringstream f1;
02781         f1 << "TLBack1_" << localRank << ".txt";
02782         TLBackToComp1.print_to_file( f1.str().c_str() );
02783 #endif
02784         sort_buffer.buffer_reserve( TLBackToComp1.get_n() );
02785         TLBackToComp1.sort( 1, &sort_buffer );
02786         sort_buffer.reset();
02787 #ifdef VERBOSE
02788         TLBackToComp1.print_to_file( f1.str().c_str() );
02789 #endif
02790         // so we are now on pid1; we now know where each marker has to go
02791         // add a new method to ParCommGraph, to set up the involved_IDs_map
02792         cgraph->settle_comm_by_ids( *comp1, TLBackToComp1, valuesComp1 );
02793     }
02794     if( *pid2 >= 0 )
02795     {
02796         // we are on original comp 2 tasks
02797         // before ordering
02798         // now for each value in TLBackToComp2.vi_rd[3*i+1], on current proc, we know the
02799         // processors it communicates with
02800 #ifdef VERBOSE
02801         std::stringstream f2;
02802         f2 << "TLBack2_" << localRank << ".txt";
02803         TLBackToComp2.print_to_file( f2.str().c_str() );
02804 #endif
02805         sort_buffer.buffer_reserve( TLBackToComp2.get_n() );
02806         TLBackToComp2.sort( 2, &sort_buffer );
02807         sort_buffer.reset();
02808 #ifdef VERBOSE
02809         TLBackToComp2.print_to_file( f2.str().c_str() );
02810 #endif
02811         cgraph_rev->settle_comm_by_ids( *comp2, TLBackToComp2, valuesComp2 );
02812         //
02813     }
02814     return moab::MB_SUCCESS;
02815 }
02816 
02817 //#undef VERBOSE
02818 
02819 ErrCode iMOAB_MergeVertices( iMOAB_AppID pid )
02820 {
02821     appData& data     = context.appDatas[*pid];
02822     ParallelComm* pco = context.pcomms[*pid];
02823     // collapse vertices and transform cells into triangles/quads /polys
02824     // tags we care about: area, frac, global id
02825     std::vector< Tag > tagsList;
02826     Tag tag;
02827     ErrorCode rval = context.MBI->tag_get_handle( "GLOBAL_ID", tag );
02828     if( !tag || rval != MB_SUCCESS ) return moab::MB_FAILURE;  // fatal error, abort
02829     tagsList.push_back( tag );
02830     rval = context.MBI->tag_get_handle( "area", tag );
02831     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
02832     rval = context.MBI->tag_get_handle( "frac", tag );
02833     if( tag && rval == MB_SUCCESS ) tagsList.push_back( tag );
02834     double tol = 1.0e-9;
02835     rval       = IntxUtils::remove_duplicate_vertices( context.MBI, data.file_set, tol, tagsList );MB_CHK_ERR( rval );
02836 
02837     // clean material sets of cells that were deleted
02838     rval = context.MBI->get_entities_by_type_and_tag( data.file_set, MBENTITYSET, &( context.material_tag ), 0, 1,
02839                                                       data.mat_sets, Interface::UNION );MB_CHK_ERR( rval );
02840 
02841     if( !data.mat_sets.empty() )
02842     {
02843         EntityHandle matSet = data.mat_sets[0];
02844         Range elems;
02845         rval = context.MBI->get_entities_by_dimension( matSet, 2, elems );MB_CHK_ERR( rval );
02846         rval = context.MBI->remove_entities( matSet, elems );MB_CHK_ERR( rval );
02847         // put back only cells from data.file_set
02848         elems.clear();
02849         rval = context.MBI->get_entities_by_dimension( data.file_set, 2, elems );MB_CHK_ERR( rval );
02850         rval = context.MBI->add_entities( matSet, elems );MB_CHK_ERR( rval );
02851     }
02852     rval = iMOAB_UpdateMeshInfo( pid );MB_CHK_ERR( rval );
02853 
02854     ParallelMergeMesh pmm( pco, tol );
02855     rval = pmm.merge( data.file_set,
02856                       /* do not do local merge*/ false,
02857                       /*  2d cells*/ 2 );MB_CHK_ERR( rval );
02858 
02859     // assign global ids only for vertices, cells have them fine
02860     rval = pco->assign_global_ids( data.file_set, /*dim*/ 0 );MB_CHK_ERR( rval );
02861 
02862     // set the partition tag on the file set
02863     Tag part_tag;
02864     int dum_id = -1;
02865     rval       = context.MBI->tag_get_handle( "PARALLEL_PARTITION", 1, MB_TYPE_INTEGER, part_tag,
02866                                         MB_TAG_CREAT | MB_TAG_SPARSE, &dum_id );
02867 
02868     if( part_tag == NULL || ( ( rval != MB_SUCCESS ) && ( rval != MB_ALREADY_ALLOCATED ) ) )
02869     {
02870             std::cout << " cannot get the PARALLEL_PARTITION tag.\n";
02871         return moab::MB_FAILURE;
02872     }
02873 
02874     int rank = pco->rank();
02875     rval     = context.MBI->tag_set_data( part_tag, &data.file_set, 1, &rank );MB_CHK_ERR( rval );
02876 
02877     return moab::MB_SUCCESS;
02878 }
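// A minimal usage sketch (editorial example, not part of the library source): assuming an
// application was already registered via iMOAB_RegisterApplication and its 2d mesh was loaded or
// received into the corresponding file set, merging duplicated vertices is a single collective
// call on that application's communicator:
//
//     ErrCode rc = iMOAB_MergeVertices( pid );  // pid as returned by iMOAB_RegisterApplication
//     if( rc != moab::MB_SUCCESS ) { /* handle the error */ }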
02879 
02880 #ifdef MOAB_HAVE_TEMPESTREMAP
02881 
02882 // this call must be collective on the joint communicator
02883 //  the intersection tasks on the coupler will need to send to the component tasks the list of
02884 // element ids that are relevant: they intersected some of the target elements (the target
02885 // elements themselves are not needed here)
02886 //  only the source element ids involved in the intersection are required
02887 ErrCode iMOAB_CoverageGraph( MPI_Comm* join,
02888                              iMOAB_AppID pid_src,
02889                              iMOAB_AppID pid_migr,
02890                              iMOAB_AppID pid_intx,
02891                              int* src_id,
02892                              int* migr_id,
02893                              int* context_id )
02894 {
02895     // first, based on the scompid and migrcomp, find the parCommGraph corresponding to this
02896     // exchange
02897     ErrorCode rval;
02898     std::vector< int > srcSenders;
02899     std::vector< int > receivers;
02900     ParCommGraph* sendGraph = NULL;
02901     int ierr;
02902     int default_context_id  = -1;
02903     bool is_fortran_context = false;
02904 
02905     // First, find the original graph between PE sets
02906     // And based on this one, we will build the newly modified one for coverage
02907     if( *pid_src >= 0 )
02908     {
02909         default_context_id = *migr_id;  // the other one
02910         assert( context.appDatas[*pid_src].global_id == *src_id );
02911         is_fortran_context = context.appDatas[*pid_src].is_fortran || is_fortran_context;
02912         sendGraph          = context.appDatas[*pid_src].pgraph[default_context_id];  // maybe check if it does not exist
02913 
02914         // report the sender and receiver tasks in the joint comm
02915         srcSenders = sendGraph->senders();
02916         receivers  = sendGraph->receivers();
02917 #ifdef VERBOSE
02918         std::cout << "senders: " << srcSenders.size() << " first sender: " << srcSenders[0] << std::endl;
02919 #endif
02920     }
02921 
02922     ParCommGraph* recvGraph = NULL;  // will be non null on receiver tasks (intx tasks)
02923     if( *pid_migr >= 0 )
02924     {
02925         is_fortran_context = context.appDatas[*pid_migr].is_fortran || is_fortran_context;
02926         // find the original one
02927         default_context_id = *src_id;
02928         assert( context.appDatas[*pid_migr].global_id == *migr_id );
02929         recvGraph = context.appDatas[*pid_migr].pgraph[default_context_id];
02930         // report the sender and receiver tasks in the joint comm, from migrated mesh pt of view
02931         srcSenders = recvGraph->senders();
02932         receivers  = recvGraph->receivers();
02933 #ifdef VERBOSE
02934         std::cout << "receivers: " << receivers.size() << " first receiver: " << receivers[0] << std::endl;
02935 #endif
02936     }
02937 
02938     if( *pid_intx >= 0 ) is_fortran_context = context.appDatas[*pid_intx].is_fortran || is_fortran_context;
02939 
02940     // loop over pid_intx elements, to see what original processors in joint comm have sent the
02941     // coverage mesh;
02942     // If we are on intx tasks, send coverage info towards original component tasks,
02943     // about needed cells
02944     TupleList TLcovIDs;
02945     TLcovIDs.initialize( 2, 0, 0, 0, 0 );  // to proc, GLOBAL ID; estimate about 100 IDs to be sent
02946     // will push_back a new tuple, if needed
02947     TLcovIDs.enableWriteAccess();
02948     // the crystal router will send the cell IDs back to the original source, on the component tasks
02949     // if we are on intx tasks, loop over all intx elements and collect, per original sender, the needed ids
02950 
02951     MPI_Comm global            = ( is_fortran_context ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( join ) ) : *join );
02952     int currentRankInJointComm = -1;
02953     ierr                       = MPI_Comm_rank( global, &currentRankInJointComm );CHK_MPI_ERR( ierr );
02954 
02955     // if currentRankInJointComm is in receivers list, it means that we are on intx tasks too, we
02956     // need to send information towards component tasks
02957     if( find( receivers.begin(), receivers.end(), currentRankInJointComm ) !=
02958         receivers.end() )  // we are on receivers tasks, we can request intx info
02959     {
02960         // find the pcomm for the intx pid
02961         if( *pid_intx >= (int)context.appDatas.size() ) return moab::MB_FAILURE;
02962 
02963         appData& dataIntx = context.appDatas[*pid_intx];
02964         Tag parentTag, orgSendProcTag;
02965 
02966         rval = context.MBI->tag_get_handle( "SourceParent", parentTag );MB_CHK_ERR( rval );                        // global id of the blue, source element
02967         if( !parentTag ) return moab::MB_FAILURE;  // fatal error, abort
02968 
02969         rval = context.MBI->tag_get_handle( "orig_sending_processor", orgSendProcTag );MB_CHK_ERR( rval );
02970         if( !orgSendProcTag ) return moab::MB_FAILURE;  // fatal error, abort
02971 
02972         // find the file set, red parents for intx cells, and put them in tuples
02973         EntityHandle intxSet = dataIntx.file_set;
02974         Range cells;
02975         // get all entities from the set, and look at their RedParent
02976         rval = context.MBI->get_entities_by_dimension( intxSet, 2, cells );MB_CHK_ERR( rval );
02977 
02978         std::map< int, std::set< int > > idsFromProcs;  // send that info back to enhance parCommGraph cache
02979         for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
02980         {
02981             EntityHandle intx_cell = *it;
02982             int gidCell, origProc;  // look at receivers
02983 
02984             rval = context.MBI->tag_get_data( parentTag, &intx_cell, 1, &gidCell );MB_CHK_ERR( rval );
02985             rval = context.MBI->tag_get_data( orgSendProcTag, &intx_cell, 1, &origProc );MB_CHK_ERR( rval );
02986             // we have augmented the overlap set with ghost cells ; in that case, the
02987             // orig_sending_processor is not set so it will be -1;
02988             if( origProc < 0 ) continue;
02989 
02990             std::set< int >& setInts = idsFromProcs[origProc];
02991             setInts.insert( gidCell );
02992         }
02993 
02994         // if we have no intx cells, it means we are on point clouds; as a quick fix, just use all cells
02995         // from the coverage set
02996         if( cells.empty() )
02997         {
02998             // get coverage set
02999             assert( *pid_intx >= 0 );
03000             appData& dataIntx      = context.appDatas[*pid_intx];
03001             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03002 
03003             // get all cells from coverage set
03004             Tag gidTag;
03005             rval = context.MBI->tag_get_handle( "GLOBAL_ID", gidTag );MB_CHK_ERR( rval );
03006             rval = context.MBI->get_entities_by_dimension( cover_set, 2, cells );MB_CHK_ERR( rval );
03007             // look at their orig_sending_processor
03008             for( Range::iterator it = cells.begin(); it != cells.end(); it++ )
03009             {
03010                 EntityHandle covCell = *it;
03011                 int gidCell, origProc;  // look at the original sending processor
03012 
03013                 rval = context.MBI->tag_get_data( gidTag, &covCell, 1, &gidCell );MB_CHK_ERR( rval );
03014                 rval = context.MBI->tag_get_data( orgSendProcTag, &covCell, 1, &origProc );MB_CHK_ERR( rval );
03015                 // we have augmented the overlap set with ghost cells ; in that case, the
03016                 // orig_sending_processor is not set so it will be -1;
03017                 if( origProc < 0 )  // it should not be < 0 here, but guard anyway
03018                     continue;
03019                 std::set< int >& setInts = idsFromProcs[origProc];
03020                 setInts.insert( gidCell );
03021             }
03022         }
03023 
03024 #ifdef VERBOSE
03025         std::ofstream dbfile;
03026         std::stringstream outf;
03027         outf << "idsFromProc_0" << currentRankInJointComm << ".txt";
03028         dbfile.open( outf.str().c_str() );
03029         dbfile << "Writing this to a file.\n";
03030 
03031         dbfile << " map size:" << idsFromProcs.size()
03032                << std::endl;  // on the receiver side, this shows how much data to receive
03033         // from each sender (how many ids, and how much tag data later; we need to size up the
03034         // receiver buffer); arrange in tuples and use map iterators to send the ids
03035         for( std::map< int, std::set< int > >::iterator mt = idsFromProcs.begin(); mt != idsFromProcs.end(); mt++ )
03036         {
03037             std::set< int >& setIds = mt->second;
03038             dbfile << "from id: " << mt->first << " receive " << setIds.size() << " cells \n";
03039             int counter = 0;
03040             for( std::set< int >::iterator st = setIds.begin(); st != setIds.end(); st++ )
03041             {
03042                 int valueID = *st;
03043                 dbfile << " " << valueID;
03044                 counter++;
03045                 if( counter % 10 == 0 ) dbfile << "\n";
03046             }
03047             dbfile << "\n";
03048         }
03049         dbfile.close();
03050 #endif
03051         if( NULL != recvGraph )
03052         {
03053             ParCommGraph* recvGraph1 = new ParCommGraph( *recvGraph );  // just copy
03054             recvGraph1->set_context_id( *context_id );
03055             recvGraph1->SetReceivingAfterCoverage( idsFromProcs );
03056             // this par comm graph will need to use the coverage set
03057             // so we are for sure on intx pes (the receiver is the coupler mesh)
03058             assert( *pid_intx >= 0 );
03059             appData& dataIntx      = context.appDatas[*pid_intx];
03060             EntityHandle cover_set = dataIntx.tempestData.remapper->GetMeshSet( Remapper::CoveringMesh );
03061             recvGraph1->set_cover_set( cover_set );
03062             context.appDatas[*pid_migr].pgraph[*context_id] = recvGraph1;
03063         }
03064         for( std::map< int, std::set< int > >::iterator mit = idsFromProcs.begin(); mit != idsFromProcs.end(); mit++ )
03065         {
03066             int procToSendTo       = mit->first;
03067             std::set< int >& idSet = mit->second;
03068             for( std::set< int >::iterator sit = idSet.begin(); sit != idSet.end(); sit++ )
03069             {
03070                 int n = TLcovIDs.get_n();
03071                 TLcovIDs.reserve();
03072                 TLcovIDs.vi_wr[2 * n]     = procToSendTo;  // send to processor
03073                 TLcovIDs.vi_wr[2 * n + 1] = *sit;          // global id needs index in the local_verts range
03074             }
03075         }
03076     }
03077 
03078     ProcConfig pc( global );  // proc config does the crystal router
03079     pc.crystal_router()->gs_transfer( 1, TLcovIDs,
03080                                       0 );  // communication towards component tasks, with what ids are needed
03081     // for each task from receiver
03082 
03083     // a test to know if we are on the sender tasks (original component, in this case, atmosphere)
03084     if( NULL != sendGraph )
03085     {
03086         // collect TLcovIDs tuple, will set in a local map/set, the ids that are sent to each
03087         // receiver task
03088         ParCommGraph* sendGraph1 = new ParCommGraph( *sendGraph );  // just copy
03089         sendGraph1->set_context_id( *context_id );
03090         context.appDatas[*pid_src].pgraph[*context_id] = sendGraph1;
03091         rval                                           = sendGraph1->settle_send_graph( TLcovIDs );MB_CHK_ERR( rval );
03092     }
03093     return moab::MB_SUCCESS;  // success
03094 }
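// A minimal usage sketch (editorial example): iMOAB_CoverageGraph is collective on the joint
// communicator and is called after the intersection mesh exists on the coupler PEs; the component
// ids and the context id below are hypothetical values.
//
//     int src_id = 5, migr_id = 6, context_id = 100;
//     ErrCode rc = iMOAB_CoverageGraph( &jointComm, pid_src, pid_migr, pid_intx,
//                                       &src_id, &migr_id, &context_id );
//     if( rc != moab::MB_SUCCESS ) { /* handle the error */ }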
03095 
03096 ErrCode iMOAB_DumpCommGraph( iMOAB_AppID pid, int* context_id, int* is_sender, const iMOAB_String prefix )
03097 {
03098     assert( prefix && strlen( prefix ) );
03099 
03100     ParCommGraph* cgraph = context.appDatas[*pid].pgraph[*context_id];
03101     std::string prefix_str( prefix );
03102 
03103     if( NULL != cgraph )
03104         cgraph->dump_comm_information( prefix_str, *is_sender );
03105     else
03106     {
03107         std::cout << " cannot find ParCommGraph on app with pid " << *pid << " name: " << context.appDatas[*pid].name
03108                   << " context: " << *context_id << "\n";
03109     }
03110     return moab::MB_SUCCESS;
03111 }
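// A minimal usage sketch (editorial example): dump the communication pattern stored for a given
// context on the sender side; the prefix (used to name the output files) is hypothetical.
//
//     int is_sender = 1;
//     ErrCode rc = iMOAB_DumpCommGraph( pid, &context_id, &is_sender, "atm_to_cpl" );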
03112 
03113 #endif  // #ifdef MOAB_HAVE_TEMPESTREMAP
03114 
03115 #endif  // #ifdef MOAB_HAVE_MPI
03116 
03117 #ifdef MOAB_HAVE_TEMPESTREMAP
03118 
03119 #ifdef MOAB_HAVE_NETCDF
03120 
03121 ErrCode iMOAB_LoadMappingWeightsFromFile(
03122     iMOAB_AppID pid_intersection,
03123     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03124     const iMOAB_String remap_weights_filename )
03125 {
03126     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03127     assert( remap_weights_filename && strlen( remap_weights_filename ) );
03128 
03129     ErrorCode rval;
03130     bool row_based_partition = true;
03131 
03132     // Get the source and target data and pcomm objects
03133     appData& data_intx       = context.appDatas[*pid_intersection];
03134     TempestMapAppData& tdata = data_intx.tempestData;
03135 
03136     // Get the handle to the remapper object
03137     if( tdata.remapper == NULL )
03138     {
03139         // Now allocate and initialize the remapper object
03140 #ifdef MOAB_HAVE_MPI
03141         ParallelComm* pco = context.pcomms[*pid_intersection];
03142         tdata.remapper    = new moab::TempestRemapper( context.MBI, pco );
03143 #else
03144         tdata.remapper = new moab::TempestRemapper( context.MBI );
03145 #endif
03146         tdata.remapper->meshValidate     = true;
03147         tdata.remapper->constructEdgeMap = true;
03148 
03149         // Do not create new filesets; Use the sets from our respective applications
03150         tdata.remapper->initialize( false );
03151         // tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
03152         // tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
03153         tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03154     }
03155 
03156     // Setup loading of weights onto TempestOnlineMap
03157     // Set the context for the remapping weights computation
03158     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
03159 
03160     // Now allocate and initialize the remapper object
03161     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03162     assert( weightMap != NULL );
03163 
03164     std::vector< int > tmp_owned_ids;  // this will do a trivial row distribution
03165     rval = weightMap->ReadParallelMap( remap_weights_filename, tmp_owned_ids, row_based_partition );MB_CHK_ERR( rval );
03166 
03167     return moab::MB_SUCCESS;
03168 }
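// A minimal usage sketch (editorial example): load previously computed weights into the
// application that represents the intersection/coupler context; the identifier selects the map
// inside that application, and the file name is hypothetical.
//
//     ErrCode rc = iMOAB_LoadMappingWeightsFromFile( pid_intx, "scalar", "atm_ocn_map.nc" );
//     if( rc != moab::MB_SUCCESS ) { /* handle the error */ }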
03169 
03170 #ifdef MOAB_HAVE_MPI
03171 ErrCode iMOAB_MigrateMapMeshFortran( iMOAB_AppID pid1,
03172                                      iMOAB_AppID pid2,
03173                                      iMOAB_AppID pid3,
03174                                      int* join,
03175                                      int* group1,
03176                                      int* group2,
03177                                      int* type,
03178                                      int* comp1,
03179                                      int* comp2,
03180                                      int* direction )
03181 {
03182     MPI_Comm jcomm = MPI_Comm_f2c( (MPI_Fint)*join );
03183     MPI_Group gr1  = MPI_Group_f2c( (MPI_Fint)*group1 );
03184     MPI_Group gr2  = MPI_Group_f2c( (MPI_Fint)*group2 );
03185     return iMOAB_MigrateMapMesh( pid1, pid2, pid3, &jcomm, &gr1, &gr2, type, comp1, comp2, direction );
03186 }
03187 ErrCode iMOAB_MigrateMapMesh( iMOAB_AppID pid1,
03188                               iMOAB_AppID pid2,
03189                               iMOAB_AppID pid3,
03190                               MPI_Comm* jointcomm,
03191                               MPI_Group* groupA,
03192                               MPI_Group* groupB,
03193                               int* type,
03194                               int* comp1,
03195                               int* comp2,
03196                               int* direction )
03197 {
03198     assert( jointcomm );
03199     assert( groupA );
03200     assert( groupB );
03201 
03202     MPI_Comm joint_communicator =
03203         ( context.appDatas[*pid1].is_fortran ? MPI_Comm_f2c( *reinterpret_cast< MPI_Fint* >( jointcomm ) )
03204                                              : *jointcomm );
03205     MPI_Group group_first =
03206         ( context.appDatas[*pid1].is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupA ) ) : *groupA );
03207     MPI_Group group_second =
03208         ( context.appDatas[*pid1].is_fortran ? MPI_Group_f2c( *reinterpret_cast< MPI_Fint* >( groupB ) ) : *groupB );
03209 
03210     ErrorCode rval = MB_SUCCESS;
03211     int localRank = 0, numProcs = 1;
03212 
03213     // Get the local rank and number of processes in the joint communicator
03214     MPI_Comm_rank( joint_communicator, &localRank );
03215     MPI_Comm_size( joint_communicator, &numProcs );
03216 
03217     // Next instantiate the par comm graph
03218     // here we need to look at direction
03219     // this direction is good for atm source -> ocn target coupler example
03220     ParCommGraph* cgraph = NULL;
03221     if( *pid1 >= 0 ) cgraph = new ParCommGraph( joint_communicator, group_first, group_second, *comp1, *comp2 );
03222 
03223     ParCommGraph* cgraph_rev = NULL;
03224     if( *pid3 >= 0 ) cgraph_rev = new ParCommGraph( joint_communicator, group_second, group_first, *comp2, *comp1 );
03225 
03226     // we should search if we have another pcomm with the same comp ids in the list already
03227     // sort of check existing comm graphs in the map context.appDatas[*pid].pgraph
03228     if( *pid1 >= 0 ) context.appDatas[*pid1].pgraph[*comp2] = cgraph;      // the context will be the other comp
03229     if( *pid3 >= 0 ) context.appDatas[*pid3].pgraph[*comp1] = cgraph_rev;  // from 2 to 1
03230 
03231     // each model has a list of global ids that will need to be sent by gs to rendezvous the other
03232     // model on the joint_communicator
03233     TupleList TLcomp1;
03234     TLcomp1.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03235     TupleList TLcomp2;
03236     TLcomp2.initialize( 2, 0, 0, 0, 0 );  // to proc, marker
03237 
03238     // will push_back a new tuple, if needed
03239     TLcomp1.enableWriteAccess();
03240 
03241     // tags of interest are either GLOBAL_DOFS or GLOBAL_ID
03242     Tag gdsTag;
03243 
03244     // find the values on first cell
03245     int lenTagType1 = 1;
03246     if( *type == 1 )
03247     {
03248         rval = context.MBI->tag_get_handle( "GLOBAL_DOFS", gdsTag );MB_CHK_ERR( rval );
03249         rval = context.MBI->tag_get_length( gdsTag, lenTagType1 );MB_CHK_ERR( rval );  // usually it is 16
03250     }
03251     Tag tagType2 = context.MBI->globalId_tag();
03252 
03253 
03254     std::vector< int > valuesComp1;
03255 
03256     // populate first tuple
03257     Range ents_of_interest;  // will be filled with entities on pid1, that need to be distributed,
03258                              // rearranged in split_ranges map
03259     if( *pid1 >= 0 )
03260     {
03261         appData& data1     = context.appDatas[*pid1];
03262         EntityHandle fset1 = data1.file_set;
03263 
03264         if( *type == 1 )
03265         {
03266             assert( gdsTag );
03267             rval = context.MBI->get_entities_by_type( fset1, MBQUAD, ents_of_interest );MB_CHK_ERR( rval );
03268             valuesComp1.resize( ents_of_interest.size() * lenTagType1 );
03269             rval = context.MBI->tag_get_data( gdsTag, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );
03270         }
03271         else if( *type == 2 )
03272         {
03273             rval = context.MBI->get_entities_by_type( fset1, MBVERTEX, ents_of_interest );MB_CHK_ERR( rval );
03274             valuesComp1.resize( ents_of_interest.size() );
03275             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03276         }
03277         else if( *type == 3 )  // for FV meshes, just get the global id of cell
03278         {
03279             rval = context.MBI->get_entities_by_dimension( fset1, 2, ents_of_interest );MB_CHK_ERR( rval );
03280             valuesComp1.resize( ents_of_interest.size() );
03281             rval = context.MBI->tag_get_data( tagType2, ents_of_interest, &valuesComp1[0] );MB_CHK_ERR( rval );  // just global ids
03282         }
03283         else
03284         {
03285             MB_CHK_ERR( MB_FAILURE );  // only types 1, 2 and 3 are supported
03286         }
03287         // now fill the tuple list with info and markers
03288         // because we will send only the ids, order and compress the list
03289         std::set< int > uniq( valuesComp1.begin(), valuesComp1.end() );
03290         TLcomp1.resize( uniq.size() );
03291         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03292         {
03293             // to proc, marker
03294             int marker               = *sit;
03295             int to_proc              = marker % numProcs;
03296             int n                    = TLcomp1.get_n();
03297             TLcomp1.vi_wr[2 * n]     = to_proc;  // send to processor
03298             TLcomp1.vi_wr[2 * n + 1] = marker;
03299             TLcomp1.inc_n();
03300         }
03301     }
03302 
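    // each marker is routed to the rendezvous rank marker % numProcs; for example, with numProcs = 4,
    // markers 8, 9 and 13 meet on ranks 0, 1 and 1, so matching entries from both components land on
    // the same task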
03303     ProcConfig pc( joint_communicator );  // proc config does the crystal router
03304     pc.crystal_router()->gs_transfer( 1, TLcomp1,
03305                                       0 );  // communication towards joint tasks, with markers
03306     // sort by value (key 1)
03307 #ifdef VERBOSE
03308     std::stringstream ff1;
03309     ff1 << "TLcomp1_" << localRank << ".txt";
03310     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03311 #endif
03312     moab::TupleList::buffer sort_buffer;
03313     sort_buffer.buffer_init( TLcomp1.get_n() );
03314     TLcomp1.sort( 1, &sort_buffer );
03315     sort_buffer.reset();
03316 #ifdef VERBOSE
03317     // after sorting
03318     TLcomp1.print_to_file( ff1.str().c_str() );  // it will append!
03319 #endif
03320     // do the same, for the other component, number2, with type2
03321     // start copy
03322     TLcomp2.enableWriteAccess();
03323 
03324     moab::TempestOnlineMap* weightMap = NULL;  // declare it outside, but it is meaningful only when *pid2 >= 0,
03325     // i.e. on the coupler PEs, where the read map and the coupler procs exist
03326     // populate second tuple with ids  from read map: we need row_gdofmap and col_gdofmap
03327     std::vector< int > valuesComp2;
03328     if( *pid2 >= 0 )  // we are now on coupler, map side
03329     {
03330         appData& data2           = context.appDatas[*pid2];
03331         TempestMapAppData& tdata = data2.tempestData;
03332         // should be only one map, read from file
03333         assert( tdata.weightMaps.size() == 1 );
03334         // maybe we need to check it is the map we expect
03335         weightMap = tdata.weightMaps.begin()->second;
03336         // std::vector<int> ids_of_interest;
03337         // do a deep copy of the ids of interest: row ids or col ids, target or source direction
03338         if( *direction == 1 )
03339         {
03340             // we are interested in col ids, source
03341             // new method from moab::TempestOnlineMap
03342             rval = weightMap->fill_col_ids( valuesComp2 );MB_CHK_ERR( rval );
03343         }
03344         else if( *direction == 2 )
03345         {
03346             // we are interested in row ids, for target ids
03347             rval = weightMap->fill_row_ids( valuesComp2 );MB_CHK_ERR( rval );
03348         }
03349         //
03350 
03351         // now fill the tuple list with info and markers
03352         std::set< int > uniq( valuesComp2.begin(), valuesComp2.end() );
03353         TLcomp2.resize( uniq.size() );
03354         for( std::set< int >::iterator sit = uniq.begin(); sit != uniq.end(); sit++ )
03355         {
03356             // to proc, marker
03357             int marker               = *sit;
03358             int to_proc              = marker % numProcs;
03359             int n                    = TLcomp2.get_n();
03360             TLcomp2.vi_wr[2 * n]     = to_proc;  // send to processor
03361             TLcomp2.vi_wr[2 * n + 1] = marker;
03362             TLcomp2.inc_n();
03363         }
03364     }
03365     pc.crystal_router()->gs_transfer( 1, TLcomp2,
03366                                       0 );  // communication towards joint tasks, with markers
03367     // sort by value (key 1)
03368     // in the rendezvous approach, markers meet at the rendezvous processor, of rank marker % numProcs
03369 #ifdef VERBOSE
03370     std::stringstream ff2;
03371     ff2 << "TLcomp2_" << localRank << ".txt";
03372     TLcomp2.print_to_file( ff2.str().c_str() );
03373 #endif
03374     sort_buffer.buffer_reserve( TLcomp2.get_n() );
03375     TLcomp2.sort( 1, &sort_buffer );
03376     sort_buffer.reset();
03377     // end copy
03378 #ifdef VERBOSE
03379     TLcomp2.print_to_file( ff2.str().c_str() );
03380 #endif
03381     // need to send back the info, from the rendezvous point, for each of the values
03382     /* go over each value, on the local process in the joint communicator;
03383 
03384     we now have to send back the info needed for communication:
03385      loop in sync over both TLcomp1 and TLcomp2, on the local process;
03386       build new tuple lists for the synchronous communication and
03387       populate them at the same time, matched on the marker, which is the sort key
03388     */
03389 
03390     TupleList TLBackToComp1;
03391     TLBackToComp1.initialize( 3, 0, 0, 0, 0 );  // to proc, marker, from proc on comp2,
03392     TLBackToComp1.enableWriteAccess();
03393 
03394     TupleList TLBackToComp2;
03395     TLBackToComp2.initialize( 3, 0, 0, 0, 0 );  // to proc, marker,  from proc,
03396     TLBackToComp2.enableWriteAccess();
03397 
03398     int n1 = TLcomp1.get_n();
03399     int n2 = TLcomp2.get_n();
03400 
03401     int indexInTLComp1 = 0;
03402     int indexInTLComp2 = 0;  // advance both, according to the marker
03403     if( n1 > 0 && n2 > 0 )
03404     {
03405 
03406         while( indexInTLComp1 < n1 && indexInTLComp2 < n2 )  // if any is over, we are done
03407         {
03408             int currentValue1 = TLcomp1.vi_rd[2 * indexInTLComp1 + 1];
03409             int currentValue2 = TLcomp2.vi_rd[2 * indexInTLComp2 + 1];
03410             if( currentValue1 < currentValue2 )
03411             {
03412                 // we have a big problem; basically, we are saying that
03413                 // dof currentValue is on one model and not on the other
03414                 // std::cout << " currentValue1:" << currentValue1 << " missing in comp2" << "\n";
03415                 indexInTLComp1++;
03416                 continue;
03417             }
03418             if( currentValue1 > currentValue2 )
03419             {
03420                 // std::cout << " currentValue2:" << currentValue2 << " missing in comp1" << "\n";
03421                 indexInTLComp2++;
03422                 continue;
03423             }
03424             int size1 = 1;
03425             int size2 = 1;
03426             while( indexInTLComp1 + size1 < n1 && currentValue1 == TLcomp1.vi_rd[2 * ( indexInTLComp1 + size1 ) + 1] )
03427                 size1++;
03428             while( indexInTLComp2 + size2 < n2 && currentValue2 == TLcomp2.vi_rd[2 * ( indexInTLComp2 + size2 ) + 1] )
03429                 size2++;
03430             // must be found in both lists, find the start and end indices
03431             for( int i1 = 0; i1 < size1; i1++ )
03432             {
03433                 for( int i2 = 0; i2 < size2; i2++ )
03434                 {
03435                     // send the info back to components
03436                     int n = TLBackToComp1.get_n();
03437                     TLBackToComp1.reserve();
03438                     TLBackToComp1.vi_wr[3 * n] =
03439                         TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // send back to the proc marker
03440                                                                      // came from, info from comp2
03441                     TLBackToComp1.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03442                     TLBackToComp1.vi_wr[3 * n + 2] = TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // from proc on comp2
03443                     n                              = TLBackToComp2.get_n();
03444                     TLBackToComp2.reserve();
03445                     TLBackToComp2.vi_wr[3 * n] =
03446                         TLcomp2.vi_rd[2 * ( indexInTLComp2 + i2 )];  // send back info to original
03447                     TLBackToComp2.vi_wr[3 * n + 1] = currentValue1;  // initial value (resend?)
03448                     TLBackToComp2.vi_wr[3 * n + 2] = TLcomp1.vi_rd[2 * ( indexInTLComp1 + i1 )];  // from proc on comp1
03449                     // what if there are repeated markers in TLcomp2? increase just index2
03450                 }
03451             }
03452             indexInTLComp1 += size1;
03453             indexInTLComp2 += size2;
03454         }
03455     }
03456     pc.crystal_router()->gs_transfer( 1, TLBackToComp1, 0 );  // communication towards original tasks, with info about the peer ranks
03457     pc.crystal_router()->gs_transfer( 1, TLBackToComp2, 0 );
03458 
03459     TupleList TLv;  // vertices
03460     TupleList TLc;  // cells if needed (not type 2)
03461 
03462     if( *pid1 >= 0 )
03463     {
03464         // we are on original comp 1 tasks
03465         // before ordering
03466         // now for each value in TLBackToComp1.vi_rd[3*i+1], on current proc, we know the
03467         // processors it communicates with
03468 #ifdef VERBOSE
03469         std::stringstream f1;
03470         f1 << "TLBack1_" << localRank << ".txt";
03471         TLBackToComp1.print_to_file( f1.str().c_str() );
03472 #endif
03473         // so we are now on pid1; we know where each marker has to go
03474         // add a new method to ParCommGraph, to set up the split_ranges and involved_IDs_map
03475         rval = cgraph->set_split_ranges( *comp1, TLBackToComp1, valuesComp1, lenTagType1, ents_of_interest, *type );MB_CHK_ERR( rval );
03476         // we can just send vertices and elements, with crystal routers;
03477         // on the receiving end, make sure they are not duplicated, by looking at the global id
03478         // if *type is 1, also send global_dofs tag in the element tuple
03479         rval = cgraph->form_tuples_to_migrate_mesh( context.MBI, TLv, TLc, *type, lenTagType1 );MB_CHK_ERR( rval );
03480     }
03481     pc.crystal_router()->gs_transfer( 1, TLv, 0 );  // communication towards coupler tasks, with mesh vertices
03482     if( *type != 2 ) pc.crystal_router()->gs_transfer( 1, TLc, 0 );  // those are cells
03483 
03484     if( *pid3 >= 0 )  // will receive the mesh, on coupler pes!
03485     {
03486         appData& data3     = context.appDatas[*pid3];
03487         EntityHandle fset3 = data3.file_set;
03488         Range primary_ents3;                 // vertices for type 2, cells of dim 2 for type 1 or 3
03489         std::vector< int > values_entities;  // will be the size of primary_ents3 * lenTagType1
03490         rval = cgraph_rev->form_mesh_from_tuples( context.MBI, TLv, TLc, *type, lenTagType1, fset3, primary_ents3,
03491                                                   values_entities );MB_CHK_ERR( rval );
03492         iMOAB_UpdateMeshInfo( pid3 );
03493         int ndofPerEl = 1;
03494         if( 1 == *type ) ndofPerEl = (int)( sqrt( lenTagType1 ) );
03495         // because we are on the coupler, we know that the read map pid2 exists
03496         assert( *pid2 >= 0 );
03497         appData& dataIntx        = context.appDatas[*pid2];
03498         TempestMapAppData& tdata = dataIntx.tempestData;
03499 
03500         // if we are on source coverage, direction 1, we can set covering mesh, covering cells
03501         if( 1 == *direction )
03502         {
03503             tdata.pid_src = pid3;
03504             tdata.remapper->SetMeshSet( Remapper::CoveringMesh, fset3, primary_ents3 );
03505             weightMap->SetSourceNDofsPerElement( ndofPerEl );
03506             weightMap->set_col_dc_dofs( values_entities );  // will set col_dtoc_dofmap
03507         }
03508         // if we are on target, we can set the target cells
03509         else
03510         {
03511             tdata.pid_dest = pid3;
03512             tdata.remapper->SetMeshSet( Remapper::TargetMesh, fset3, primary_ents3 );
03513             weightMap->SetDestinationNDofsPerElement( ndofPerEl );
03514             weightMap->set_row_dc_dofs( values_entities );  // will set row_dtoc_dofmap
03515         }
03516     }
03517 
03518     return moab::MB_SUCCESS;
03519 }
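// A minimal usage sketch (editorial example): after the map was read on the coupler PEs with
// iMOAB_LoadMappingWeightsFromFile, migrate the source cells referenced by the map from the
// component PEs to the coupler PEs; the pids, groups, component ids and type are hypothetical.
//
//     int type = 3;                                // FV mesh: cells identified by GLOBAL_ID
//     int cmpAtm = 5, cplAtm = 6, direction = 1;   // direction 1 = source side of the map
//     ErrCode rc = iMOAB_MigrateMapMesh( pid_atm, pid_map, pid_atm_cpl, &jointComm, &atmGroup,
//                                        &couplerGroup, &type, &cmpAtm, &cplAtm, &direction );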
03520 
03521 #endif  // #ifdef MOAB_HAVE_MPI
03522 
03523 ErrCode iMOAB_WriteMappingWeightsToFile(
03524     iMOAB_AppID pid_intersection,
03525     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03526     const iMOAB_String remap_weights_filename )
03527 {
03528     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03529     assert( remap_weights_filename && strlen( remap_weights_filename ) );
03530 
03531     ErrorCode rval;
03532 
03533     // Get the source and target data and pcomm objects
03534     appData& data_intx       = context.appDatas[*pid_intersection];
03535     TempestMapAppData& tdata = data_intx.tempestData;
03536 
03537     // Get the handle to the remapper object
03538     assert( tdata.remapper != NULL );
03539 
03540     // Now get online weights object and ensure it is valid
03541     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03542     assert( weightMap != NULL );
03543 
03544     std::string filename = std::string( remap_weights_filename );
03545 
03546     // Write the map file to disk in parallel using either HDF5 or SCRIP interface
03547     rval = weightMap->WriteParallelMap( filename );MB_CHK_ERR( rval );
03548 
03549     return moab::MB_SUCCESS;
03550 }
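// A minimal usage sketch (editorial example): write an existing weight map (computed with
// iMOAB_ComputeScalarProjectionWeights or loaded from a file) back to disk; the file name is
// hypothetical.
//
//     ErrCode rc = iMOAB_WriteMappingWeightsToFile( pid_intx, "scalar", "atm_ocn_weights.nc" );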
03551 
03552 #endif  // #ifdef MOAB_HAVE_NETCDF
03553 
03554 #define USE_API
03555 static ErrCode ComputeSphereRadius( iMOAB_AppID pid, double* radius )
03556 {
03557     ErrorCode rval;
03558     moab::CartVect pos;
03559 
03560     Range& verts                   = context.appDatas[*pid].all_verts;
03561     moab::EntityHandle firstVertex = ( verts[0] );
03562 
03563     // coordinate data
03564     rval = context.MBI->get_coords( &( firstVertex ), 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
03565 
03566     // compute the distance from origin
03567     // TODO: we could do this in a loop to verify if the pid represents a spherical mesh
03568     *radius = pos.length();
03569     return moab::MB_SUCCESS;
03570 }
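// A possible realization of the TODO above (editorial sketch, not implemented here): loop over all
// vertices and check that each distance to the origin stays close to the radius of the first
// vertex, to verify that the application really holds a spherical mesh.
//
//     for( Range::iterator vit = verts.begin(); vit != verts.end(); ++vit )
//     {
//         EntityHandle vh = *vit;
//         rval = context.MBI->get_coords( &vh, 1, (double*)&( pos[0] ) );MB_CHK_ERR( rval );
//         if( fabs( pos.length() - *radius ) > 1.e-10 * ( *radius ) ) return moab::MB_FAILURE;
//     }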
03571 
03572 ErrCode iMOAB_ComputeMeshIntersectionOnSphere( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
03573 {
03574     ErrorCode rval;
03575     ErrCode ierr;
03576     bool validate = true;
03577 
03578     double radius_source = 1.0;
03579     double radius_target = 1.0;
03580     const double epsrel  = 1e-15;
03581     const double boxeps  = 1.e-6;
03582 
03583     // Get the source and target data and pcomm objects
03584     appData& data_src  = context.appDatas[*pid_src];
03585     appData& data_tgt  = context.appDatas[*pid_tgt];
03586     appData& data_intx = context.appDatas[*pid_intx];
03587 #ifdef MOAB_HAVE_MPI
03588     ParallelComm* pco_intx = context.pcomms[*pid_intx];
03589 #endif
03590 
03591     // Mesh intersection has already been computed; Return early.
03592     TempestMapAppData& tdata = data_intx.tempestData;
03593     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
03594 
03595     bool is_parallel = false, is_root = true;
03596     int rank = 0;
03597 #ifdef MOAB_HAVE_MPI
03598     if( pco_intx )
03599     {
03600         rank        = pco_intx->rank();
03601         is_parallel = ( pco_intx->size() > 1 );
03602         is_root     = ( rank == 0 );
03603         rval        = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
03604     }
03605 #endif
03606 
03607     moab::DebugOutput outputFormatter( std::cout, rank, 0 );
03608     outputFormatter.set_prefix( "[iMOAB_ComputeMeshIntersectionOnSphere]: " );
03609 
03610     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
03611     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
03612 
03613     // Rescale the radius of both to compute the intersection
03614     ComputeSphereRadius( pid_src, &radius_source );
03615     ComputeSphereRadius( pid_tgt, &radius_target );
03616 #ifdef VERBOSE
03617     if( is_root )
03618         outputFormatter.printf( 0, "Radius of spheres: source = %12.14f, and target = %12.14f\n", radius_source,
03619                                 radius_target );
03620 #endif
03621 
03622     /* Let us make sure that the radii match for source and target meshes. If not, rescale now and
03623      * unscale later. */
03624     double defaultradius = 1.0;
03625     if( fabs( radius_source - radius_target ) > 1e-10 )
03626     { /* the radii are different */
03627         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, defaultradius );MB_CHK_ERR( rval );
03628         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, defaultradius );MB_CHK_ERR( rval );
03629     }
03630 
03631     // Default area_method = lHuiller; Options: Girard, GaussQuadrature (if TR is available)
03632 #ifdef MOAB_HAVE_TEMPESTREMAP
03633     IntxAreaUtils areaAdaptor( IntxAreaUtils::GaussQuadrature );
03634 #else
03635     IntxAreaUtils areaAdaptor( IntxAreaUtils::lHuiller );
03636 #endif
03637 
03638     // print verbosely about the problem setting
03639     bool use_kdtree_search = false;
03640     double srctgt_areas[2], srctgt_areas_glb[2];
03641     {
03642         moab::Range rintxverts, rintxelems;
03643         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
03644         rval = context.MBI->get_entities_by_dimension( data_src.file_set, data_src.dimension, rintxelems );MB_CHK_ERR( rval );
03645         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
03646         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );MB_CHK_ERR( rval );
03647         srctgt_areas[0] = areaAdaptor.area_on_sphere( context.MBI, data_src.file_set, defaultradius /*radius_source*/ );
03648 #ifdef VERBOSE
03649         if( is_root )
03650             outputFormatter.printf( 0, "The red set contains %d vertices and %d elements \n", rintxverts.size(),
03651                                     rintxelems.size() );
03652 #endif
03653 
03654         moab::Range bintxverts, bintxelems;
03655         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
03656         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, data_tgt.dimension, bintxelems );MB_CHK_ERR( rval );
03657         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
03658         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );MB_CHK_ERR( rval );
03659         srctgt_areas[1] = areaAdaptor.area_on_sphere( context.MBI, data_tgt.file_set, defaultradius /*radius_target*/ );
03660 #ifdef VERBOSE
03661         if( is_root )
03662             outputFormatter.printf( 0, "The blue set contains %d vertices and %d elements \n", bintxverts.size(),
03663                                     bintxelems.size() );
03664 #endif
03665 #ifdef MOAB_HAVE_MPI
03666         MPI_Allreduce( &srctgt_areas[0], &srctgt_areas_glb[0], 2, MPI_DOUBLE, MPI_SUM, pco_intx->comm() );
03667 #else
03668         srctgt_areas_glb[0] = srctgt_areas[0];
03669         srctgt_areas_glb[1] = srctgt_areas[1];
03670 #endif
03671         use_kdtree_search = ( srctgt_areas_glb[0] < srctgt_areas_glb[1] );
03672     }
03673 
03674     data_intx.dimension = data_tgt.dimension;
03675     // set the context for the source and destination applications
03676     tdata.pid_src  = pid_src;
03677     tdata.pid_dest = pid_tgt;
03678 
03679     // Now allocate and initialize the remapper object
03680 #ifdef MOAB_HAVE_MPI
03681     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
03682 #else
03683     tdata.remapper = new moab::TempestRemapper( context.MBI );
03684 #endif
03685     tdata.remapper->meshValidate     = true;
03686     tdata.remapper->constructEdgeMap = true;
03687 
03688     // Do not create new filesets; Use the sets from our respective applications
03689     tdata.remapper->initialize( false );
03690     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
03691     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
03692     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03693 
03694     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
03695     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
03696 
03697     // First, compute the covering source set.
03698     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
03699 
03700     // Next, compute intersections with MOAB.
03701     rval = tdata.remapper->ComputeOverlapMesh( use_kdtree_search, false );MB_CHK_ERR( rval );
03702 
03703     // Mapping computation done
03704     if( validate )
03705     {
03706         double local_area,
03707             global_areas[3];  // Array for Initial area, and through Method 1 and Method 2
03708         local_area = areaAdaptor.area_on_sphere( context.MBI, data_intx.file_set, radius_source );
03709 
03710         global_areas[0] = srctgt_areas_glb[0];
03711         global_areas[1] = srctgt_areas_glb[1];
03712 
03713 #ifdef MOAB_HAVE_MPI
03714         if( is_parallel )
03715         {
03716             MPI_Reduce( &local_area, &global_areas[2], 1, MPI_DOUBLE, MPI_SUM, 0, pco_intx->comm() );
03717         }
03718         else
03719         {
03720             global_areas[2] = local_area;
03721         }
03722 #else
03723         global_areas[2] = local_area;
03724 #endif
03725         if( is_root )
03726         {
03727             outputFormatter.printf( 0,
03728                                     "initial area: source mesh = %12.14f, target mesh = %12.14f, "
03729                                     "overlap mesh = %12.14f\n",
03730                                     global_areas[0], global_areas[1], global_areas[2] );
03731             outputFormatter.printf( 0, " relative error w.r.t source = %12.14e, and target = %12.14e\n",
03732                                     fabs( global_areas[0] - global_areas[2] ) / global_areas[0],
03733                                     fabs( global_areas[1] - global_areas[2] ) / global_areas[1] );
03734         }
03735     }
03736 
03737     return moab::MB_SUCCESS;
03738 }
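// A minimal usage sketch (editorial example): on the coupler PEs, assuming applications were
// already registered for the migrated source mesh, the migrated target mesh and the intersection
// itself, the overlap mesh is computed with one collective call; the pid names are hypothetical.
//
//     ErrCode rc = iMOAB_ComputeMeshIntersectionOnSphere( pid_src_cpl, pid_tgt_cpl, pid_intx );
//     if( rc != moab::MB_SUCCESS ) { /* handle the error */ }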
03739 
03740 ErrCode iMOAB_ComputePointDoFIntersection( iMOAB_AppID pid_src, iMOAB_AppID pid_tgt, iMOAB_AppID pid_intx )
03741 {
03742     ErrorCode rval;
03743     ErrCode ierr;
03744 
03745     double radius_source = 1.0;
03746     double radius_target = 1.0;
03747     const double epsrel  = 1e-15;
03748     const double boxeps  = 1.e-8;
03749 
03750     // Get the source and target data and pcomm objects
03751     appData& data_src  = context.appDatas[*pid_src];
03752     appData& data_tgt  = context.appDatas[*pid_tgt];
03753     appData& data_intx = context.appDatas[*pid_intx];
03754 #ifdef MOAB_HAVE_MPI
03755     ParallelComm* pco_intx = context.pcomms[*pid_intx];
03756 #endif
03757 
03758     // Mesh intersection has already been computed; Return early.
03759     TempestMapAppData& tdata = data_intx.tempestData;
03760     if( tdata.remapper != NULL ) return moab::MB_SUCCESS;  // nothing to do
03761 
03762 #ifdef MOAB_HAVE_MPI
03763     if( pco_intx )
03764     {
03765         rval = pco_intx->check_all_shared_handles();MB_CHK_ERR( rval );
03766     }
03767 #endif
03768 
03769     ierr = iMOAB_UpdateMeshInfo( pid_src );MB_CHK_ERR( ierr );
03770     ierr = iMOAB_UpdateMeshInfo( pid_tgt );MB_CHK_ERR( ierr );
03771 
03772     // Rescale the radius of both to compute the intersection
03773     ComputeSphereRadius( pid_src, &radius_source );
03774     ComputeSphereRadius( pid_tgt, &radius_target );
03775 
03776     IntxAreaUtils areaAdaptor;
03777     // print verbosely about the problem setting
03778     {
03779         moab::Range rintxverts, rintxelems;
03780         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 0, rintxverts );MB_CHK_ERR( rval );
03781         rval = context.MBI->get_entities_by_dimension( data_src.file_set, 2, rintxelems );MB_CHK_ERR( rval );
03782         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_src.file_set );MB_CHK_ERR( rval );
03783         rval = areaAdaptor.positive_orientation( context.MBI, data_src.file_set, radius_source );MB_CHK_ERR( rval );
03784 #ifdef VERBOSE
03785         std::cout << "The red set contains " << rintxverts.size() << " vertices and " << rintxelems.size()
03786                   << " elements \n";
03787 #endif
03788 
03789         moab::Range bintxverts, bintxelems;
03790         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 0, bintxverts );MB_CHK_ERR( rval );
03791         rval = context.MBI->get_entities_by_dimension( data_tgt.file_set, 2, bintxelems );MB_CHK_ERR( rval );
03792         rval = IntxUtils::fix_degenerate_quads( context.MBI, data_tgt.file_set );MB_CHK_ERR( rval );
03793         rval = areaAdaptor.positive_orientation( context.MBI, data_tgt.file_set, radius_target );MB_CHK_ERR( rval );
03794 #ifdef VERBOSE
03795         std::cout << "The blue set contains " << bintxverts.size() << " vertices and " << bintxelems.size()
03796                   << " elements \n";
03797 #endif
03798     }
03799 
03800     data_intx.dimension = data_tgt.dimension;
03801     // set the context for the source and destination applications
03803     tdata.pid_src         = pid_src;
03804     tdata.pid_dest        = pid_tgt;
03805     data_intx.point_cloud = ( data_src.point_cloud || data_tgt.point_cloud );
03806     assert( data_intx.point_cloud == true );
03807 
03808     // Now allocate and initialize the remapper object
03809 #ifdef MOAB_HAVE_MPI
03810     tdata.remapper = new moab::TempestRemapper( context.MBI, pco_intx );
03811 #else
03812     tdata.remapper = new moab::TempestRemapper( context.MBI );
03813 #endif
03814     tdata.remapper->meshValidate     = true;
03815     tdata.remapper->constructEdgeMap = true;
03816 
03817     // Do not create new filesets; Use the sets from our respective applications
03818     tdata.remapper->initialize( false );
03819     tdata.remapper->GetMeshSet( moab::Remapper::SourceMesh )  = data_src.file_set;
03820     tdata.remapper->GetMeshSet( moab::Remapper::TargetMesh )  = data_tgt.file_set;
03821     tdata.remapper->GetMeshSet( moab::Remapper::OverlapMesh ) = data_intx.file_set;
03822 
03823     /* Let us make sure that the radii match for source and target meshes. If not, rescale now and
03824      * unscale later. */
03825     if( fabs( radius_source - radius_target ) > 1e-10 )
03826     { /* the radii are different */
03827         rval = IntxUtils::ScaleToRadius( context.MBI, data_src.file_set, 1.0 );MB_CHK_ERR( rval );
03828         rval = IntxUtils::ScaleToRadius( context.MBI, data_tgt.file_set, 1.0 );MB_CHK_ERR( rval );
03829     }
03830 
03831     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::SourceMesh );MB_CHK_ERR( rval );
03832     rval = tdata.remapper->ConvertMeshToTempest( moab::Remapper::TargetMesh );MB_CHK_ERR( rval );
03833 
03834     // First, compute the covering source set.
03835     rval = tdata.remapper->ConstructCoveringSet( epsrel, 1.0, 1.0, boxeps, false );MB_CHK_ERR( rval );
03836 
03837 #ifdef MOAB_HAVE_MPI
03838     /* VSM: This context should be set on the data_src but it would overwrite the source
03839        covering set context in case it is coupled to another APP as well.
03840        This needs a redesign. */
03841     // data_intx.covering_set = tdata.remapper->GetCoveringSet();
03842     // data_src.covering_set = tdata.remapper->GetCoveringSet();
03843 #endif
03844 
03845     // Now let us re-convert the MOAB mesh back to Tempest representation
03846     rval = tdata.remapper->ComputeGlobalLocalMaps();MB_CHK_ERR( rval );
03847 
03848     return moab::MB_SUCCESS;
03849 }
03850 
03851 ErrCode iMOAB_ComputeScalarProjectionWeights(
03852     iMOAB_AppID pid_intx,
03853     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03854     const iMOAB_String disc_method_source,
03855     int* disc_order_source,
03856     const iMOAB_String disc_method_target,
03857     int* disc_order_target,
03858     int* fNoBubble,
03859     int* fMonotoneTypeID,
03860     int* fVolumetric,
03861     int* fNoConservation,
03862     int* fValidate,
03863     const iMOAB_String source_solution_tag_dof_name,
03864     const iMOAB_String target_solution_tag_dof_name )
03865 {
03866     moab::ErrorCode rval;
03867 
03868     assert( disc_order_source && disc_order_target && *disc_order_source > 0 && *disc_order_target > 0 );
03869     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03870     assert( disc_method_source && strlen( disc_method_source ) );
03871     assert( disc_method_target && strlen( disc_method_target ) );
03872     assert( source_solution_tag_dof_name && strlen( source_solution_tag_dof_name ) );
03873     assert( target_solution_tag_dof_name && strlen( target_solution_tag_dof_name ) );
03874 
03875     // Get the source and target data and pcomm objects
03876     appData& data_intx       = context.appDatas[*pid_intx];
03877     TempestMapAppData& tdata = data_intx.tempestData;
03878 
03879     // Setup computation of weights
03880     // Set the context for the remapping weights computation
03881     tdata.weightMaps[std::string( solution_weights_identifier )] = new moab::TempestOnlineMap( tdata.remapper );
03882 
03883     // Call to generate the remap weights with the tempest meshes
03884     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03885     assert( weightMap != NULL );
03886 
03887     // Now let us compute the local-global mapping and store it in the context
03888     // We need this mapping when computing matvec products and to do reductions in parallel
03889     // Additionally, the call below will also compute weights with TempestRemap
03890     rval = weightMap->GenerateRemappingWeights(
03891         std::string( disc_method_source ),                   // std::string strInputType
03892         std::string( disc_method_target ),                   // std::string strOutputType
03893         ( *disc_order_source ), ( *disc_order_target ),      // const int nPin, const int nPout
03894         ( fNoBubble ? *fNoBubble : false ),                  // bool fNoBubble=false
03895         ( fMonotoneTypeID ? *fMonotoneTypeID : 0 ),          // int fMonotoneTypeID=0
03896         ( fVolumetric ? *fVolumetric > 0 : false ),          // bool fVolumetric=false
03897         ( fNoConservation ? *fNoConservation > 0 : false ),  // bool fNoConservation=false
03898         ( fValidate ? *fValidate : false ),                  // bool fNoCheck=false
03899         source_solution_tag_dof_name,                        // const std::string srcDofTagName = "GLOBAL_ID"
03900         target_solution_tag_dof_name,                        // const std::string tgtDofTagName = "GLOBAL_ID"
03901         false,                                               // bool fInputConcave = false
03902         false                                                // bool fOutputConcave = false
03903     );MB_CHK_ERR( rval );
03904 
03905     return moab::MB_SUCCESS;
03906 }
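
// A minimal usage sketch (not a fixed recipe): it assumes an intersection application handle
// pid_intx that has already been set up for a source/target pair, first-order finite-volume
// ("fv") discretizations on both sides, and the default GLOBAL_ID DOF tags; the orders,
// flags and tag names below are illustrative placeholders.
//
//   int order_src = 1, order_tgt = 1;
//   int noBubble  = 1, validate  = 1;
//   ErrCode ierr  = iMOAB_ComputeScalarProjectionWeights( pid_intx, "scalar",
//       "fv", &order_src, "fv", &order_tgt,
//       &noBubble, NULL /*fMonotoneTypeID*/, NULL /*fVolumetric*/, NULL /*fNoConservation*/,
//       &validate, "GLOBAL_ID", "GLOBAL_ID" );
//   // on success (ierr == moab::MB_SUCCESS), the map is cached under the identifier "scalar"
//   // and can be applied later with iMOAB_ApplyScalarProjectionWeights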
03907 
03908 ErrCode iMOAB_ApplyScalarProjectionWeights(
03909     iMOAB_AppID pid_intersection,
03910     const iMOAB_String solution_weights_identifier, /* "scalar", "flux", "custom" */
03911     const iMOAB_String source_solution_tag_name,
03912     const iMOAB_String target_solution_tag_name )
03913 {
03914     assert( solution_weights_identifier && strlen( solution_weights_identifier ) );
03915     assert( source_solution_tag_name && strlen( source_solution_tag_name ) );
03916     assert( target_solution_tag_name && strlen( target_solution_tag_name ) );
03917 
03918     moab::ErrorCode rval;
03919 
03920     // Get the source and target data and pcomm objects
03921     appData& data_intx       = context.appDatas[*pid_intersection];
03922     TempestMapAppData& tdata = data_intx.tempestData;
03923 
03924     // Now allocate and initialize the remapper object
03925     moab::TempestRemapper* remapper   = tdata.remapper;
03926     moab::TempestOnlineMap* weightMap = tdata.weightMaps[std::string( solution_weights_identifier )];
03927 
03928     // we assume the tag names are separated by ";"
03929     std::vector< std::string > srcNames;
03930     std::vector< std::string > tgtNames;
03931     std::vector< Tag > srcTagHandles;
03932     std::vector< Tag > tgtTagHandles;
03933     std::string separator( ";" );
03934     std::string src_name( source_solution_tag_name );
03935     std::string tgt_name( target_solution_tag_name );
03936     split_tag_names( src_name, separator, srcNames );
03937     split_tag_names( tgt_name, separator, tgtNames );
03938     if( srcNames.size() != tgtNames.size() )
03939     {
03940         std::cout << " error: number of source tag names does not match number of target tag names \n";
03941         return moab::MB_FAILURE;
03942     }
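    // e.g. (hypothetical names) source_solution_tag_name = "a2oTbot;a2oUbot" and
    // target_solution_tag_name = "a2oTbot_proj;a2oUbot_proj" would pair
    // a2oTbot -> a2oTbot_proj and a2oUbot -> a2oUbot_proj in the loop below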
03943 
03944     for( size_t i = 0; i < srcNames.size(); i++ )
03945     {
03946         Tag tagHandle;
03947         rval = context.MBI->tag_get_handle( srcNames[i].c_str(), tagHandle );
03948         if( MB_SUCCESS != rval || NULL == tagHandle )
03949         {
03950             return moab::MB_FAILURE;
03951         }
03952         srcTagHandles.push_back( tagHandle );
03953         rval = context.MBI->tag_get_handle( tgtNames[i].c_str(), tagHandle );
03954         if( MB_SUCCESS != rval || NULL == tagHandle )
03955         {
03956             return moab::MB_FAILURE;
03957         }
03958         tgtTagHandles.push_back( tagHandle );
03959     }
03960 
03961     std::vector< double > solSTagVals;
03962     std::vector< double > solTTagVals;
03963 
03964     moab::Range sents, tents;
03965     if( data_intx.point_cloud )
03966     {
03967         appData& data_src = context.appDatas[*tdata.pid_src];
03968         appData& data_tgt = context.appDatas[*tdata.pid_dest];
03969         if( data_src.point_cloud )
03970         {
03971             moab::Range& covSrcEnts = remapper->GetMeshVertices( moab::Remapper::CoveringMesh );
03972             solSTagVals.resize( covSrcEnts.size(), -1.0 );
03973             sents = covSrcEnts;
03974         }
03975         else
03976         {
03977             moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
03978             solSTagVals.resize( covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() *
03979                                     weightMap->GetSourceNDofsPerElement(),
03980                                 -1.0 );
03981             sents = covSrcEnts;
03982         }
03983         if( data_tgt.point_cloud )
03984         {
03985             moab::Range& tgtEnts = remapper->GetMeshVertices( moab::Remapper::TargetMesh );
03986             solTTagVals.resize( tgtEnts.size(), -1.0 );
03987             tents = tgtEnts;
03988         }
03989         else
03990         {
03991             moab::Range& tgtEnts = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
03992             solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
03993                                     weightMap->GetDestinationNDofsPerElement(),
03994                                 -1.0 );
03995             tents = tgtEnts;
03996         }
03997     }
03998     else
03999     {
04000         moab::Range& covSrcEnts = remapper->GetMeshEntities( moab::Remapper::CoveringMesh );
04001         moab::Range& tgtEnts    = remapper->GetMeshEntities( moab::Remapper::TargetMesh );
04002         solSTagVals.resize(
04003             covSrcEnts.size() * weightMap->GetSourceNDofsPerElement() * weightMap->GetSourceNDofsPerElement(), -1.0 );
04004         solTTagVals.resize( tgtEnts.size() * weightMap->GetDestinationNDofsPerElement() *
04005                                 weightMap->GetDestinationNDofsPerElement(),
04006                             -1.0 );
04007 
04008         sents = covSrcEnts;
04009         tents = tgtEnts;
04010     }
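    // Layout note: each field carries np*np values per element, so for a hypothetical
    // 4th-order spectral-element source (np = 4) the buffer holds 16 doubles per covering
    // element; the hard-coded 16 in the VERBOSE dump below corresponds to that same case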
04011 
04012     for( size_t i = 0; i < srcTagHandles.size(); i++ )
04013     {
04014         // The tag data is np*np*n_el_src
04015         Tag ssolnTag = srcTagHandles[i];
04016         Tag tsolnTag = tgtTagHandles[i];
04017         rval         = context.MBI->tag_get_data( ssolnTag, sents, &solSTagVals[0] );MB_CHK_ERR( rval );
04018 
04019         // Apply the weight matrix to the source solution data and store the result in the
04020         // destination solution vector. Optionally, the transpose application of the weight
04021         // matrix can be performed by setting the 3rd argument to true.
04022         rval = weightMap->ApplyWeights( solSTagVals, solTTagVals, false );MB_CHK_ERR( rval );
04023 
04024         // The tag data is np*np*n_el_dest
04025         rval = context.MBI->tag_set_data( tsolnTag, tents, &solTTagVals[0] );MB_CHK_ERR( rval );
04026     }
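    // In matrix form, each iteration above computes t = W s, where W is the remap weight
    // matrix, s stacks the source values on the covering mesh and t the projected values
    // on the target mesh; the 3rd argument of ApplyWeights selects the transpose application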
04027 
04028 // #define VERBOSE
04029 #ifdef VERBOSE
04030     ParallelComm* pco_intx = context.pcomms[*pid_intersection];
04031 
04032     int ivar = 0;
04033     {
04034         Tag ssolnTag = srcTagHandles[ivar];
04035         std::stringstream sstr;
04036         sstr << "covsrcTagData_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04037         std::ofstream output_file( sstr.str().c_str() );
04038         for( unsigned i = 0; i < sents.size(); ++i )
04039         {
04040             EntityHandle elem = sents[i];
04041             std::vector< double > locsolSTagVals( 16 );
04042             rval = context.MBI->tag_get_data( ssolnTag, &elem, 1, &locsolSTagVals[0] );MB_CHK_ERR( rval );
04043             output_file << "\n" << remapper->GetGlobalID( Remapper::CoveringMesh, i ) << "-- \n\t";
04044             for( unsigned j = 0; j < 16; ++j )
04045                 output_file << locsolSTagVals[j] << " ";
04046         }
04047         output_file.flush();  // required here
04048         output_file.close();
04049     }
04050     {
04051         std::stringstream sstr;
04052         sstr << "outputSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04053         EntityHandle sets[2] = { context.appDatas[*tdata.pid_src].file_set,
04054                                  context.appDatas[*tdata.pid_dest].file_set };
04055         rval                 = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04056     }
04057     {
04058         std::stringstream sstr;
04059         sstr << "outputCovSrcDest_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".h5m";
04060         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04061         EntityHandle covering_set = remapper->GetCoveringSet();
04062         EntityHandle sets[2]      = { covering_set, context.appDatas[*tdata.pid_dest].file_set };
04063         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", sets, 2 );MB_CHK_ERR( rval );
04064     }
04065     {
04066         std::stringstream sstr;
04067         sstr << "covMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04068         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04069         EntityHandle covering_set = remapper->GetCoveringSet();
04070         rval                      = context.MBI->write_file( sstr.str().c_str(), NULL, "", &covering_set, 1 );MB_CHK_ERR( rval );
04071     }
04072     {
04073         std::stringstream sstr;
04074         sstr << "tgtMesh_" << *pid_intersection << "_" << pco_intx->rank() << ".vtk";
04075         // EntityHandle sets[2] = {data_intx.file_set, data_intx.covering_set};
04076         EntityHandle target_set = remapper->GetMeshSet( Remapper::TargetMesh );
04077         rval                    = context.MBI->write_file( sstr.str().c_str(), NULL, "", &target_set, 1 );MB_CHK_ERR( rval );
04078     }
04079     {
04080         std::stringstream sstr;
04081         sstr << "colvector_" << *pid_intersection << "_" << ivar << "_" << pco_intx->rank() << ".txt";
04082         std::ofstream output_file( sstr.str().c_str() );
04083         for( unsigned i = 0; i < solSTagVals.size(); ++i )
04084             output_file << i << " " << weightMap->col_dofmap[i] << " " << weightMap->col_gdofmap[i] << " "
04085                         << solSTagVals[i] << "\n";
04086         output_file.flush();  // required here
04087         output_file.close();
04088     }
04089 #endif
04090     // #undef VERBOSE
04091 
04092     return moab::MB_SUCCESS;
04093 }
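
// A minimal usage sketch for applying a previously computed map (assumptions: the weight map
// was cached under the identifier "scalar" as in the sketch above, and the ";"-separated tag
// names are hypothetical tags that already exist on the covering source mesh and on the
// target mesh, e.g. created beforehand with iMOAB_DefineTagStorage).
//
//   ErrCode ierr = iMOAB_ApplyScalarProjectionWeights( pid_intx, "scalar",
//       "a2oTbot;a2oUbot",             /* source tags, ";"-separated (hypothetical) */
//       "a2oTbot_proj;a2oUbot_proj" ); /* matching target tags (hypothetical) */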
04094 
04095 #endif  // MOAB_HAVE_TEMPESTREMAP
04096 
04097 #ifdef __cplusplus
04098 }
04099 #endif