MOAB: Mesh Oriented datABase  (version 5.4.1)
ParallelComm.cpp
00001 #include "moab/Interface.hpp"
00002 #include "moab/ParallelComm.hpp"
00003 #include "moab/WriteUtilIface.hpp"
00004 #include "moab/ReadUtilIface.hpp"
00005 #include "SequenceManager.hpp"
00006 #include "moab/Error.hpp"
00007 #include "EntitySequence.hpp"
00008 #include "MBTagConventions.hpp"
00009 #include "moab/Skinner.hpp"
00010 #include "MBParallelConventions.h"
00011 #include "moab/Core.hpp"
00012 #include "ElementSequence.hpp"
00013 #include "moab/CN.hpp"
00014 #include "moab/RangeMap.hpp"
00015 #include "moab/MeshTopoUtil.hpp"
00016 #include "TagInfo.hpp"
00017 #include "DebugOutput.hpp"
00018 #include "SharedSetData.hpp"
00019 #include "moab/ScdInterface.hpp"
00020 #include "moab/TupleList.hpp"
00021 #include "moab/gs.hpp"
00022 
00023 #include <iostream>
00024 #include <sstream>
00025 #include <algorithm>
00026 #include <functional>
00027 #include <numeric>
00028 
00029 #include <cmath>
00030 #include <cstdlib>
00031 #include <cassert>
00032 
00033 #ifdef MOAB_HAVE_MPI
00034 #include "moab_mpi.h"
00035 #endif
00036 #ifdef MOAB_HAVE_MPE
00037 #include "mpe.h"
00038 int IFACE_START, IFACE_END;
00039 int GHOST_START, GHOST_END;
00040 int SHAREDV_START, SHAREDV_END;
00041 int RESOLVE_START, RESOLVE_END;
00042 int ENTITIES_START, ENTITIES_END;
00043 int RHANDLES_START, RHANDLES_END;
00044 int OWNED_START, OWNED_END;
00045 #endif
00046 
00047 namespace moab
00048 {
00049 
00050 const unsigned int ParallelComm::INITIAL_BUFF_SIZE = 1024;
00051 
00052 const int MAX_BCAST_SIZE = ( 1 << 28 );
00053 
00054 std::vector< ParallelComm::Buffer* > msgs;
00055 unsigned int __PACK_num = 0, __UNPACK_num = 0, __PACK_count = 0, __UNPACK_count = 0;
00056 std::string __PACK_string, __UNPACK_string;
00057 
00058 #ifdef DEBUG_PACKING_TIMES
00059 #define PC( n, m )                                                            \
00060     {                                                                         \
00061         if( __PACK_num == (unsigned int)n && __PACK_string == m )             \
00062             __PACK_count++;                                                   \
00063         else                                                                  \
00064         {                                                                     \
00065             if( __PACK_count > 1 ) std::cerr << " (" << __PACK_count << "x)"; \
00066             __PACK_count  = 1;                                                \
00067             __PACK_string = m;                                                \
00068             __PACK_num    = n;                                                \
00069             std::cerr << std::endl << "PACK: " << n << m;                     \
00070         }                                                                     \
00071     }
00072 #define UPC( n, m )                                                              \
00073     {                                                                            \
00074         if( __UNPACK_num == (unsigned int)n && __UNPACK_string == m )            \
00075             __UNPACK_count++;                                                    \
00076         else                                                                     \
00077         {                                                                        \
00078             if( __UNPACK_count > 1 ) std::cerr << "(" << __UNPACK_count << "x)"; \
00079             __UNPACK_count  = 1;                                                 \
00080             __UNPACK_string = m;                                                 \
00081             __UNPACK_num    = n;                                                 \
00082             std::cerr << std::endl << "UNPACK: " << n << m;                      \
00083         }                                                                        \
00084     }
00085 #else
00086 #define PC( n, m )
00087 #define UPC( n, m )
00088 #endif
00089 
00090 template < typename T >
00091 static inline void UNPACK( unsigned char*& buff, T* val, size_t count )
00092 {
00093     memcpy( val, buff, count * sizeof( T ) );
00094     buff += count * sizeof( T );
00095 }
00096 
00097 template < typename T >
00098 static inline void PACK( unsigned char*& buff, const T* val, size_t count )
00099 {
00100     memcpy( buff, val, count * sizeof( T ) );
00101     buff += count * sizeof( T );
00102 }
00103 
00104 static inline void PACK_INTS( unsigned char*& buff, const int* int_val, size_t num )
00105 {
00106     PACK( buff, int_val, num );
00107     PC( num, " ints" );
00108 }
00109 
00110 static inline void PACK_INT( unsigned char*& buff, int int_val )
00111 {
00112     PACK_INTS( buff, &int_val, 1 );
00113 }
00114 
00115 static inline void PACK_DBLS( unsigned char*& buff, const double* dbl_val, size_t num )
00116 {
00117     PACK( buff, dbl_val, num );
00118     PC( num, " doubles" );
00119 }
00120 
00121 // static inline
00122 // void PACK_DBL(unsigned char*& buff, const double dbl_val)
00123 //{ PACK_DBLS(buff, &dbl_val, 1); }
00124 
00125 static inline void PACK_EH( unsigned char*& buff, const EntityHandle* eh_val, size_t num )
00126 {
00127     PACK( buff, eh_val, num );
00128     PC( num, " handles" );
00129 }
00130 
00131 // static inline
00132 // void PACK_CHAR_64(unsigned char*& buff, const char* str)
00133 //{
00134 //  memcpy(buff, str, 64);
00135 //  buff += 64;
00136 //  PC(64, " chars");
00137 //}
00138 
00139 static inline void PACK_VOID( unsigned char*& buff, const void* val, size_t num )
00140 {
00141     PACK( buff, reinterpret_cast< const unsigned char* >( val ), num );
00142     PC( num, " void" );
00143 }
00144 
00145 static inline void PACK_BYTES( unsigned char*& buff, const void* val, int num )
00146 {
00147     PACK_INT( buff, num );
00148     PACK_VOID( buff, val, num );
00149 }
00150 
00151 static inline void PACK_RANGE( unsigned char*& buff, const Range& rng )
00152 {
00153     PACK_INT( buff, rng.psize() );
00154     Range::const_pair_iterator cit;
00155     for( cit = rng.const_pair_begin(); cit != rng.const_pair_end(); ++cit )
00156     {
00157         EntityHandle eh[2] = { cit->first, cit->second };
00158         PACK_EH( buff, eh, 2 );
00159     }
00160     PC( rng.psize(), "-subranged range" );
00161 }
00162 
00163 static inline void UNPACK_INTS( unsigned char*& buff, int* int_val, size_t num )
00164 {
00165     UNPACK( buff, int_val, num );
00166     UPC( num, " ints" );
00167 }
00168 
00169 static inline void UNPACK_INT( unsigned char*& buff, int& int_val )
00170 {
00171     UNPACK_INTS( buff, &int_val, 1 );
00172 }
00173 
00174 static inline void UNPACK_DBLS( unsigned char*& buff, double* dbl_val, size_t num )
00175 {
00176     UNPACK( buff, dbl_val, num );
00177     UPC( num, " doubles" );
00178 }
00179 
00180 static inline void UNPACK_DBL( unsigned char*& buff, double& dbl_val )
00181 {
00182     UNPACK_DBLS( buff, &dbl_val, 1 );
00183 }
00184 
00185 static inline void UNPACK_EH( unsigned char*& buff, EntityHandle* eh_val, size_t num )
00186 {
00187     UNPACK( buff, eh_val, num );
00188     UPC( num, " handles" );
00189 }
00190 
00191 // static inline
00192 // void UNPACK_CHAR_64(unsigned char*& buff, char* char_val)
00193 //{
00194 //  memcpy(char_val, buff, 64);
00195 //  buff += 64;
00196 //  UPC(64, " chars");
00197 //}
00198 
00199 static inline void UNPACK_VOID( unsigned char*& buff, void* val, size_t num )
00200 {
00201     UNPACK( buff, reinterpret_cast< unsigned char* >( val ), num );
00202     UPC( num, " void" );
00203 }
00204 
00205 static inline void UNPACK_TYPE( unsigned char*& buff, EntityType& type )
00206 {
00207     int int_type = MBMAXTYPE;
00208     UNPACK_INT( buff, int_type );
00209     type = static_cast< EntityType >( int_type );
00210     assert( type >= MBVERTEX && type <= MBMAXTYPE );
00211 }
00212 
00213 static inline void UNPACK_RANGE( unsigned char*& buff, Range& rng )
00214 {
00215     int num_subs;
00216     EntityHandle eh[2];
00217     UNPACK_INT( buff, num_subs );
00218     for( int i = 0; i < num_subs; i++ )
00219     {
00220         UPC( num_subs, "-subranged range" );
00221         UNPACK_EH( buff, eh, 2 );
00222         rng.insert( eh[0], eh[1] );
00223     }
00224 }
00225 
00226 enum MBMessageTag
00227 {
00228     MB_MESG_ANY = MPI_ANY_TAG,
00229     MB_MESG_ENTS_ACK,
00230     MB_MESG_ENTS_SIZE,
00231     MB_MESG_ENTS_LARGE,
00232     MB_MESG_REMOTEH_ACK,
00233     MB_MESG_REMOTEH_SIZE,
00234     MB_MESG_REMOTEH_LARGE,
00235     MB_MESG_TAGS_ACK,
00236     MB_MESG_TAGS_SIZE,
00237     MB_MESG_TAGS_LARGE
00238 };
00239 
00240 static inline size_t RANGE_SIZE( const Range& rng )
00241 {
00242     return 2 * sizeof( EntityHandle ) * rng.psize() + sizeof( int );
00243 }
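/* Illustrative sketch added for exposition (not part of the upstream source):
 * the PACK_* / UNPACK_* helpers above advance the buffer pointer they are
 * handed, so writer and reader must agree on the exact packing order. A
 * minimal round trip through a hypothetical caller-owned scratch buffer:
 *
 *   unsigned char scratch[128];
 *   unsigned char* wptr = scratch;
 *   int gid             = 42;
 *   double coords[3]    = { 0.0, 1.0, 2.0 };
 *   PACK_INT( wptr, gid );          // 1 int, wptr advances by sizeof(int)
 *   PACK_DBLS( wptr, coords, 3 );   // 3 doubles, wptr advances again
 *
 *   unsigned char* rptr = scratch;  // unpack in the same order
 *   int gid_out;
 *   double coords_out[3];
 *   UNPACK_INT( rptr, gid_out );
 *   UNPACK_DBLS( rptr, coords_out, 3 );
 *
 * RANGE_SIZE() above gives the byte count PACK_RANGE() consumes for a Range:
 * one int for the pair count plus two handles per pair.
 */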
00244 
00245 #define PRINT_DEBUG_ISEND( A, B, C, D, E )    print_debug_isend( ( A ), ( B ), ( C ), ( D ), ( E ) )
00246 #define PRINT_DEBUG_IRECV( A, B, C, D, E, F ) print_debug_irecv( ( A ), ( B ), ( C ), ( D ), ( E ), ( F ) )
00247 #define PRINT_DEBUG_RECD( A )                 print_debug_recd( ( A ) )
00248 #define PRINT_DEBUG_WAITANY( A, B, C )        print_debug_waitany( ( A ), ( B ), ( C ) )
00249 
00250 void ParallelComm::print_debug_isend( int from, int to, unsigned char* buff, int tag, int sz )
00251 {
00252     myDebug->tprintf( 3, "Isend, %d->%d, buffer ptr = %p, tag=%d, size=%d\n", from, to, (void*)buff, tag, sz );
00253 }
00254 
00255 void ParallelComm::print_debug_irecv( int to, int from, unsigned char* buff, int sz, int tag, int incoming )
00256 {
00257     myDebug->tprintf( 3, "Irecv, %d<-%d, buffer ptr = %p, tag=%d, size=%d", to, from, (void*)buff, tag, sz );
00258     if( tag < MB_MESG_REMOTEH_ACK )
00259         myDebug->printf( 3, ", incoming1=%d\n", incoming );
00260     else if( tag < MB_MESG_TAGS_ACK )
00261         myDebug->printf( 3, ", incoming2=%d\n", incoming );
00262     else
00263         myDebug->printf( 3, ", incoming=%d\n", incoming );
00264 }
00265 
00266 void ParallelComm::print_debug_recd( MPI_Status status )
00267 {
00268     if( myDebug->get_verbosity() == 3 )
00269     {
00270         int this_count;
00271         int success = MPI_Get_count( &status, MPI_UNSIGNED_CHAR, &this_count );
00272         if( MPI_SUCCESS != success ) this_count = -1;
00273         myDebug->tprintf( 3, "Received from %d, count = %d, tag = %d\n", status.MPI_SOURCE, this_count,
00274                           status.MPI_TAG );
00275     }
00276 }
00277 
00278 void ParallelComm::print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc )
00279 {
00280     if( myDebug->get_verbosity() == 3 )
00281     {
00282         myDebug->tprintf( 3, "Waitany, p=%d, ", proc );
00283         if( tag < MB_MESG_REMOTEH_ACK )
00284             myDebug->print( 3, ", recv_ent_reqs=" );
00285         else if( tag < MB_MESG_TAGS_ACK )
00286             myDebug->print( 3, ", recv_remoteh_reqs=" );
00287         else
00288             myDebug->print( 3, ", recv_tag_reqs=" );
00289         for( unsigned int i = 0; i < reqs.size(); i++ )
00290             myDebug->printf( 3, " %p", (void*)(intptr_t)reqs[i] );
00291         myDebug->print( 3, "\n" );
00292     }
00293 }
00294 
00295 /** Name of tag used to store ParallelComm Index on mesh partitioning sets */
00296 const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
00297 
00298 /** \brief Tag storing parallel communication objects
00299  *
00300  * This tag stores pointers to ParallelComm communication
00301  * objects; one of these is allocated for each different
00302  * communicator used to read mesh. ParallelComm stores
00303  * partition and interface sets corresponding to its parallel mesh.
00304  * By default, a parallel read uses the first ParallelComm object
00305  * on the interface instance; if instantiated with one, ReadParallel
00306  * adds this object to the interface instance too.
00307  *
00308  * Tag type: opaque
00309  * Tag size: MAX_SHARING_PROCS*sizeof(ParallelComm*)
00310  */
00311 #define PARALLEL_COMM_TAG_NAME "__PARALLEL_COMM"
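/* Sketch added for exposition: the opaque tag named above stores an array of
 * MAX_SHARING_PROCS ParallelComm pointers on the root set (handle 0); this is
 * how add_pcomm()/remove_pcomm() below locate already-registered instances.
 * Reading the array directly, assuming pc_tag was obtained from pcomm_tag(),
 * would look roughly like:
 *
 *   std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS, NULL );
 *   const EntityHandle root = 0;
 *   ErrorCode rval = mbImpl->tag_get_data( pc_tag, &root, 1, &pc_array[0] );
 *   // MB_TAG_NOT_FOUND simply means no ParallelComm has been registered yet.
 */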
00312 
00313 ParallelComm::ParallelComm( Interface* impl, MPI_Comm cm, int* id )
00314     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
00315       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
00316       myDebug( NULL )
00317 {
00318     initialize();
00319     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
00320     if( id ) *id = pcommID;
00321 }
00322 
00323 ParallelComm::ParallelComm( Interface* impl, std::vector< unsigned char >& /*tmp_buff*/, MPI_Comm cm, int* id )
00324     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
00325       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
00326       myDebug( NULL )
00327 {
00328     initialize();
00329     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
00330     if( id ) *id = pcommID;
00331 }
00332 
00333 ParallelComm::~ParallelComm()
00334 {
00335     remove_pcomm( this );
00336     delete_all_buffers();
00337     delete myDebug;
00338     delete sharedSetData;
00339 }
00340 
00341 void ParallelComm::initialize()
00342 {
00343     Core* core      = dynamic_cast< Core* >( mbImpl );
00344     sequenceManager = core->sequence_manager();
00345     mbImpl->query_interface( errorHandler );
00346 
00347     // Initialize MPI, if necessary
00348     int flag   = 1;
00349     int retval = MPI_Initialized( &flag );
00350     if( MPI_SUCCESS != retval || !flag )
00351     {
00352         int argc    = 0;
00353         char** argv = NULL;
00354 
00355         // mpi not initialized yet - initialize here
00356         retval = MPI_Init( &argc, &argv );
00357         assert( MPI_SUCCESS == retval );
00358     }
00359 
00360     // Reserve space for vectors
00361     buffProcs.reserve( MAX_SHARING_PROCS );
00362     localOwnedBuffs.reserve( MAX_SHARING_PROCS );
00363     remoteOwnedBuffs.reserve( MAX_SHARING_PROCS );
00364 
00365     pcommID = add_pcomm( this );
00366 
00367     if( !myDebug )
00368     {
00369         myDebug = new DebugOutput( "ParallelComm", std::cerr );
00370         myDebug->set_rank( procConfig.proc_rank() );
00371     }
00372 }
00373 
00374 int ParallelComm::add_pcomm( ParallelComm* pc )
00375 {
00376     // Add this pcomm to instance tag
00377     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS, (ParallelComm*)NULL );
00378     Tag pc_tag = pcomm_tag( mbImpl, true );
00379     assert( 0 != pc_tag );
00380 
00381     const EntityHandle root = 0;
00382     ErrorCode result        = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00383     if( MB_SUCCESS != result && MB_TAG_NOT_FOUND != result ) return -1;
00384     int index = 0;
00385     while( index < MAX_SHARING_PROCS && pc_array[index] )
00386         index++;
00387     if( index == MAX_SHARING_PROCS )
00388     {
00389         index = -1;
00390         assert( false );
00391     }
00392     else
00393     {
00394         pc_array[index] = pc;
00395         mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00396     }
00397     return index;
00398 }
00399 
00400 void ParallelComm::remove_pcomm( ParallelComm* pc )
00401 {
00402     // Remove this pcomm from instance tag
00403     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS );
00404     Tag pc_tag = pcomm_tag( mbImpl, true );
00405 
00406     const EntityHandle root                      = 0;
00407     ErrorCode result                             = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00408     std::vector< ParallelComm* >::iterator pc_it = std::find( pc_array.begin(), pc_array.end(), pc );
00409     assert( MB_SUCCESS == result && pc_it != pc_array.end() );
00410     // Empty if test to get around compiler warning about unused var
00411     if( MB_SUCCESS == result )
00412     {
00413     }
00414 
00415     *pc_it = NULL;
00416     mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00417 }
00418 
00419 //! Assign a global id space, for largest-dimension or all entities (and
00420 //! in either case for vertices too)
00421 ErrorCode ParallelComm::assign_global_ids( EntityHandle this_set,
00422                                            const int dimension,
00423                                            const int start_id,
00424                                            const bool largest_dim_only,
00425                                            const bool parallel,
00426                                            const bool owned_only )
00427 {
00428     Range entities[4];
00429     ErrorCode result;
00430     std::vector< unsigned char > pstatus;
00431     for( int dim = 0; dim <= dimension; dim++ )
00432     {
00433         if( dim == 0 || !largest_dim_only || dim == dimension )
00434         {
00435             result = mbImpl->get_entities_by_dimension( this_set, dim, entities[dim] );MB_CHK_SET_ERR( result, "Failed to get vertices in assign_global_ids" );
00436         }
00437 
00438         // Need to filter out non-locally-owned entities!!!
00439         pstatus.resize( entities[dim].size() );
00440         result = mbImpl->tag_get_data( pstatus_tag(), entities[dim], &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus in assign_global_ids" );
00441 
00442         Range dum_range;
00443         Range::iterator rit;
00444         unsigned int i;
00445         for( rit = entities[dim].begin(), i = 0; rit != entities[dim].end(); ++rit, i++ )
00446             if( pstatus[i] & PSTATUS_NOT_OWNED ) dum_range.insert( *rit );
00447         entities[dim] = subtract( entities[dim], dum_range );
00448     }
00449 
00450     return assign_global_ids( entities, dimension, start_id, parallel, owned_only );
00451 }
00452 
00453 //! Assign a global id space, for largest-dimension or all entities (and
00454 //! in either case for vertices too)
00455 ErrorCode ParallelComm::assign_global_ids( Range entities[],
00456                                            const int dimension,
00457                                            const int start_id,
00458                                            const bool parallel,
00459                                            const bool owned_only )
00460 {
00461     int local_num_elements[4];
00462     ErrorCode result;
00463     for( int dim = 0; dim <= dimension; dim++ )
00464     {
00465         local_num_elements[dim] = entities[dim].size();
00466     }
00467 
00468     // Communicate numbers
00469     std::vector< int > num_elements( procConfig.proc_size() * 4 );
00470 #ifdef MOAB_HAVE_MPI
00471     if( procConfig.proc_size() > 1 && parallel )
00472     {
00473         int retval =
00474             MPI_Allgather( local_num_elements, 4, MPI_INT, &num_elements[0], 4, MPI_INT, procConfig.proc_comm() );
00475         if( 0 != retval ) return MB_FAILURE;
00476     }
00477     else
00478 #endif
00479         for( int dim = 0; dim < 4; dim++ )
00480             num_elements[dim] = local_num_elements[dim];
00481 
00482     // My entities start at one greater than total_elems[d]
00483     int total_elems[4] = { start_id, start_id, start_id, start_id };
00484 
00485     for( unsigned int proc = 0; proc < procConfig.proc_rank(); proc++ )
00486     {
00487         for( int dim = 0; dim < 4; dim++ )
00488             total_elems[dim] += num_elements[4 * proc + dim];
00489     }
00490 
00491     // Assign global ids now
00492     Tag gid_tag = mbImpl->globalId_tag();
00493 
00494     for( int dim = 0; dim < 4; dim++ )
00495     {
00496         if( entities[dim].empty() ) continue;
00497         num_elements.resize( entities[dim].size() );
00498         int i = 0;
00499         for( Range::iterator rit = entities[dim].begin(); rit != entities[dim].end(); ++rit )
00500             num_elements[i++] = total_elems[dim]++;
00501 
00502         result = mbImpl->tag_set_data( gid_tag, entities[dim], &num_elements[0] );MB_CHK_SET_ERR( result, "Failed to set global id tag in assign_global_ids" );
00503     }
00504 
00505     if( owned_only ) return MB_SUCCESS;
00506 
00507     // Exchange tags
00508     for( int dim = 1; dim < 4; dim++ )
00509         entities[0].merge( entities[dim] );
00510 
00511     return exchange_tags( gid_tag, entities[0] );
00512 }
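/* Worked example added for exposition: with start_id = 1 and per-rank counts
 * for some dimension d of { n0, n1, n2 } gathered by MPI_Allgather above,
 * rank 1 starts numbering that dimension at 1 + n0 and assigns the n1
 * consecutive ids 1 + n0 ... n0 + n1, while rank 2 starts at 1 + n0 + n1.
 * When owned_only is false, the final exchange_tags() call then copies each
 * owner's ids onto the non-owned shared copies.
 */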
00513 
00514 int ParallelComm::get_buffers( int to_proc, bool* is_new )
00515 {
00516     int ind                                   = -1;
00517     std::vector< unsigned int >::iterator vit = std::find( buffProcs.begin(), buffProcs.end(), to_proc );
00518     if( vit == buffProcs.end() )
00519     {
00520         assert( "shouldn't need buffer to myself" && to_proc != (int)procConfig.proc_rank() );
00521         ind = buffProcs.size();
00522         buffProcs.push_back( (unsigned int)to_proc );
00523         localOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
00524         remoteOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
00525         if( is_new ) *is_new = true;
00526     }
00527     else
00528     {
00529         ind = vit - buffProcs.begin();
00530         if( is_new ) *is_new = false;
00531     }
00532     assert( ind < MAX_SHARING_PROCS );
00533     return ind;
00534 }
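/* Sketch added for exposition: the index returned by get_buffers() addresses
 * the parallel arrays buffProcs, localOwnedBuffs and remoteOwnedBuffs, so a
 * typical caller does something like:
 *
 *   bool is_new = false;
 *   int ind          = get_buffers( to_proc, &is_new );
 *   Buffer* send_buf = localOwnedBuffs[ind];   // packed by this rank
 *   Buffer* recv_buf = remoteOwnedBuffs[ind];  // filled from rank to_proc
 */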
00535 
00536 ErrorCode ParallelComm::broadcast_entities( const int from_proc,
00537                                             Range& entities,
00538                                             const bool adjacencies,
00539                                             const bool tags )
00540 {
00541 #ifndef MOAB_HAVE_MPI
00542     return MB_FAILURE;
00543 #else
00544 
00545     ErrorCode result = MB_SUCCESS;
00546     int success;
00547     int buff_size;
00548 
00549     Buffer buff( INITIAL_BUFF_SIZE );
00550     buff.reset_ptr( sizeof( int ) );
00551     if( (int)procConfig.proc_rank() == from_proc )
00552     {
00553         result = add_verts( entities );MB_CHK_SET_ERR( result, "Failed to add adj vertices" );
00554 
00555         buff.reset_ptr( sizeof( int ) );
00556         result = pack_buffer( entities, adjacencies, tags, false, -1, &buff );MB_CHK_SET_ERR( result, "Failed to compute buffer size in broadcast_entities" );
00557         buff.set_stored_size();
00558         buff_size = buff.buff_ptr - buff.mem_ptr;
00559     }
00560 
00561     success = MPI_Bcast( &buff_size, 1, MPI_INT, from_proc, procConfig.proc_comm() );
00562     if( MPI_SUCCESS != success )
00563     {
00564         MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" );
00565     }
00566 
00567     if( !buff_size )  // No data
00568         return MB_SUCCESS;
00569 
00570     if( (int)procConfig.proc_rank() != from_proc ) buff.reserve( buff_size );
00571 
00572     size_t offset = 0;
00573     while( buff_size )
00574     {
00575         int sz  = std::min( buff_size, MAX_BCAST_SIZE );
00576         success = MPI_Bcast( buff.mem_ptr + offset, sz, MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
00577         if( MPI_SUCCESS != success )
00578         {
00579             MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer failed" );
00580         }
00581 
00582         offset += sz;
00583         buff_size -= sz;
00584     }
00585 
00586     if( (int)procConfig.proc_rank() != from_proc )
00587     {
00588         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
00589         std::vector< std::vector< int > > dum1p;
00590         std::vector< EntityHandle > dum2, dum4;
00591         std::vector< unsigned int > dum3;
00592         buff.reset_ptr( sizeof( int ) );
00593         result = unpack_buffer( buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );MB_CHK_SET_ERR( result, "Failed to unpack buffer in broadcast_entities" );
00594         std::copy( dum4.begin(), dum4.end(), range_inserter( entities ) );
00595     }
00596 
00597     return MB_SUCCESS;
00598 #endif
00599 }
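/* Usage sketch added for exposition (the call sequence below is an assumption,
 * not taken from the original file): rank from_proc populates the Range, every
 * rank calls broadcast_entities collectively, and non-root ranks get the
 * unpacked copies back in the same Range.
 *
 *   Range ents;
 *   if( (int)pcomm->rank() == 0 ) mb->get_entities_by_dimension( 0, 3, ents );
 *   ErrorCode rval = pcomm->broadcast_entities( 0, ents, false, true );
 *   // On ranks != 0, 'ents' now holds the newly created local copies.
 */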
00600 
00601 ErrorCode ParallelComm::scatter_entities( const int from_proc,
00602                                           std::vector< Range >& entities,
00603                                           const bool adjacencies,
00604                                           const bool tags )
00605 {
00606 #ifndef MOAB_HAVE_MPI
00607     return MB_FAILURE;
00608 #else
00609     ErrorCode result = MB_SUCCESS;
00610     int i, success, buff_size, prev_size;
00611     int nProcs         = (int)procConfig.proc_size();
00612     int* sendCounts    = new int[nProcs];
00613     int* displacements = new int[nProcs];
00614     sendCounts[0]      = sizeof( int );
00615     displacements[0]   = 0;
00616     Buffer buff( INITIAL_BUFF_SIZE );
00617     buff.reset_ptr( sizeof( int ) );
00618     buff.set_stored_size();
00619     unsigned int my_proc = procConfig.proc_rank();
00620 
00621     // Get buffer size array for each remote processor
00622     if( my_proc == (unsigned int)from_proc )
00623     {
00624         for( i = 1; i < nProcs; i++ )
00625         {
00626             prev_size = buff.buff_ptr - buff.mem_ptr;
00627             buff.reset_ptr( prev_size + sizeof( int ) );
00628             result = add_verts( entities[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
00629 
00630             result = pack_buffer( entities[i], adjacencies, tags, false, -1, &buff );
00631             if( MB_SUCCESS != result )
00632             {
00633                 delete[] sendCounts;
00634                 delete[] displacements;
00635                 MB_SET_ERR( result, "Failed to pack buffer in scatter_entities" );
00636             }
00637 
00638             buff_size                               = buff.buff_ptr - buff.mem_ptr - prev_size;
00639             *( (int*)( buff.mem_ptr + prev_size ) ) = buff_size;
00640             sendCounts[i]                           = buff_size;
00641         }
00642     }
00643 
00644     // Broadcast buffer size array
00645     success = MPI_Bcast( sendCounts, nProcs, MPI_INT, from_proc, procConfig.proc_comm() );
00646     if( MPI_SUCCESS != success )
00647     {
00648         delete[] sendCounts;
00649         delete[] displacements;
00650         MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" );
00651     }
00652 
00653     for( i = 1; i < nProcs; i++ )
00654     {
00655         displacements[i] = displacements[i - 1] + sendCounts[i - 1];
00656     }
00657 
00658     Buffer rec_buff;
00659     rec_buff.reserve( sendCounts[my_proc] );
00660 
00661     // Scatter actual geometry
00662     success = MPI_Scatterv( buff.mem_ptr, sendCounts, displacements, MPI_UNSIGNED_CHAR, rec_buff.mem_ptr,
00663                             sendCounts[my_proc], MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
00664 
00665     if( MPI_SUCCESS != success )
00666     {
00667         delete[] sendCounts;
00668         delete[] displacements;
00669         MB_SET_ERR( MB_FAILURE, "MPI_Scatterv of buffer failed" );
00670     }
00671 
00672     // Unpack in remote processors
00673     if( my_proc != (unsigned int)from_proc )
00674     {
00675         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
00676         std::vector< std::vector< int > > dum1p;
00677         std::vector< EntityHandle > dum2, dum4;
00678         std::vector< unsigned int > dum3;
00679         rec_buff.reset_ptr( sizeof( int ) );
00680         result = unpack_buffer( rec_buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );
00681         if( MB_SUCCESS != result )
00682         {
00683             delete[] sendCounts;
00684             delete[] displacements;
00685             MB_SET_ERR( result, "Failed to unpack buffer in scatter_entities" );
00686         }
00687 
00688         std::copy( dum4.begin(), dum4.end(), range_inserter( entities[my_proc] ) );
00689     }
00690 
00691     delete[] sendCounts;
00692     delete[] displacements;
00693 
00694     return MB_SUCCESS;
00695 #endif
00696 }
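/* Note added for exposition: on the root, the per-destination packed sizes
 * land in sendCounts[] and displacements[] is their prefix sum; e.g. packed
 * sizes of { sizeof(int), 120, 200 } on three ranks give displacements
 * { 0, sizeof(int), sizeof(int) + 120 }, so MPI_Scatterv hands each rank
 * exactly its own slice (whose length is also stored in the slice's leading
 * int).
 */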
00697 
00698 ErrorCode ParallelComm::send_entities( const int to_proc,
00699                                        Range& orig_ents,
00700                                        const bool adjs,
00701                                        const bool tags,
00702                                        const bool store_remote_handles,
00703                                        const bool is_iface,
00704                                        Range& /*final_ents*/,
00705                                        int& incoming1,
00706                                        int& incoming2,
00707                                        TupleList& entprocs,
00708                                        std::vector< MPI_Request >& recv_remoteh_reqs,
00709                                        bool /*wait_all*/ )
00710 {
00711 #ifndef MOAB_HAVE_MPI
00712     return MB_FAILURE;
00713 #else
00714     // Pack entities to local buffer
00715     int ind = get_buffers( to_proc );
00716     localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
00717 
00718     // Add vertices
00719     ErrorCode result = add_verts( orig_ents );MB_CHK_SET_ERR( result, "Failed to add verts in send_entities" );
00720 
00721     // Filter out entities already shared with destination
00722     Range tmp_range;
00723     result = filter_pstatus( orig_ents, PSTATUS_SHARED, PSTATUS_AND, to_proc, &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
00724     if( !tmp_range.empty() )
00725     {
00726         orig_ents = subtract( orig_ents, tmp_range );
00727     }
00728 
00729     result = pack_buffer( orig_ents, adjs, tags, store_remote_handles, to_proc, localOwnedBuffs[ind], &entprocs );MB_CHK_SET_ERR( result, "Failed to pack buffer in send_entities" );
00730 
00731     // Send buffer
00732     result = send_buffer( to_proc, localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind], recvReqs[2 * ind + 1],
00733                           (int*)( remoteOwnedBuffs[ind]->mem_ptr ),
00734                           //&ackbuff,
00735                           incoming1, MB_MESG_REMOTEH_SIZE,
00736                           ( !is_iface && store_remote_handles ? localOwnedBuffs[ind] : NULL ),
00737                           &recv_remoteh_reqs[2 * ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to send buffer" );
00738 
00739     return MB_SUCCESS;
00740 #endif
00741 }
00742 
00743 ErrorCode ParallelComm::send_entities( std::vector< unsigned int >& send_procs,
00744                                        std::vector< Range* >& send_ents,
00745                                        int& incoming1,
00746                                        int& incoming2,
00747                                        const bool store_remote_handles )
00748 {
00749 #ifdef MOAB_HAVE_MPE
00750     if( myDebug->get_verbosity() == 2 )
00751     {
00752         MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_entities." );
00753     }
00754 #endif
00755     myDebug->tprintf( 1, "Entering send_entities\n" );
00756     if( myDebug->get_verbosity() == 4 )
00757     {
00758         msgs.clear();
00759         msgs.reserve( MAX_SHARING_PROCS );
00760     }
00761 
00762     unsigned int i;
00763     int ind;
00764     ErrorCode result = MB_SUCCESS;
00765 
00766     // Set buffProcs with communicating procs
00767     unsigned int n_proc = send_procs.size();
00768     for( i = 0; i < n_proc; i++ )
00769     {
00770         ind    = get_buffers( send_procs[i] );
00771         result = add_verts( *send_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
00772 
00773         // Filter out entities already shared with destination
00774         Range tmp_range;
00775         result = filter_pstatus( *send_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
00776         if( !tmp_range.empty() )
00777         {
00778             *send_ents[i] = subtract( *send_ents[i], tmp_range );
00779         }
00780     }
00781 
00782     //===========================================
00783     // Get entities to be sent to neighbors
00784     // Need to get procs each entity is sent to
00785     //===========================================
00786     Range allsent, tmp_range;
00787     int npairs = 0;
00788     TupleList entprocs;
00789     for( i = 0; i < n_proc; i++ )
00790     {
00791         int n_ents = send_ents[i]->size();
00792         if( n_ents > 0 )
00793         {
00794             npairs += n_ents;  // Get the total # of proc/handle pairs
00795             allsent.merge( *send_ents[i] );
00796         }
00797     }
00798 
00799     // Allocate a TupleList of that size
00800     entprocs.initialize( 1, 0, 1, 0, npairs );
00801     entprocs.enableWriteAccess();
00802 
00803     // Put the proc/handle pairs in the list
00804     for( i = 0; i < n_proc; i++ )
00805     {
00806         for( Range::iterator rit = send_ents[i]->begin(); rit != send_ents[i]->end(); ++rit )
00807         {
00808             entprocs.vi_wr[entprocs.get_n()]  = send_procs[i];
00809             entprocs.vul_wr[entprocs.get_n()] = *rit;
00810             entprocs.inc_n();
00811         }
00812     }
00813 
00814     // Sort by handle
00815     moab::TupleList::buffer sort_buffer;
00816     sort_buffer.buffer_init( npairs );
00817     entprocs.sort( 1, &sort_buffer );
00818     entprocs.disableWriteAccess();
00819     sort_buffer.reset();
00820 
00821     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
00822                       (unsigned long)allsent.size() );
00823 
00824     //===========================================
00825     // Pack and send ents from this proc to others
00826     //===========================================
00827     for( i = 0; i < n_proc; i++ )
00828     {
00829         if( send_ents[i]->size() > 0 )
00830         {
00831             ind = get_buffers( send_procs[i] );
00832             myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", send_ents[i]->compactness(),
00833                               (unsigned long)send_ents[i]->size() );
00834             // Reserve space on front for size and for initial buff size
00835             localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
00836             result = pack_buffer( *send_ents[i], false, true, store_remote_handles, buffProcs[ind],
00837                                   localOwnedBuffs[ind], &entprocs, &allsent );
00838 
00839             if( myDebug->get_verbosity() == 4 )
00840             {
00841                 msgs.resize( msgs.size() + 1 );
00842                 msgs.back() = new Buffer( *localOwnedBuffs[ind] );
00843             }
00844 
00845             // Send the buffer (size stored in front in send_buffer)
00846             result = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind],
00847                                   recvReqs[2 * ind + 1], &ackbuff, incoming1, MB_MESG_REMOTEH_SIZE,
00848                                   ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recvRemotehReqs[2 * ind],
00849                                   &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost send" );
00850         }
00851     }
00852     entprocs.reset();
00853 
00854 #ifdef MOAB_HAVE_MPE
00855     if( myDebug->get_verbosity() == 2 )
00856     {
00857         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_entities." );
00858     }
00859 #endif
00860 
00861     return MB_SUCCESS;
00862 }
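/* Sketch added for exposition: entprocs is a TupleList holding one integer
 * (destination rank) and one handle per (proc, entity) pair, sorted by
 * handle. Sending { e1, e2 } to rank 2 and { e2 } to rank 3 yields the three
 * tuples
 *
 *   (2, e1)  (2, e2)  (3, e2)
 *
 * which are handed to pack_buffer() below, presumably so each packed entity
 * carries the full list of ranks that also receive a copy of it.
 */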
00863 
00864 /////////////////////////////////////////////////////////////////////////////////
00865 // Send and Receive routines for a sequence of entities: use case UMR
00866 /////////////////////////////////////////////////////////////////////////////////
00867 void print_buff( unsigned char* ch, int size )
00868 {
00869     for( int i = 0; i < size; i++ )
00870         std::cout << ch[i];
00871     std::cout << "\n";
00872 }
00873 ErrorCode ParallelComm::send_recv_entities( std::vector< int >& send_procs,
00874                                             std::vector< std::vector< int > >& msgsizes,
00875                                             std::vector< std::vector< EntityHandle > >& senddata,
00876                                             std::vector< std::vector< EntityHandle > >& recvdata )
00877 {
00878 #ifdef MOAB_HAVE_MPE
00879     if( myDebug->get_verbosity() == 2 )
00880     {
00881         MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_recv_entities." );
00882     }
00883 #endif
00884     myDebug->tprintf( 1, "Entering send_recv_entities\n" );
00885     if( myDebug->get_verbosity() == 4 )
00886     {
00887         msgs.clear();
00888         msgs.reserve( MAX_SHARING_PROCS );
00889     }
00890 
00891     // unsigned int i;
00892     int i, ind, success;
00893     ErrorCode error = MB_SUCCESS;
00894 
00895     //===========================================
00896     // Pack and send ents from this proc to others
00897     //===========================================
00898 
00899     // std::cout<<"resetting all buffers"<<std::endl;
00900 
00901     reset_all_buffers();
00902     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
00903     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
00904     int ack_buff;
00905     int incoming = 0;
00906 
00907     std::vector< unsigned int >::iterator sit;
00908 
00909     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
00910     {
00911         incoming++;
00912         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
00913                            MB_MESG_ENTS_SIZE, incoming );
00914 
00915         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
00916                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
00917         if( success != MPI_SUCCESS )
00918         {
00919             MB_SET_ERR( MB_FAILURE, "Failed to post irecv in send_recv_entities" );
00920         }
00921     }
00922 
00923     //  std::set<unsigned int>::iterator it;
00924     for( i = 0; i < (int)send_procs.size(); i++ )
00925     {
00926         // Get index of the shared processor in the local buffer
00927         ind = get_buffers( send_procs[i] );
00928         localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
00929 
00930         int buff_size = msgsizes[i].size() * sizeof( int ) + senddata[i].size() * sizeof( EntityHandle );
00931         localOwnedBuffs[ind]->check_space( buff_size );
00932 
00933         // Pack entities
00934         std::vector< int > msg;
00935         msg.insert( msg.end(), msgsizes[i].begin(), msgsizes[i].end() );
00936         PACK_INTS( localOwnedBuffs[ind]->buff_ptr, &msg[0], msg.size() );
00937 
00938         std::vector< EntityHandle > entities;
00939         entities.insert( entities.end(), senddata[i].begin(), senddata[i].end() );
00940         PACK_EH( localOwnedBuffs[ind]->buff_ptr, &entities[0], entities.size() );
00941         localOwnedBuffs[ind]->set_stored_size();
00942 
00943         if( myDebug->get_verbosity() == 4 )
00944         {
00945             msgs.resize( msgs.size() + 1 );
00946             msgs.back() = new Buffer( *localOwnedBuffs[ind] );
00947         }
00948 
00949         // Send the buffer (size stored in front in send_buffer)
00950         error = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
00951                              recv_ent_reqs[3 * ind + 2], &ack_buff, incoming );MB_CHK_SET_ERR( error, "Failed to Isend in send_recv_entities" );
00952     }
00953 
00954     //===========================================
00955     // Receive and unpack ents from received data
00956     //===========================================
00957 
00958     while( incoming )
00959     {
00960 
00961         MPI_Status status;
00962         int index_in_recv_requests;
00963 
00964         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
00965         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &index_in_recv_requests, &status );
00966         if( MPI_SUCCESS != success )
00967         {
00968             MB_SET_ERR( MB_FAILURE, "Failed in waitany in send_recv_entities" );
00969         }
00970 
00971         // The processor index in the list is the request index divided by 3
00972         ind = index_in_recv_requests / 3;
00973 
00974         PRINT_DEBUG_RECD( status );
00975 
00976         // OK, received something; decrement incoming counter
00977         incoming--;
00978 
00979         bool done = false;
00980 
00981         error = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind],
00982                              recv_ent_reqs[3 * ind + 1],  // This is for receiving the second message
00983                              recv_ent_reqs[3 * ind + 2],  // This would be for ack, but it is not
00984                                                           // used; consider removing it
00985                              incoming, localOwnedBuffs[ind],
00986                              sendReqs[3 * ind + 1],  // Send request for sending the second message
00987                              sendReqs[3 * ind + 2],  // This is for sending the ack
00988                              done );MB_CHK_SET_ERR( error, "Failed to resize recv buffer" );
00989 
00990         if( done )
00991         {
00992             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
00993 
00994             int from_proc = status.MPI_SOURCE;
00995             int idx       = std::find( send_procs.begin(), send_procs.end(), from_proc ) - send_procs.begin();
00996 
00997             int msg = msgsizes[idx].size();
00998             std::vector< int > recvmsg( msg );
00999             int ndata = senddata[idx].size();
01000             std::vector< EntityHandle > dum_vec( ndata );
01001 
01002             UNPACK_INTS( remoteOwnedBuffs[ind]->buff_ptr, &recvmsg[0], msg );
01003             UNPACK_EH( remoteOwnedBuffs[ind]->buff_ptr, &dum_vec[0], ndata );
01004 
01005             recvdata[idx].insert( recvdata[idx].end(), dum_vec.begin(), dum_vec.end() );
01006         }
01007     }
01008 
01009 #ifdef MOAB_HAVE_MPE
01010     if( myDebug->get_verbosity() == 2 )
01011     {
01012         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_recv_entities." );
01013     }
01014 #endif
01015 
01016     return MB_SUCCESS;
01017 }
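/* Note added for exposition: each message above is laid out as the integer
 * block from msgsizes[i] followed by the handle block from senddata[i]. On
 * receipt the unpack sizes are taken from the receiver's own msgsizes[idx]
 * and senddata[idx] entries for that peer, so the exchange assumes the
 * per-pair message shapes are symmetric between sender and receiver.
 */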
01018 
01019 ErrorCode ParallelComm::update_remote_data( EntityHandle entity,
01020                                             std::vector< int >& procs,
01021                                             std::vector< EntityHandle >& handles )
01022 {
01023     ErrorCode error;
01024     unsigned char pstatus = PSTATUS_INTERFACE;
01025 
01026     int procmin = *std::min_element( procs.begin(), procs.end() );
01027 
01028     if( (int)rank() > procmin )
01029         pstatus |= PSTATUS_NOT_OWNED;
01030     else
01031         procmin = rank();
01032 
01033     // DBG
01034     // std::cout<<"entity = "<<entity<<std::endl;
01035     // for (int j=0; j<procs.size(); j++)
01036     // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
01037     // DBG
01038 
01039     if( (int)procs.size() > 1 )
01040     {
01041         procs.push_back( rank() );
01042         handles.push_back( entity );
01043 
01044         int idx = std::find( procs.begin(), procs.end(), procmin ) - procs.begin();
01045 
01046         std::iter_swap( procs.begin(), procs.begin() + idx );
01047         std::iter_swap( handles.begin(), handles.begin() + idx );
01048 
01049         // DBG
01050         //  std::cout<<"entity = "<<entity<<std::endl;
01051         // for (int j=0; j<procs.size(); j++)
01052         // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
01053         // DBG
01054     }
01055 
01056     // if ((entity == 10388) && (rank()==1))
01057     //    std::cout<<"Here"<<std::endl;
01058 
01059     error = update_remote_data( entity, &procs[0], &handles[0], procs.size(), pstatus );MB_CHK_ERR( error );
01060 
01061     return MB_SUCCESS;
01062 }
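/* Worked example added for exposition: with procs = { 3, 1 } and handles =
 * { h3, h1 } arriving on rank 2, procmin is 1, so PSTATUS_NOT_OWNED is set;
 * the local rank and handle are appended ( procs = { 3, 1, 2 } ), and the
 * owner is swapped to the front ( procs = { 1, 3, 2 } ) before the
 * pointer-based update_remote_data() overload is called with the full
 * sharing list.
 */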
01063 
01064 ErrorCode ParallelComm::get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc )
01065 {
01066     ErrorCode error;
01067     std::vector< EntityHandle > newents;
01068     error = get_remote_handles( true, local_vec, rem_vec, num_ents, to_proc, newents );MB_CHK_ERR( error );
01069 
01070     return MB_SUCCESS;
01071 }
01072 
01073 //////////////////////////////////////////////////////////////////
01074 
01075 ErrorCode ParallelComm::recv_entities( const int from_proc,
01076                                        const bool store_remote_handles,
01077                                        const bool is_iface,
01078                                        Range& final_ents,
01079                                        int& incoming1,
01080                                        int& incoming2,
01081                                        std::vector< std::vector< EntityHandle > >& L1hloc,
01082                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01083                                        std::vector< std::vector< int > >& L1p,
01084                                        std::vector< EntityHandle >& L2hloc,
01085                                        std::vector< EntityHandle >& L2hrem,
01086                                        std::vector< unsigned int >& L2p,
01087                                        std::vector< MPI_Request >& recv_remoteh_reqs,
01088                                        bool /*wait_all*/ )
01089 {
01090 #ifndef MOAB_HAVE_MPI
01091     return MB_FAILURE;
01092 #else
01093     // Non-blocking receive for the first message (having size info)
01094     int ind1 = get_buffers( from_proc );
01095     incoming1++;
01096     PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
01097                        MB_MESG_ENTS_SIZE, incoming1 );
01098     int success = MPI_Irecv( remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc,
01099                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind1] );
01100     if( success != MPI_SUCCESS )
01101     {
01102         MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" );
01103     }
01104 
01105     // Receive messages in while loop
01106     return recv_messages( from_proc, store_remote_handles, is_iface, final_ents, incoming1, incoming2, L1hloc, L1hrem,
01107                           L1p, L2hloc, L2hrem, L2p, recv_remoteh_reqs );
01108 #endif
01109 }
01110 
01111 ErrorCode ParallelComm::recv_entities( std::set< unsigned int >& recv_procs,
01112                                        int incoming1,
01113                                        int incoming2,
01114                                        const bool store_remote_handles,
01115                                        const bool migrate )
01116 {
01117     //===========================================
01118     // Receive/unpack new entities
01119     //===========================================
01120     // Number of incoming messages is the number of procs we communicate with
01121     int success, ind, i;
01122     ErrorCode result;
01123     MPI_Status status;
01124     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
01125     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
01126     std::vector< std::vector< int > > L1p( buffProcs.size() );
01127     std::vector< EntityHandle > L2hloc, L2hrem;
01128     std::vector< unsigned int > L2p;
01129     std::vector< EntityHandle > new_ents;
01130 
01131     while( incoming1 )
01132     {
01133         // Wait for all recvs of ents before proceeding to sending remote handles,
01134         // b/c some procs may have sent to a 3rd proc ents owned by me;
01135         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
01136 
01137         success = MPI_Waitany( 2 * buffProcs.size(), &recvReqs[0], &ind, &status );
01138         if( MPI_SUCCESS != success )
01139         {
01140             MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" );
01141         }
01142 
01143         PRINT_DEBUG_RECD( status );
01144 
01145         // OK, received something; decrement incoming counter
01146         incoming1--;
01147         bool done = false;
01148 
01149         // In case ind is for ack, we need index of one before it
01150         unsigned int base_ind = 2 * ( ind / 2 );
01151         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 2], recvReqs[ind], recvReqs[ind + 1],
01152                               incoming1, localOwnedBuffs[ind / 2], sendReqs[base_ind], sendReqs[base_ind + 1], done,
01153                               ( store_remote_handles ? localOwnedBuffs[ind / 2] : NULL ), MB_MESG_REMOTEH_SIZE,
01154                               &recvRemotehReqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
01155 
01156         if( done )
01157         {
01158             if( myDebug->get_verbosity() == 4 )
01159             {
01160                 msgs.resize( msgs.size() + 1 );
01161                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 2] );
01162             }
01163 
01164             // Message completely received - process buffer that was sent
01165             remoteOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
01166             result = unpack_buffer( remoteOwnedBuffs[ind / 2]->buff_ptr, store_remote_handles, buffProcs[ind / 2],
01167                                     ind / 2, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
01168             if( MB_SUCCESS != result )
01169             {
01170                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
01171                 print_buffer( remoteOwnedBuffs[ind / 2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 2], false );
01172                 return result;
01173             }
01174 
01175             if( recvReqs.size() != 2 * buffProcs.size() )
01176             {
01177                 // Post irecv's for remote handles from new proc
01178                 recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01179                 for( i = recvReqs.size(); i < (int)( 2 * buffProcs.size() ); i += 2 )
01180                 {
01181                     localOwnedBuffs[i / 2]->reset_buffer();
01182                     incoming2++;
01183                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 2], localOwnedBuffs[i / 2]->mem_ptr,
01184                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
01185                     success = MPI_Irecv( localOwnedBuffs[i / 2]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
01186                                          buffProcs[i / 2], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
01187                                          &recvRemotehReqs[i] );
01188                     if( success != MPI_SUCCESS )
01189                     {
01190                         MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" );
01191                     }
01192                 }
01193                 recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01194                 sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01195             }
01196         }
01197     }
01198 
01199     // Assign and remove newly created elements from/to receive processor
01200     result = assign_entities_part( new_ents, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to assign entities to part" );
01201     if( migrate )
01202     {
01203         // result = remove_entities_part(allsent, procConfig.proc_rank());MB_CHK_SET_ERR(result,
01204         // "Failed to remove entities from part");
01205     }
01206 
01207     // Add requests for any new addl procs
01208     if( recvReqs.size() != 2 * buffProcs.size() )
01209     {
01210         // Shouldn't get here...
01211         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in entity exchange" );
01212     }
01213 
01214 #ifdef MOAB_HAVE_MPE
01215     if( myDebug->get_verbosity() == 2 )
01216     {
01217         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending recv entities." );
01218     }
01219 #endif
01220 
01221     //===========================================
01222     // Send local handles for new entity to owner
01223     //===========================================
01224     std::set< unsigned int >::iterator it  = recv_procs.begin();
01225     std::set< unsigned int >::iterator eit = recv_procs.end();
01226     for( ; it != eit; ++it )
01227     {
01228         ind = get_buffers( *it );
01229         // Reserve space on front for size and for initial buff size
01230         remoteOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
01231 
01232         result = pack_remote_handles( L1hloc[ind], L1hrem[ind], L1p[ind], buffProcs[ind], remoteOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
01233         remoteOwnedBuffs[ind]->set_stored_size();
01234 
01235         if( myDebug->get_verbosity() == 4 )
01236         {
01237             msgs.resize( msgs.size() + 1 );
01238             msgs.back() = new Buffer( *remoteOwnedBuffs[ind] );
01239         }
01240         result = send_buffer( buffProcs[ind], remoteOwnedBuffs[ind], MB_MESG_REMOTEH_SIZE, sendReqs[2 * ind],
01241                               recvRemotehReqs[2 * ind + 1], &ackbuff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
01242     }
01243 
01244     //===========================================
01245     // Process remote handles of my ghosteds
01246     //===========================================
01247     while( incoming2 )
01248     {
01249         PRINT_DEBUG_WAITANY( recvRemotehReqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
01250         success = MPI_Waitany( 2 * buffProcs.size(), &recvRemotehReqs[0], &ind, &status );
01251         if( MPI_SUCCESS != success )
01252         {
01253             MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" );
01254         }
01255 
01256         // OK, received something; decrement incoming counter
01257         incoming2--;
01258 
01259         PRINT_DEBUG_RECD( status );
01260         bool done             = false;
01261         unsigned int base_ind = 2 * ( ind / 2 );
01262         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 2], recvRemotehReqs[ind],
01263                               recvRemotehReqs[ind + 1], incoming2, remoteOwnedBuffs[ind / 2], sendReqs[base_ind],
01264                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
01265         if( done )
01266         {
01267             // Incoming remote handles
01268             if( myDebug->get_verbosity() == 4 )
01269             {
01270                 msgs.resize( msgs.size() + 1 );
01271                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 2] );
01272             }
01273 
01274             localOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
01275             result =
01276                 unpack_remote_handles( buffProcs[ind / 2], localOwnedBuffs[ind / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
01277         }
01278     }
01279 
01280 #ifdef MOAB_HAVE_MPE
01281     if( myDebug->get_verbosity() == 2 )
01282     {
01283         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
01284         MPE_Log_event( OWNED_END, procConfig.proc_rank(), "Ending recv entities (still doing checks)." );
01285     }
01286 #endif
01287     myDebug->tprintf( 1, "Exiting recv_entities.\n" );
01288 
01289     return MB_SUCCESS;
01290 }
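/* Note added for exposition: recvReqs and recvRemotehReqs hold two MPI
 * requests per peer (the initial fixed-size receive plus the follow-up
 * "large message" receive), which is why a completed request index ind from
 * MPI_Waitany above maps back to the peer's buffer slot as ind / 2; for
 * example, ind 4 and ind 5 both refer to buffProcs[2].
 */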
01291 
01292 ErrorCode ParallelComm::recv_messages( const int from_proc,
01293                                        const bool store_remote_handles,
01294                                        const bool is_iface,
01295                                        Range& final_ents,
01296                                        int& incoming1,
01297                                        int& incoming2,
01298                                        std::vector< std::vector< EntityHandle > >& L1hloc,
01299                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01300                                        std::vector< std::vector< int > >& L1p,
01301                                        std::vector< EntityHandle >& L2hloc,
01302                                        std::vector< EntityHandle >& L2hrem,
01303                                        std::vector< unsigned int >& L2p,
01304                                        std::vector< MPI_Request >& recv_remoteh_reqs )
01305 {
01306 #ifndef MOAB_HAVE_MPI
01307     return MB_FAILURE;
01308 #else
01309     MPI_Status status;
01310     ErrorCode result;
01311     int ind1 = get_buffers( from_proc );
01312     int success, ind2;
01313     std::vector< EntityHandle > new_ents;
01314 
01315     // Wait and receive messages
01316     while( incoming1 )
01317     {
01318         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
01319         success = MPI_Waitany( 2, &recvReqs[2 * ind1], &ind2, &status );
01320         if( MPI_SUCCESS != success )
01321         {
01322             MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_messages" );
01323         }
01324 
01325         PRINT_DEBUG_RECD( status );
01326 
01327         // OK, received something; decrement incoming counter
01328         incoming1--;
01329         bool done = false;
01330 
01331         // In case ind is for ack, we need index of one before it
01332         ind2 += 2 * ind1;
01333         unsigned int base_ind = 2 * ( ind2 / 2 );
01334 
01335         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind2 / 2],
01336                               // recvbuff,
01337                               recvReqs[ind2], recvReqs[ind2 + 1], incoming1, localOwnedBuffs[ind2 / 2],
01338                               sendReqs[base_ind], sendReqs[base_ind + 1], done,
01339                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind2 / 2] : NULL ),
01340                               MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
01341 
01342         if( done )
01343         {
01344             // If it is done, unpack buffer
01345             remoteOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
01346             result = unpack_buffer( remoteOwnedBuffs[ind2 / 2]->buff_ptr, store_remote_handles, from_proc, ind2 / 2,
01347                                     L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );MB_CHK_SET_ERR( result, "Failed to unpack buffer in recv_messages" );
01348 
01349             std::copy( new_ents.begin(), new_ents.end(), range_inserter( final_ents ) );
01350 
01351             // Send local handles for new elements to owner
01352             // Reserve space on front for size and for initial buff size
01353             remoteOwnedBuffs[ind2 / 2]->reset_buffer( sizeof( int ) );
01354 
01355             result = pack_remote_handles( L1hloc[ind2 / 2], L1hrem[ind2 / 2], L1p[ind2 / 2], from_proc,
01356                                           remoteOwnedBuffs[ind2 / 2] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
01357             remoteOwnedBuffs[ind2 / 2]->set_stored_size();
01358 
01359             result = send_buffer( buffProcs[ind2 / 2], remoteOwnedBuffs[ind2 / 2], MB_MESG_REMOTEH_SIZE, sendReqs[ind2],
01360                                   recv_remoteh_reqs[ind2 + 1], (int*)( localOwnedBuffs[ind2 / 2]->mem_ptr ),
01361                                   //&ackbuff,
01362                                   incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
01363         }
01364     }
01365 
01366     return MB_SUCCESS;
01367 #endif
01368 }
01369 
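// Illustrative sketch (not part of MOAB): the index arithmetic used in
// recv_messages above (and in recv_remote_handle_messages below). MPI_Waitany
// is called on the two-request window belonging to a single processor
// (starting at 2*ind1), so the local completion index (0 or 1) is rebased
// into the full request array before deriving the buffer slot. All names here
// are hypothetical.
struct ExampleReqIndex
{
    int global_req;  // index into the full request array (ind2 after rebasing)
    int buff_slot;   // which per-processor buffer to use (ind2 / 2)
    int base_req;    // first request of that processor's pair (2 * (ind2 / 2))
};

inline ExampleReqIndex example_rebase_waitany_index( int ind1, int local_ind2 )
{
    ExampleReqIndex r;
    r.global_req = local_ind2 + 2 * ind1;     // ind2 += 2 * ind1
    r.buff_slot  = r.global_req / 2;          // e.g. remoteOwnedBuffs[ind2 / 2]
    r.base_req   = 2 * ( r.global_req / 2 );  // base_ind = 2 * (ind2 / 2)
    return r;
}
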
01370 ErrorCode ParallelComm::recv_remote_handle_messages( const int from_proc,
01371                                                      int& incoming2,
01372                                                      std::vector< EntityHandle >& L2hloc,
01373                                                      std::vector< EntityHandle >& L2hrem,
01374                                                      std::vector< unsigned int >& L2p,
01375                                                      std::vector< MPI_Request >& recv_remoteh_reqs )
01376 {
01377 #ifndef MOAB_HAVE_MPI
01378     return MB_FAILURE;
01379 #else
01380     MPI_Status status;
01381     ErrorCode result;
01382     int ind1 = get_buffers( from_proc );
01383     int success, ind2;
01384 
01385     while( incoming2 )
01386     {
01387         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
01388         success = MPI_Waitany( 2, &recv_remoteh_reqs[2 * ind1], &ind2, &status );
01389         if( MPI_SUCCESS != success )
01390         {
01391             MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_remote_handle_messages" );
01392         }
01393 
01394         // OK, received something; decrement incoming counter
01395         incoming2--;
01396 
01397         PRINT_DEBUG_RECD( status );
01398 
01399         bool done = false;
01400         ind2 += 2 * ind1;
01401         unsigned int base_ind = 2 * ( ind2 / 2 );
01402         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind2 / 2], recv_remoteh_reqs[ind2],
01403                               recv_remoteh_reqs[ind2 + 1], incoming2, remoteOwnedBuffs[ind2 / 2], sendReqs[base_ind],
01404                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
01405         if( done )
01406         {
01407             // Incoming remote handles
01408             localOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
01409             result =
01410                 unpack_remote_handles( buffProcs[ind2 / 2], localOwnedBuffs[ind2 / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
01411         }
01412     }
01413 
01414     return MB_SUCCESS;
01415 #endif
01416 }
01417 
01418 ErrorCode ParallelComm::pack_buffer( Range& orig_ents,
01419                                      const bool /*adjacencies*/,
01420                                      const bool tags,
01421                                      const bool store_remote_handles,
01422                                      const int to_proc,
01423                                      Buffer* buff,
01424                                      TupleList* entprocs,
01425                                      Range* allsent )
01426 {
01427     // Pack the buffer with the entity ranges, adjacencies, and tags sections
01428     //
01429     // Note: new entities used in subsequent connectivity lists, sets, or tags,
01430     // are referred to as (MBMAXTYPE + index), where index is into vector
01431     // of new entities, 0-based
01432     ErrorCode result;
01433 
01434     Range set_range;
01435     std::vector< Tag > all_tags;
01436     std::vector< Range > tag_ranges;
01437 
01438     Range::const_iterator rit;
01439 
01440     // Entities
01441     result = pack_entities( orig_ents, buff, store_remote_handles, to_proc, false, entprocs, allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
01442 
01443     // Sets
01444     result = pack_sets( orig_ents, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing sets (count) failed" );
01445 
01446     // Tags
01447     Range final_ents;
01448     if( tags )
01449     {
01450         result = get_tag_send_list( orig_ents, all_tags, tag_ranges );MB_CHK_SET_ERR( result, "Failed to get tagged entities" );
01451         result = pack_tags( orig_ents, all_tags, all_tags, tag_ranges, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing tags (count) failed" );
01452     }
01453     else
01454     {  // Set tag size to 0
01455         buff->check_space( sizeof( int ) );
01456         PACK_INT( buff->buff_ptr, 0 );
01457         buff->set_stored_size();
01458     }
01459 
01460     return result;
01461 }
01462 
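// Illustrative sketch (not part of MOAB): the section order produced by
// pack_buffer above. A buffer always carries an entity section, then a set
// section, then a tag section; when tags are not requested, a single integer
// 0 is written so the unpacking side still finds an (empty) tag count. The
// helper below only mimics that layout with plain memcpy and a hypothetical
// byte vector; it is not the packing path itself.
inline void example_append_empty_tag_section( std::vector< unsigned char >& wire )
{
    const int zero_tags   = 0;
    const size_t old_size = wire.size();
    wire.resize( old_size + sizeof( int ) );
    memcpy( &wire[old_size], &zero_tags, sizeof( int ) );  // same bytes PACK_INT writes for "no tags"
}
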
01463 ErrorCode ParallelComm::unpack_buffer( unsigned char* buff_ptr,
01464                                        const bool store_remote_handles,
01465                                        const int from_proc,
01466                                        const int ind,
01467                                        std::vector< std::vector< EntityHandle > >& L1hloc,
01468                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01469                                        std::vector< std::vector< int > >& L1p,
01470                                        std::vector< EntityHandle >& L2hloc,
01471                                        std::vector< EntityHandle >& L2hrem,
01472                                        std::vector< unsigned int >& L2p,
01473                                        std::vector< EntityHandle >& new_ents,
01474                                        const bool created_iface )
01475 {
01476     unsigned char* tmp_buff = buff_ptr;
01477     ErrorCode result;
01478     result = unpack_entities( buff_ptr, store_remote_handles, ind, false, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
01479                               new_ents, created_iface );MB_CHK_SET_ERR( result, "Unpacking entities failed" );
01480     if( myDebug->get_verbosity() == 3 )
01481     {
01482         myDebug->tprintf( 3, "unpack_entities buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01483         tmp_buff = buff_ptr;
01484     }
01485     result = unpack_sets( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking sets failed" );
01486     if( myDebug->get_verbosity() == 3 )
01487     {
01488         myDebug->tprintf( 3, "unpack_sets buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01489         tmp_buff = buff_ptr;
01490     }
01491     result = unpack_tags( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking tags failed" );
01492     if( myDebug->get_verbosity() == 3 )
01493     {
01494         myDebug->tprintf( 3, "unpack_tags buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01495         // tmp_buff = buff_ptr;
01496     }
01497 
01498     if( myDebug->get_verbosity() == 3 ) myDebug->print( 3, "\n" );
01499 
01500     return MB_SUCCESS;
01501 }
01502 
01503 int ParallelComm::estimate_ents_buffer_size( Range& entities, const bool store_remote_handles )
01504 {
01505     int buff_size = 0;
01506     std::vector< EntityHandle > dum_connect_vec;
01507     const EntityHandle* connect;
01508     int num_connect;
01509 
01510     int num_verts = entities.num_of_type( MBVERTEX );
01511     // # verts + coords + handles
01512     buff_size += 2 * sizeof( int ) + 3 * sizeof( double ) * num_verts;
01513     if( store_remote_handles ) buff_size += sizeof( EntityHandle ) * num_verts;
01514 
01515     // Do a rough count by looking at first entity of each type
01516     for( EntityType t = MBEDGE; t < MBENTITYSET; t++ )
01517     {
01518         const Range::iterator rit = entities.lower_bound( t );
01519         if( TYPE_FROM_HANDLE( *rit ) != t ) continue;
01520 
01521         ErrorCode result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect_vec );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get connectivity to estimate buffer size", -1 );
01522 
01523         // Number, type, nodes per entity
01524         buff_size += 3 * sizeof( int );
01525         int num_ents = entities.num_of_type( t );
01526         // Connectivity, handle for each ent
01527         buff_size += ( num_connect + 1 ) * sizeof( EntityHandle ) * num_ents;
01528     }
01529 
01530     // Extra entity type at end, passed as int
01531     buff_size += sizeof( int );
01532 
01533     return buff_size;
01534 }
01535 
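// Illustrative sketch (not part of MOAB): the vertex portion of the estimate
// computed above. With remote handles stored, each vertex costs 3 coordinate
// doubles plus one EntityHandle, on top of a small fixed header. On a typical
// 64-bit build (4-byte int, 8-byte double and EntityHandle), 1000 vertices
// come to 8 + 24000 + 8000 = 32008 bytes. The helper name is hypothetical.
inline int example_vertex_buffer_estimate( int num_verts, bool store_remote_handles )
{
    int bytes = 2 * (int)sizeof( int ) + 3 * (int)sizeof( double ) * num_verts;   // header + coords
    if( store_remote_handles ) bytes += (int)sizeof( EntityHandle ) * num_verts;  // one handle per vertex
    return bytes;
}
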
01536 int ParallelComm::estimate_sets_buffer_size( Range& entities, const bool /*store_remote_handles*/ )
01537 {
01538     // Number of sets
01539     int buff_size = sizeof( int );
01540 
01541     // Do a rough count by looking at first entity of each type
01542     Range::iterator rit = entities.lower_bound( MBENTITYSET );
01543     ErrorCode result;
01544 
01545     for( ; rit != entities.end(); ++rit )
01546     {
01547         unsigned int options;
01548         result = mbImpl->get_meshset_options( *rit, options );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get meshset options", -1 );
01549 
01550         buff_size += sizeof( int );
01551 
01552         Range set_range;
01553         if( options & MESHSET_SET )
01554         {
01555             // Range-based set; count the subranges
01556             result = mbImpl->get_entities_by_handle( *rit, set_range );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get set entities", -1 );
01557 
01558             // Set range
01559             buff_size += RANGE_SIZE( set_range );
01560         }
01561         else if( options & MESHSET_ORDERED )
01562         {
01563             // Just get the number of entities in the set
01564             int num_ents;
01565             result = mbImpl->get_number_entities_by_handle( *rit, num_ents );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get number entities in ordered set", -1 );
01566 
01567             // Set vec
01568             buff_size += sizeof( EntityHandle ) * num_ents + sizeof( int );
01569         }
01570 
01571         // Get numbers of parents/children
01572         int num_par, num_ch;
01573         result = mbImpl->num_child_meshsets( *rit, &num_ch );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num children", -1 );
01574         result = mbImpl->num_parent_meshsets( *rit, &num_par );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num parents", -1 );
01575 
01576         buff_size += ( num_ch + num_par ) * sizeof( EntityHandle ) + 2 * sizeof( int );
01577     }
01578 
01579     return buff_size;
01580 }
01581 
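// Illustrative sketch (not part of MOAB): the per-set cost accumulated above.
// Each set contributes its option flag, its contents (a packed Range for
// MESHSET_SET sets, or a handle vector plus a count for MESHSET_ORDERED
// sets), and its parent/child handle lists. The ordered-set case is shown;
// the helper name is hypothetical.
inline int example_ordered_set_estimate( int num_set_ents, int num_parents, int num_children )
{
    int bytes = (int)sizeof( int );                                            // option flag
    bytes += (int)sizeof( EntityHandle ) * num_set_ents + (int)sizeof( int );  // handle vector + count
    bytes += ( num_parents + num_children ) * (int)sizeof( EntityHandle ) + 2 * (int)sizeof( int );
    return bytes;
}
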
01582 ErrorCode ParallelComm::pack_entities( Range& entities,
01583                                        Buffer* buff,
01584                                        const bool store_remote_handles,
01585                                        const int to_proc,
01586                                        const bool /*is_iface*/,
01587                                        TupleList* entprocs,
01588                                        Range* /*allsent*/ )
01589 {
01590     // Packed information:
01591     // 1. # entities = E
01592     // 2. for e in E
01593     //   a. # procs sharing e, incl. sender and receiver = P
01594     //   b. for p in P (procs sharing e)
01595     //   c. for p in P (handle for e on p) (Note1)
01596     // 3. vertex/entity info
01597 
01598     // Get an estimate of the buffer size & pre-allocate buffer size
01599     int buff_size = estimate_ents_buffer_size( entities, store_remote_handles );
01600     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate ents buffer size" );
01601     buff->check_space( buff_size );
01602     myDebug->tprintf( 3, "estimate buffer size for %d entities: %d \n", (int)entities.size(), buff_size );
01603 
01604     unsigned int num_ents;
01605     ErrorCode result;
01606 
01607     std::vector< EntityHandle > entities_vec( entities.size() );
01608     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
01609 
01610     // First pack procs/handles sharing this ent, not including this dest but including
01611     // others (with zero handles)
01612     if( store_remote_handles )
01613     {
01614         // Buff space is at least proc + handle for each entity; use avg of 4 other procs
01615         // to estimate buff size, but check later
01616         buff->check_space( sizeof( int ) + ( 5 * sizeof( int ) + sizeof( EntityHandle ) ) * entities.size() );
01617 
01618         // 1. # entities = E
01619         PACK_INT( buff->buff_ptr, entities.size() );
01620 
01621         Range::iterator rit;
01622 
01623         // Pre-fetch sharedp and pstatus
01624         std::vector< int > sharedp_vals( entities.size() );
01625         result = mbImpl->tag_get_data( sharedp_tag(), entities, &sharedp_vals[0] );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
01626         std::vector< char > pstatus_vals( entities.size() );
01627         result = mbImpl->tag_get_data( pstatus_tag(), entities, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
01628 
01629         unsigned int i;
01630         int tmp_procs[MAX_SHARING_PROCS];
01631         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01632         std::set< unsigned int > dumprocs;
01633 
01634         // 2. for e in E
01635         for( rit = entities.begin(), i = 0; rit != entities.end(); ++rit, i++ )
01636         {
01637             unsigned int ind =
01638                 std::lower_bound( entprocs->vul_rd, entprocs->vul_rd + entprocs->get_n(), *rit ) - entprocs->vul_rd;
01639             assert( ind < entprocs->get_n() );
01640 
01641             while( ind < entprocs->get_n() && entprocs->vul_rd[ind] == *rit )
01642                 dumprocs.insert( entprocs->vi_rd[ind++] );
01643 
01644             result = build_sharedhps_list( *rit, pstatus_vals[i], sharedp_vals[i], dumprocs, num_ents, tmp_procs,
01645                                            tmp_handles );MB_CHK_SET_ERR( result, "Failed to build sharedhps" );
01646 
01647             dumprocs.clear();
01648 
01649             // Now pack them
01650             buff->check_space( ( num_ents + 1 ) * sizeof( int ) + num_ents * sizeof( EntityHandle ) );
01651             PACK_INT( buff->buff_ptr, num_ents );
01652             PACK_INTS( buff->buff_ptr, tmp_procs, num_ents );
01653             PACK_EH( buff->buff_ptr, tmp_handles, num_ents );
01654 
01655 #ifndef NDEBUG
01656             // Check for duplicates in proc list
01657             unsigned int dp = 0;
01658             for( ; dp < MAX_SHARING_PROCS && -1 != tmp_procs[dp]; dp++ )
01659                 dumprocs.insert( tmp_procs[dp] );
01660             assert( dumprocs.size() == dp );
01661             dumprocs.clear();
01662 #endif
01663         }
01664     }
01665 
01666     // Pack vertices
01667     Range these_ents = entities.subset_by_type( MBVERTEX );
01668     num_ents         = these_ents.size();
01669 
01670     if( num_ents )
01671     {
01672         buff_size = 2 * sizeof( int ) + 3 * num_ents * sizeof( double );
01673         buff->check_space( buff_size );
01674 
01675         // Type, # ents
01676         PACK_INT( buff->buff_ptr, ( (int)MBVERTEX ) );
01677         PACK_INT( buff->buff_ptr, ( (int)num_ents ) );
01678 
01679         std::vector< double > tmp_coords( 3 * num_ents );
01680         result = mbImpl->get_coords( these_ents, &tmp_coords[0] );MB_CHK_SET_ERR( result, "Failed to get vertex coordinates" );
01681         PACK_DBLS( buff->buff_ptr, &tmp_coords[0], 3 * num_ents );
01682 
01683         myDebug->tprintf( 4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01684                           CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01685     }
01686 
01687     // Now entities; go through range, packing by type and equal # verts per element
01688     Range::iterator start_rit = entities.find( *these_ents.rbegin() );
01689     ++start_rit;
01690     int last_nodes       = -1;
01691     EntityType last_type = MBMAXTYPE;
01692     these_ents.clear();
01693     Range::iterator end_rit = start_rit;
01694     EntitySequence* seq;
01695     ElementSequence* eseq;
01696 
01697     while( start_rit != entities.end() || !these_ents.empty() )
01698     {
01699         // Cases:
01700         // A: !end, last_type == MBMAXTYPE, seq: save contig sequence in these_ents
01701         // B: !end, last type & nodes same, seq: save contig sequence in these_ents
01702         // C: !end, last type & nodes different: pack these_ents, then save contig sequence in these_ents
01703         // D: end: pack these_ents
01704 
01705         // Find the sequence holding current start entity, if we're not at end
01706         eseq = NULL;
01707         if( start_rit != entities.end() )
01708         {
01709             result = sequenceManager->find( *start_rit, seq );MB_CHK_SET_ERR( result, "Failed to find entity sequence" );
01710             if( NULL == seq ) return MB_FAILURE;
01711             eseq = dynamic_cast< ElementSequence* >( seq );
01712         }
01713 
01714         // Pack the last batch if at end or next one is different
01715         if( !these_ents.empty() &&
01716             ( !eseq || eseq->type() != last_type || last_nodes != (int)eseq->nodes_per_element() ) )
01717         {
01718             result = pack_entity_seq( last_nodes, store_remote_handles, to_proc, these_ents, entities_vec, buff );MB_CHK_SET_ERR( result, "Failed to pack entities from a sequence" );
01719             these_ents.clear();
01720         }
01721 
01722         if( eseq )
01723         {
01724             // Continuation of current range, just save these entities
01725             // Get position in entities list one past end of this sequence
01726             end_rit = entities.lower_bound( start_rit, entities.end(), eseq->end_handle() + 1 );
01727 
01728             // Put these entities in the range
01729             std::copy( start_rit, end_rit, range_inserter( these_ents ) );
01730 
01731             last_type  = eseq->type();
01732             last_nodes = eseq->nodes_per_element();
01733         }
01734         else if( start_rit != entities.end() && TYPE_FROM_HANDLE( *start_rit ) == MBENTITYSET )
01735             break;
01736 
01737         start_rit = end_rit;
01738     }
01739 
01740     // Pack MBMAXTYPE to indicate end of ranges
01741     buff->check_space( sizeof( int ) );
01742     PACK_INT( buff->buff_ptr, ( (int)MBMAXTYPE ) );
01743 
01744     buff->set_stored_size();
01745     return MB_SUCCESS;
01746 }
01747 
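// Illustrative sketch (not part of MOAB): the per-entity sharing header that
// pack_entities writes when store_remote_handles is true. Each entity is
// preceded by P = # sharing procs, the P ranks, and P handles (zero where the
// handle on that proc is not yet known); typed connectivity blocks follow and
// the stream ends with (int)MBMAXTYPE. The helper only mimics the byte layout
// with memcpy into a hypothetical byte vector; it is not the packing path.
inline void example_append_sharing_header( std::vector< unsigned char >& wire,
                                           const std::vector< int >& procs,
                                           const std::vector< EntityHandle >& handles )
{
    assert( procs.size() == handles.size() );
    const int nprocs = (int)procs.size();
    size_t pos       = wire.size();
    wire.resize( pos + sizeof( int ) + nprocs * ( sizeof( int ) + sizeof( EntityHandle ) ) );
    memcpy( &wire[pos], &nprocs, sizeof( int ) );  // P
    pos += sizeof( int );
    if( nprocs )
    {
        memcpy( &wire[pos], &procs[0], nprocs * sizeof( int ) );  // P sharing ranks
        pos += nprocs * sizeof( int );
        memcpy( &wire[pos], &handles[0], nprocs * sizeof( EntityHandle ) );  // P handles (0 if unknown)
    }
}
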
01748 ErrorCode ParallelComm::build_sharedhps_list( const EntityHandle entity,
01749                                               const unsigned char pstatus,
01750                                               const int
01751 #ifndef NDEBUG
01752                                                   sharedp
01753 #endif
01754                                               ,
01755                                               const std::set< unsigned int >& procs,
01756                                               unsigned int& num_ents,
01757                                               int* tmp_procs,
01758                                               EntityHandle* tmp_handles )
01759 {
01760     num_ents = 0;
01761     unsigned char pstat;
01762     ErrorCode result = get_sharing_data( entity, tmp_procs, tmp_handles, pstat, num_ents );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
01763     assert( pstat == pstatus );
01764 
01765     // Build shared proc/handle lists
01766     // Start with multi-shared, since if it is the owner will be first
01767     if( pstatus & PSTATUS_MULTISHARED )
01768     {
01769     }
01770     else if( pstatus & PSTATUS_NOT_OWNED )
01771     {
01772         // If not multishared and not owned, other sharing proc is owner, put that
01773         // one first
01774         assert( "If not owned, I should be shared too" && pstatus & PSTATUS_SHARED && 1 == num_ents );
01775         tmp_procs[1]   = procConfig.proc_rank();
01776         tmp_handles[1] = entity;
01777         num_ents       = 2;
01778     }
01779     else if( pstatus & PSTATUS_SHARED )
01780     {
01781         // If not multishared and owned, I'm owner
01782         assert( "shared and owned, should be only 1 sharing proc" && 1 == num_ents );
01783         tmp_procs[1]   = tmp_procs[0];
01784         tmp_procs[0]   = procConfig.proc_rank();
01785         tmp_handles[1] = tmp_handles[0];
01786         tmp_handles[0] = entity;
01787         num_ents       = 2;
01788     }
01789     else
01790     {
01791         // Not shared yet, just add owner (me)
01792         tmp_procs[0]   = procConfig.proc_rank();
01793         tmp_handles[0] = entity;
01794         num_ents       = 1;
01795     }
01796 
01797 #ifndef NDEBUG
01798     int tmp_ps = num_ents;
01799 #endif
01800 
01801     // Now add others, with zero handle for now
01802     for( std::set< unsigned int >::iterator sit = procs.begin(); sit != procs.end(); ++sit )
01803     {
01804 #ifndef NDEBUG
01805         if( tmp_ps && std::find( tmp_procs, tmp_procs + tmp_ps, *sit ) != tmp_procs + tmp_ps )
01806         {
01807             std::cerr << "Trouble with something already in shared list on proc " << procConfig.proc_rank()
01808                       << ". Entity:" << std::endl;
01809             list_entities( &entity, 1 );
01810             std::cerr << "pstatus = " << (int)pstatus << ", sharedp = " << sharedp << std::endl;
01811             std::cerr << "tmp_ps = ";
01812             for( int i = 0; i < tmp_ps; i++ )
01813                 std::cerr << tmp_procs[i] << " ";
01814             std::cerr << std::endl;
01815             std::cerr << "procs = ";
01816             for( std::set< unsigned int >::iterator sit2 = procs.begin(); sit2 != procs.end(); ++sit2 )
01817                 std::cerr << *sit2 << " ";
01818             assert( false );
01819         }
01820 #endif
01821         tmp_procs[num_ents]   = *sit;
01822         tmp_handles[num_ents] = 0;
01823         num_ents++;
01824     }
01825 
01826     // Put -1 after procs and 0 after handles
01827     if( MAX_SHARING_PROCS > num_ents )
01828     {
01829         tmp_procs[num_ents]   = -1;
01830         tmp_handles[num_ents] = 0;
01831     }
01832 
01833     return MB_SUCCESS;
01834 }
01835 
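// Illustrative sketch (not part of MOAB): the ordering contract produced by
// build_sharedhps_list above. Slot 0 always holds the owning processor with a
// real (nonzero) handle, the remaining sharing procs follow (possibly with
// zero handles), and the lists are terminated with -1 / 0 when shorter than
// MAX_SHARING_PROCS. The checker name is hypothetical.
inline bool example_check_sharedhps( const int* procs, const EntityHandle* handles, unsigned int num_ents )
{
    if( 0 == num_ents || 0 == handles[0] || procs[0] < 0 ) return false;
    if( (unsigned int)MAX_SHARING_PROCS > num_ents && ( -1 != procs[num_ents] || 0 != handles[num_ents] ) )
        return false;
    return true;
}
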
01836 ErrorCode ParallelComm::pack_entity_seq( const int nodes_per_entity,
01837                                          const bool store_remote_handles,
01838                                          const int to_proc,
01839                                          Range& these_ents,
01840                                          std::vector< EntityHandle >& entities_vec,
01841                                          Buffer* buff )
01842 {
01843     int tmp_space = 3 * sizeof( int ) + nodes_per_entity * these_ents.size() * sizeof( EntityHandle );
01844     buff->check_space( tmp_space );
01845 
01846     // Pack the entity type
01847     PACK_INT( buff->buff_ptr, ( (int)TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01848 
01849     // Pack # ents
01850     PACK_INT( buff->buff_ptr, these_ents.size() );
01851 
01852     // Pack the nodes per entity
01853     PACK_INT( buff->buff_ptr, nodes_per_entity );
01854     myDebug->tprintf( 3, "Buffer size after packing type/count/nodes-per-entity headers: %d\n", buff->get_current_size() );
01855 
01856     // Pack the connectivity
01857     std::vector< EntityHandle > connect;
01858     ErrorCode result = MB_SUCCESS;
01859     for( Range::const_iterator rit = these_ents.begin(); rit != these_ents.end(); ++rit )
01860     {
01861         connect.clear();
01862         result = mbImpl->get_connectivity( &( *rit ), 1, connect, false );MB_CHK_SET_ERR( result, "Failed to get connectivity" );
01863         assert( (int)connect.size() == nodes_per_entity );
01864         result =
01865             get_remote_handles( store_remote_handles, &connect[0], &connect[0], connect.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
01866         PACK_EH( buff->buff_ptr, &connect[0], connect.size() );
01867     }
01868 
01869     myDebug->tprintf( 3, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01870                       CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01871 
01872     return result;
01873 }
01874 
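// Illustrative sketch (not part of MOAB): the size of one typed block written
// by pack_entity_seq above, i.e. three integer headers (type, # entities,
// nodes per entity) followed by the remapped connectivity. On a typical
// 64-bit build (4-byte int, 8-byte handles), 100 8-node hexes occupy
// 12 + 100*8*8 = 6412 bytes. The helper name is hypothetical.
inline int example_entity_block_bytes( int nodes_per_entity, int num_ents )
{
    return 3 * (int)sizeof( int ) + nodes_per_entity * num_ents * (int)sizeof( EntityHandle );
}
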
01875 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles,
01876                                             EntityHandle* from_vec,
01877                                             EntityHandle* to_vec_tmp,
01878                                             int num_ents,
01879                                             int to_proc,
01880                                             const std::vector< EntityHandle >& new_ents )
01881 {
01882     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE RANGE-BASED VERSION, NO REUSE
01883     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01884     // OTHER VERSION TOO!!!
01885     if( 0 == num_ents ) return MB_SUCCESS;
01886 
01887     // Use a local destination ptr in case we're doing an in-place copy
01888     std::vector< EntityHandle > tmp_vector;
01889     EntityHandle* to_vec = to_vec_tmp;
01890     if( to_vec == from_vec )
01891     {
01892         tmp_vector.resize( num_ents );
01893         to_vec = &tmp_vector[0];
01894     }
01895 
01896     if( !store_remote_handles )
01897     {
01898         int err;
01899         // In this case, substitute position in new_ents list
01900         for( int i = 0; i < num_ents; i++ )
01901         {
01902             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
01903             assert( new_ents[ind] == from_vec[i] );
01904             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
01905             assert( to_vec[i] != 0 && !err && -1 != ind );
01906         }
01907     }
01908     else
01909     {
01910         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
01911         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
01912 
01913         // Get single-proc destination handles and shared procs
01914         std::vector< int > sharing_procs( num_ents );
01915         result = mbImpl->tag_get_data( shh_tag, from_vec, num_ents, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
01916         result = mbImpl->tag_get_data( shp_tag, from_vec, num_ents, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
01917         for( int j = 0; j < num_ents; j++ )
01918         {
01919             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
01920         }
01921 
01922         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01923         int tmp_procs[MAX_SHARING_PROCS];
01924         int i;
01925         // Go through results, and for 0-valued ones, look for multiple shared proc
01926         for( i = 0; i < num_ents; i++ )
01927         {
01928             if( !to_vec[i] )
01929             {
01930                 result = mbImpl->tag_get_data( shps_tag, from_vec + i, 1, tmp_procs );
01931                 if( MB_SUCCESS == result )
01932                 {
01933                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
01934                     {
01935                         if( -1 == tmp_procs[j] )
01936                             break;
01937                         else if( tmp_procs[j] == to_proc )
01938                         {
01939                             result = mbImpl->tag_get_data( shhs_tag, from_vec + i, 1, tmp_handles );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
01940                             to_vec[i] = tmp_handles[j];
01941                             assert( to_vec[i] );
01942                             break;
01943                         }
01944                     }
01945                 }
01946                 if( !to_vec[i] )
01947                 {
01948                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
01949                     if( (int)new_ents.size() == j )
01950                     {
01951                         std::cout << "Failed to find new entity in send list, proc " << procConfig.proc_rank()
01952                                   << std::endl;
01953                         for( int k = 0; k < num_ents; k++ )
01954                             std::cout << k << ": " << from_vec[k] << " " << to_vec[k] << std::endl;
01955                         MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
01956                     }
01957                     int err;
01958                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
01959                     if( err )
01960                     {
01961                         MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" );
01962                     }
01963                 }
01964             }
01965         }
01966     }
01967 
01968     // memcpy over results if from_vec and to_vec are the same
01969     if( to_vec_tmp == from_vec ) memcpy( from_vec, to_vec, num_ents * sizeof( EntityHandle ) );
01970 
01971     return MB_SUCCESS;
01972 }
01973 
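// Illustrative sketch (not part of MOAB): the fallback encoding used above
// when remote handles are not stored. The entity's position in the sorted
// new_ents vector is wrapped into a handle of type MBMAXTYPE, which the
// receiving side later decodes back into an index. The helper name is
// hypothetical; a zero return means the entity was not in the send list or
// the index did not fit.
inline EntityHandle example_index_encoded_handle( const std::vector< EntityHandle >& new_ents, EntityHandle ent )
{
    int err = 0;
    int ind = (int)( std::lower_bound( new_ents.begin(), new_ents.end(), ent ) - new_ents.begin() );
    if( ind == (int)new_ents.size() || new_ents[ind] != ent ) return 0;  // not in the send list
    EntityHandle encoded = CREATE_HANDLE( MBMAXTYPE, ind, err );
    return err ? 0 : encoded;
}
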
01974 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles,
01975                                             const Range& from_range,
01976                                             EntityHandle* to_vec,
01977                                             int to_proc,
01978                                             const std::vector< EntityHandle >& new_ents )
01979 {
01980     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE VECTOR-BASED VERSION, NO REUSE
01981     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01982     // OTHER VERSION TOO!!!
01983     if( from_range.empty() ) return MB_SUCCESS;
01984 
01985     if( !store_remote_handles )
01986     {
01987         int err;
01988         // In this case, substitute position in new_ents list
01989         Range::iterator rit;
01990         unsigned int i;
01991         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
01992         {
01993             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
01994             assert( new_ents[ind] == *rit );
01995             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
01996             assert( to_vec[i] != 0 && !err && -1 != ind );
01997         }
01998     }
01999     else
02000     {
02001         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
02002         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
02003 
02004         // Get single-proc destination handles and shared procs
02005         std::vector< int > sharing_procs( from_range.size() );
02006         result = mbImpl->tag_get_data( shh_tag, from_range, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
02007         result = mbImpl->tag_get_data( shp_tag, from_range, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
02008         for( unsigned int j = 0; j < from_range.size(); j++ )
02009         {
02010             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
02011         }
02012 
02013         EntityHandle tmp_handles[MAX_SHARING_PROCS];
02014         int tmp_procs[MAX_SHARING_PROCS];
02015         // Go through results, and for 0-valued ones, look for multiple shared proc
02016         Range::iterator rit;
02017         unsigned int i;
02018         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
02019         {
02020             if( !to_vec[i] )
02021             {
02022                 result = mbImpl->tag_get_data( shhs_tag, &( *rit ), 1, tmp_handles );
02023                 if( MB_SUCCESS == result )
02024                 {
02025                     result = mbImpl->tag_get_data( shps_tag, &( *rit ), 1, tmp_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
02026                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
02027                         if( tmp_procs[j] == to_proc )
02028                         {
02029                             to_vec[i] = tmp_handles[j];
02030                             break;
02031                         }
02032                 }
02033 
02034                 if( !to_vec[i] )
02035                 {
02036                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
02037                     if( (int)new_ents.size() == j )
02038                     {
02039                         MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
02040                     }
02041                     int err;
02042                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
02043                     if( err )
02044                     {
02045                         MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" );
02046                     }
02047                 }
02048             }
02049         }
02050     }
02051 
02052     return MB_SUCCESS;
02053 }
02054 
02055 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles,
02056                                             const Range& from_range,
02057                                             Range& to_range,
02058                                             int to_proc,
02059                                             const std::vector< EntityHandle >& new_ents )
02060 {
02061     std::vector< EntityHandle > to_vector( from_range.size() );
02062 
02063     ErrorCode result = get_remote_handles( store_remote_handles, from_range, &to_vector[0], to_proc, new_ents );MB_CHK_SET_ERR( result, "Failed to get remote handles" );
02064     std::copy( to_vector.begin(), to_vector.end(), range_inserter( to_range ) );
02065     return result;
02066 }
02067 
02068 ErrorCode ParallelComm::unpack_entities( unsigned char*& buff_ptr,
02069                                          const bool store_remote_handles,
02070                                          const int /*from_ind*/,
02071                                          const bool is_iface,
02072                                          std::vector< std::vector< EntityHandle > >& L1hloc,
02073                                          std::vector< std::vector< EntityHandle > >& L1hrem,
02074                                          std::vector< std::vector< int > >& L1p,
02075                                          std::vector< EntityHandle >& L2hloc,
02076                                          std::vector< EntityHandle >& L2hrem,
02077                                          std::vector< unsigned int >& L2p,
02078                                          std::vector< EntityHandle >& new_ents,
02079                                          const bool created_iface )
02080 {
02081     // General algorithm:
02082     // - unpack # entities
02083     // - save start of remote handle info, then scan forward to entity definition data
02084     // - for all vertices or entities w/ same # verts:
02085     //   . get entity type, num ents, and (if !vert) # verts
02086     //   . for each ent:
02087     //      o get # procs/handles in remote handle info
02088     //      o if # procs/handles > 2, check for already-created entity:
02089     //        x get index of owner proc (1st in proc list), resize L1 list if nec
02090     //        x look for already-arrived entity in L2 by owner handle
02091     //      o if no existing entity:
02092     //        x if iface, look for existing entity with same connect & type
02093     //        x if none found, create vertex or element
02094     //        x if !iface & multi-shared, save on L2
02095     //        x if !iface, put new entity on new_ents list
02096     //      o update proc/handle, pstatus tags, adjusting to put owner first if iface
02097     //      o if !iface, save new handle on L1 for all sharing procs
02098 
02099     // Lists of handles/procs to return to sending/other procs
02100     // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
02101     //         and h' is either the remote proc handle (if that is known) or
02102     //         the owner proc handle (otherwise);
02103     // L1p[p]: indicates whether h' is the remote proc handle (= -1) or the owner proc handle (= rank of owner)
02104     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
02105     //         remote handles are on owning proc
02106     // L2p: owning procs for handles in L2hrem
02107 
02108     ErrorCode result;
02109     bool done         = false;
02110     ReadUtilIface* ru = NULL;
02111 
02112     result = mbImpl->query_interface( ru );MB_CHK_SET_ERR( result, "Failed to get ReadUtilIface" );
02113 
02114     // 1. # entities = E
02115     int num_ents             = 0;
02116     unsigned char* buff_save = buff_ptr;
02117     int i, j;
02118 
02119     if( store_remote_handles )
02120     {
02121         UNPACK_INT( buff_ptr, num_ents );
02122 
02123         buff_save = buff_ptr;
02124 
02125         // Save place where remote handle info starts, then scan forward to ents
02126         for( i = 0; i < num_ents; i++ )
02127         {
02128             UNPACK_INT( buff_ptr, j );
02129             if( j < 0 )
02130             {
02131                 std::cout << "Number of procs/handles should be non-negative." << std::endl;
02132                 return MB_FAILURE;
02133             }
02134 
02135             buff_ptr += j * ( sizeof( int ) + sizeof( EntityHandle ) );
02136         }
02137     }
02138 
02139     std::vector< EntityHandle > msg_ents;
02140 
02141     while( !done )
02142     {
02143         EntityType this_type = MBMAXTYPE;
02144         UNPACK_TYPE( buff_ptr, this_type );
02145         assert( this_type != MBENTITYSET );
02146 
02147         // MBMAXTYPE signifies end of entities data
02148         if( MBMAXTYPE == this_type ) break;
02149 
02150         // Get the number of ents
02151         int num_ents2, verts_per_entity = 0;
02152         UNPACK_INT( buff_ptr, num_ents2 );
02153 
02154         // Unpack the nodes per entity
02155         if( MBVERTEX != this_type && num_ents2 )
02156         {
02157             UNPACK_INT( buff_ptr, verts_per_entity );
02158         }
02159 
02160         std::vector< int > ps( MAX_SHARING_PROCS, -1 );
02161         std::vector< EntityHandle > hs( MAX_SHARING_PROCS, 0 );
02162         for( int e = 0; e < num_ents2; e++ )
02163         {
02164             // Check for existing entity, otherwise make new one
02165             EntityHandle new_h = 0;
02166             EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02167             double coords[3];
02168             int num_ps = -1;
02169 
02170             //=======================================
02171             // Unpack all the data at once, to make sure the buffer pointers
02172             // are tracked correctly
02173             //=======================================
02174             if( store_remote_handles )
02175             {
02176                 // Pointers to other procs/handles
02177                 UNPACK_INT( buff_save, num_ps );
02178                 if( 0 >= num_ps )
02179                 {
02180                     std::cout << "Should never be fewer than 1 proc here." << std::endl;
02181                     return MB_FAILURE;
02182                 }
02183 
02184                 UNPACK_INTS( buff_save, &ps[0], num_ps );
02185                 UNPACK_EH( buff_save, &hs[0], num_ps );
02186             }
02187 
02188             if( MBVERTEX == this_type )
02189             {
02190                 UNPACK_DBLS( buff_ptr, coords, 3 );
02191             }
02192             else
02193             {
02194                 assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
02195                 UNPACK_EH( buff_ptr, connect, verts_per_entity );
02196 
02197                 // Update connectivity to local handles
02198                 result = get_local_handles( connect, verts_per_entity, msg_ents );MB_CHK_SET_ERR( result, "Failed to get local handles" );
02199             }
02200 
02201             //=======================================
02202             // Now, process that data; begin by finding an identical
02203             // entity, if there is one
02204             //=======================================
02205             if( store_remote_handles )
02206             {
02207                 result = find_existing_entity( is_iface, ps[0], hs[0], num_ps, connect, verts_per_entity, this_type,
02208                                                L2hloc, L2hrem, L2p, new_h );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
02209             }
02210 
02211             //=======================================
02212             // If we didn't find one, we'll have to create one
02213             //=======================================
02214             bool created_here = false;
02215             if( !new_h && !is_iface )
02216             {
02217                 if( MBVERTEX == this_type )
02218                 {
02219                     // Create a vertex
02220                     result = mbImpl->create_vertex( coords, new_h );MB_CHK_SET_ERR( result, "Failed to make new vertex" );
02221                 }
02222                 else
02223                 {
02224                     // Create the element
02225                     result = mbImpl->create_element( this_type, connect, verts_per_entity, new_h );MB_CHK_SET_ERR( result, "Failed to make new element" );
02226 
02227                     // Update adjacencies
02228                     result = ru->update_adjacencies( new_h, 1, verts_per_entity, connect );MB_CHK_SET_ERR( result, "Failed to update adjacencies" );
02229                 }
02230 
02231                 // Should have a new handle now
02232                 assert( new_h );
02233 
02234                 created_here = true;
02235             }
02236 
02237             //=======================================
02238             // Take care of sharing data
02239             //=======================================
02240 
02241             // Need to save entities found in order, for interpretation of
02242             // later parts of this message
02243             if( !is_iface )
02244             {
02245                 assert( new_h );
02246                 msg_ents.push_back( new_h );
02247             }
02248 
02249             if( created_here ) new_ents.push_back( new_h );
02250 
02251             if( new_h && store_remote_handles )
02252             {
02253                 unsigned char new_pstat = 0x0;
02254                 if( is_iface )
02255                 {
02256                     new_pstat = PSTATUS_INTERFACE;
02257                     // Here, lowest rank proc should be first
02258                     int idx = std::min_element( &ps[0], &ps[0] + num_ps ) - &ps[0];
02259                     if( idx )
02260                     {
02261                         std::swap( ps[0], ps[idx] );
02262                         std::swap( hs[0], hs[idx] );
02263                     }
02264                     // Set ownership based on lowest rank; can't be in update_remote_data, because
02265                     // there we don't know whether it resulted from ghosting or not
02266                     if( ( num_ps > 1 && ps[0] != (int)rank() ) ) new_pstat |= PSTATUS_NOT_OWNED;
02267                 }
02268                 else if( created_here )
02269                 {
02270                     if( created_iface )
02271                         new_pstat = PSTATUS_NOT_OWNED;
02272                     else
02273                         new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
02274                 }
02275 
02276                 // Update sharing data and pstatus, adjusting order if iface
02277                 result = update_remote_data( new_h, &ps[0], &hs[0], num_ps, new_pstat );MB_CHK_SET_ERR( result, "unpack_entities" );
02278 
02279                 // If a new multi-shared entity, save owner for subsequent lookup in L2 lists
02280                 if( store_remote_handles && !is_iface && num_ps > 2 )
02281                 {
02282                     L2hrem.push_back( hs[0] );
02283                     L2hloc.push_back( new_h );
02284                     L2p.push_back( ps[0] );
02285                 }
02286 
02287                 // Need to send this new handle to all sharing procs
02288                 if( !is_iface )
02289                 {
02290                     for( j = 0; j < num_ps; j++ )
02291                     {
02292                         if( ps[j] == (int)procConfig.proc_rank() ) continue;
02293                         int idx = get_buffers( ps[j] );
02294                         if( idx == (int)L1hloc.size() )
02295                         {
02296                             L1hloc.resize( idx + 1 );
02297                             L1hrem.resize( idx + 1 );
02298                             L1p.resize( idx + 1 );
02299                         }
02300 
02301                         // Don't bother adding if it's already in the list
02302                         std::vector< EntityHandle >::iterator vit =
02303                             std::find( L1hloc[idx].begin(), L1hloc[idx].end(), new_h );
02304                         if( vit != L1hloc[idx].end() )
02305                         {
02306                             // If it's in the list but remote handle isn't known but we know
02307                             // it, replace in the list
02308                             if( L1p[idx][vit - L1hloc[idx].begin()] != -1 && hs[j] )
02309                             {
02310                                 L1hrem[idx][vit - L1hloc[idx].begin()] = hs[j];
02311                                 L1p[idx][vit - L1hloc[idx].begin()]    = -1;
02312                             }
02313                             else
02314                                 continue;
02315                         }
02316                         else
02317                         {
02318                             if( !hs[j] )
02319                             {
02320                                 assert( -1 != ps[0] && num_ps > 2 );
02321                                 L1p[idx].push_back( ps[0] );
02322                                 L1hrem[idx].push_back( hs[0] );
02323                             }
02324                             else
02325                             {
02326                                 assert(
02327                                     "either this remote handle isn't in the remote list, or "
02328                                     "it's for another proc" &&
02329                                     ( std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) == L1hrem[idx].end() ||
02330                                       L1p[idx][std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) -
02331                                                L1hrem[idx].begin()] != -1 ) );
02332                                 L1p[idx].push_back( -1 );
02333                                 L1hrem[idx].push_back( hs[j] );
02334                             }
02335                             L1hloc[idx].push_back( new_h );
02336                         }
02337                     }
02338                 }
02339 
02340                 assert( "Shouldn't be here for non-shared entities" && -1 != num_ps );
02341                 std::fill( &ps[0], &ps[num_ps], -1 );
02342                 std::fill( &hs[0], &hs[num_ps], 0 );
02343             }
02344         }
02345 
02346         myDebug->tprintf( 4, "Unpacked %d ents of type %s\n", num_ents2, CN::EntityTypeName( this_type ) );
02347     }
02348 
02349     myDebug->tprintf( 4, "Done unpacking entities.\n" );
02350 
02351     // Need to sort here, to enable searching
02352     std::sort( new_ents.begin(), new_ents.end() );
02353 
02354     return MB_SUCCESS;
02355 }
02356 
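// Illustrative sketch (not part of MOAB): the L2 bookkeeping described in the
// comment block at the top of unpack_entities. A freshly unpacked entity that
// is shared by more than two processors is recorded with its local handle,
// its handle on the owning processor, and the owner's rank, so that later
// messages can find it again by owner handle. All names are hypothetical.
inline void example_record_multishared( std::vector< EntityHandle >& L2hloc,
                                        std::vector< EntityHandle >& L2hrem,
                                        std::vector< unsigned int >& L2p,
                                        EntityHandle local_h,
                                        EntityHandle owner_h,
                                        unsigned int owner_rank )
{
    L2hloc.push_back( local_h );   // handle on this processor
    L2hrem.push_back( owner_h );   // handle on the owning processor
    L2p.push_back( owner_rank );   // rank of the owner
}
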
02357 ErrorCode ParallelComm::print_buffer( unsigned char* buff_ptr, int mesg_tag, int from_proc, bool sent )
02358 {
02359     std::cerr << procConfig.proc_rank();
02360     if( sent )
02361         std::cerr << " sent";
02362     else
02363         std::cerr << " received";
02364     std::cerr << " message type " << mesg_tag << " to/from proc " << from_proc << "; contents:" << std::endl;
02365 
02366     int msg_length, num_ents;
02367     unsigned char* orig_ptr = buff_ptr;
02368     UNPACK_INT( buff_ptr, msg_length );
02369     std::cerr << msg_length << " bytes..." << std::endl;
02370 
02371     if( MB_MESG_ENTS_SIZE == mesg_tag || MB_MESG_ENTS_LARGE == mesg_tag )
02372     {
02373         // 1. # entities = E
02374         int i, j, k;
02375         std::vector< int > ps;
02376         std::vector< EntityHandle > hs;
02377 
02378         UNPACK_INT( buff_ptr, num_ents );
02379         std::cerr << num_ents << " entities..." << std::endl;
02380 
02381         // Save place where remote handle info starts, then scan forward to ents
02382         for( i = 0; i < num_ents; i++ )
02383         {
02384             UNPACK_INT( buff_ptr, j );
02385             if( 0 > j ) return MB_FAILURE;
02386             ps.resize( j );
02387             hs.resize( j );
02388             std::cerr << "Entity " << i << ", # procs = " << j << std::endl;
02389             UNPACK_INTS( buff_ptr, &ps[0], j );
02390             UNPACK_EH( buff_ptr, &hs[0], j );
02391             std::cerr << "   Procs: ";
02392             for( k = 0; k < j; k++ )
02393                 std::cerr << ps[k] << " ";
02394             std::cerr << std::endl;
02395             std::cerr << "   Handles: ";
02396             for( k = 0; k < j; k++ )
02397                 std::cerr << hs[k] << " ";
02398             std::cerr << std::endl;
02399 
02400             if( buff_ptr - orig_ptr > msg_length )
02401             {
02402                 std::cerr << "End of buffer..." << std::endl;
02403                 std::cerr.flush();
02404                 return MB_FAILURE;
02405             }
02406         }
02407 
02408         while( true )
02409         {
02410             EntityType this_type = MBMAXTYPE;
02411             UNPACK_TYPE( buff_ptr, this_type );
02412             assert( this_type != MBENTITYSET );
02413 
02414             // MBMAXTYPE signifies end of entities data
02415             if( MBMAXTYPE == this_type ) break;
02416 
02417             // Get the number of ents
02418             int num_ents2, verts_per_entity = 0;
02419             UNPACK_INT( buff_ptr, num_ents2 );
02420 
02421             // Unpack the nodes per entity
02422             if( MBVERTEX != this_type && num_ents2 )
02423             {
02424                 UNPACK_INT( buff_ptr, verts_per_entity );
02425             }
02426 
02427             std::cerr << "Type: " << CN::EntityTypeName( this_type ) << "; num_ents = " << num_ents2;
02428             if( MBVERTEX != this_type ) std::cerr << "; verts_per_ent = " << verts_per_entity;
02429             std::cerr << std::endl;
02430             if( num_ents2 < 0 || num_ents2 > msg_length )
02431             {
02432                 std::cerr << "Wrong number of entities, returning." << std::endl;
02433                 return MB_FAILURE;
02434             }
02435 
02436             for( int e = 0; e < num_ents2; e++ )
02437             {
02438                 // Check for existing entity, otherwise make new one
02439                 if( MBVERTEX == this_type )
02440                 {
02441                     double coords[3];
02442                     UNPACK_DBLS( buff_ptr, coords, 3 );
02443                     std::cerr << "xyz = " << coords[0] << ", " << coords[1] << ", " << coords[2] << std::endl;
02444                 }
02445                 else
02446                 {
02447                     EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02448                     assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
02449                     UNPACK_EH( buff_ptr, connect, verts_per_entity );
02450 
02451                     // Update connectivity to local handles
02452                     std::cerr << "Connectivity: ";
02453                     for( k = 0; k < verts_per_entity; k++ )
02454                         std::cerr << connect[k] << " ";
02455                     std::cerr << std::endl;
02456                 }
02457 
02458                 if( buff_ptr - orig_ptr > msg_length )
02459                 {
02460                     std::cerr << "End of buffer..." << std::endl;
02461                     std::cerr.flush();
02462                     return MB_FAILURE;
02463                 }
02464             }
02465         }
02466     }
02467     else if( MB_MESG_REMOTEH_SIZE == mesg_tag || MB_MESG_REMOTEH_LARGE == mesg_tag )
02468     {
02469         UNPACK_INT( buff_ptr, num_ents );
02470         std::cerr << num_ents << " entities..." << std::endl;
02471         if( 0 > num_ents || num_ents > msg_length )
02472         {
02473             std::cerr << "Wrong number of entities, returning." << std::endl;
02474             return MB_FAILURE;
02475         }
02476         std::vector< EntityHandle > L1hloc( num_ents ), L1hrem( num_ents );
02477         std::vector< int > L1p( num_ents );
02478         UNPACK_INTS( buff_ptr, &L1p[0], num_ents );
02479         UNPACK_EH( buff_ptr, &L1hrem[0], num_ents );
02480         UNPACK_EH( buff_ptr, &L1hloc[0], num_ents );
02481         std::cerr << num_ents << " Entity pairs; hremote/hlocal/proc: " << std::endl;
02482         for( int i = 0; i < num_ents; i++ )
02483         {
02484             EntityType etype = TYPE_FROM_HANDLE( L1hloc[i] );
02485             std::cerr << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hrem[i] ) << ", "
02486                       << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hloc[i] ) << ", " << L1p[i] << std::endl;
02487         }
02488 
02489         if( buff_ptr - orig_ptr > msg_length )
02490         {
02491             std::cerr << "End of buffer..." << std::endl;
02492             std::cerr.flush();
02493             return MB_FAILURE;
02494         }
02495     }
02496     else if( mesg_tag == MB_MESG_TAGS_SIZE || mesg_tag == MB_MESG_TAGS_LARGE )
02497     {
02498         int num_tags, dum1, data_type, tag_size;
02499         UNPACK_INT( buff_ptr, num_tags );
02500         std::cerr << "Number of tags = " << num_tags << std::endl;
02501         for( int i = 0; i < num_tags; i++ )
02502         {
02503             std::cerr << "Tag " << i << ":" << std::endl;
02504             UNPACK_INT( buff_ptr, tag_size );
02505             UNPACK_INT( buff_ptr, dum1 );
02506             UNPACK_INT( buff_ptr, data_type );
02507             std::cerr << "Tag size, type, data type = " << tag_size << ", " << dum1 << ", " << data_type << std::endl;
02508             UNPACK_INT( buff_ptr, dum1 );
02509             std::cerr << "Default value size = " << dum1 << std::endl;
02510             buff_ptr += dum1;
02511             UNPACK_INT( buff_ptr, dum1 );
02512             std::string name( (char*)buff_ptr, dum1 );
02513             std::cerr << "Tag name = " << name.c_str() << std::endl;
02514             buff_ptr += dum1;
02515             UNPACK_INT( buff_ptr, num_ents );
02516             std::cerr << "Number of ents = " << num_ents << std::endl;
02517             std::vector< EntityHandle > tmp_buff( num_ents );
02518             UNPACK_EH( buff_ptr, &tmp_buff[0], num_ents );
02519             int tot_length = 0;
02520             for( int j = 0; j < num_ents; j++ )
02521             {
02522                 EntityType etype = TYPE_FROM_HANDLE( tmp_buff[j] );
02523                 std::cerr << CN::EntityTypeName( etype ) << " " << ID_FROM_HANDLE( tmp_buff[j] ) << ", tag = ";
02524                 if( tag_size == MB_VARIABLE_LENGTH )
02525                 {
02526                     UNPACK_INT( buff_ptr, dum1 );
02527                     tot_length += dum1;
02528                     std::cerr << "(variable, length = " << dum1 << ")" << std::endl;
02529                 }
02530                 else if( data_type == MB_TYPE_DOUBLE )
02531                 {
02532                     double dum_dbl;
02533                     UNPACK_DBL( buff_ptr, dum_dbl );
02534                     std::cerr << dum_dbl << std::endl;
02535                 }
02536                 else if( data_type == MB_TYPE_INTEGER )
02537                 {
02538                     int dum_int;
02539                     UNPACK_INT( buff_ptr, dum_int );
02540                     std::cerr << dum_int << std::endl;
02541                 }
02542                 else if( data_type == MB_TYPE_OPAQUE )
02543                 {
02544                     std::cerr << "(opaque)" << std::endl;
02545                     buff_ptr += tag_size;
02546                 }
02547                 else if( data_type == MB_TYPE_HANDLE )
02548                 {
02549                     EntityHandle dum_eh;
02550                     UNPACK_EH( buff_ptr, &dum_eh, 1 );
02551                     std::cerr << dum_eh << std::endl;
02552                 }
02553                 else if( data_type == MB_TYPE_BIT )
02554                 {
02555                     std::cerr << "(bit)" << std::endl;
02556                     buff_ptr += tag_size;
02557                 }
02558             }
02559             if( tag_size == MB_VARIABLE_LENGTH ) buff_ptr += tot_length;
02560         }
02561     }
02562     else
02563     {
02564         assert( false );
02565         return MB_FAILURE;
02566     }
02567 
02568     std::cerr.flush();
02569 
02570     return MB_SUCCESS;
02571 }
02572 
02573 ErrorCode ParallelComm::list_entities( const EntityHandle* ents, int num_ents )
02574 {
02575     if( NULL == ents )
02576     {
02577         Range shared_ents;
02578         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( shared_ents ) );
02579         shared_ents.print( "Shared entities:\n" );
02580         return MB_SUCCESS;
02581     }
02582 
02583     unsigned char pstat;
02584     EntityHandle tmp_handles[MAX_SHARING_PROCS];
02585     int tmp_procs[MAX_SHARING_PROCS];
02586     unsigned int num_ps;
02587     ErrorCode result;
02588 
02589     for( int i = 0; i < num_ents; i++ )
02590     {
02591         result = mbImpl->list_entities( ents + i, 1 );MB_CHK_ERR( result );
02592         double coords[3];
02593         result = mbImpl->get_coords( ents + i, 1, coords );
02594         std::cout << " coords: " << coords[0] << " " << coords[1] << " " << coords[2] << "\n";
02595 
02596         result = get_sharing_data( ents[i], tmp_procs, tmp_handles, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
02597 
02598         std::cout << "Pstatus: ";
02599         if( !num_ps )
02600             std::cout << "local " << std::endl;
02601         else
02602         {
02603             if( pstat & PSTATUS_NOT_OWNED ) std::cout << "NOT_OWNED; ";
02604             if( pstat & PSTATUS_SHARED ) std::cout << "SHARED; ";
02605             if( pstat & PSTATUS_MULTISHARED ) std::cout << "MULTISHARED; ";
02606             if( pstat & PSTATUS_INTERFACE ) std::cout << "INTERFACE; ";
02607             if( pstat & PSTATUS_GHOST ) std::cout << "GHOST; ";
02608             std::cout << std::endl;
02609             for( unsigned int j = 0; j < num_ps; j++ )
02610             {
02611                 std::cout << "  proc " << tmp_procs[j] << " id (handle) " << mbImpl->id_from_handle( tmp_handles[j] )
02612                           << "(" << tmp_handles[j] << ")" << std::endl;
02613             }
02614         }
02615         std::cout << std::endl;
02616     }
02617 
02618     return MB_SUCCESS;
02619 }
02620 
02621 ErrorCode ParallelComm::list_entities( const Range& ents )
02622 {
02623     for( Range::iterator rit = ents.begin(); rit != ents.end(); ++rit )
02624         list_entities( &( *rit ), 1 );
02625 
02626     return MB_SUCCESS;
02627 }
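
// Usage sketch (hypothetical, not part of the MOAB source): how the two list_entities
// overloads above might be called for debugging; 'pcomm' is assumed to be an initialized
// ParallelComm*, and errors are only loosely checked here.
/*
    ErrorCode rval = pcomm->list_entities( NULL, 0 );    // NULL ents: print the whole shared-entity Range
    Range verts;
    rval = pcomm->get_moab()->get_entities_by_type( 0, MBVERTEX, verts );
    rval = pcomm->list_entities( verts );                // per-entity coords, pstatus and sharing procs
*/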
02628 
02629 ErrorCode ParallelComm::update_remote_data( Range& local_range,
02630                                             Range& remote_range,
02631                                             int other_proc,
02632                                             const unsigned char add_pstat )
02633 {
02634     Range::iterator rit, rit2;
02635     ErrorCode result = MB_SUCCESS;
02636 
02637     // For each pair of local/remote handles:
02638     for( rit = local_range.begin(), rit2 = remote_range.begin(); rit != local_range.end(); ++rit, ++rit2 )
02639     {
02640         result = update_remote_data( *rit, &other_proc, &( *rit2 ), 1, add_pstat );MB_CHK_ERR( result );
02641     }
02642 
02643     return MB_SUCCESS;
02644 }
02645 
02646 ErrorCode ParallelComm::update_remote_data( const EntityHandle new_h,
02647                                             const int* ps,
02648                                             const EntityHandle* hs,
02649                                             const int num_ps,
02650                                             const unsigned char add_pstat
02651                                             // The following lines left in for future debugging, at least until I trust
02652                                             // this function; tjt, 10/4/2013
02653                                             //                                           , int *new_ps,
02654                                             //                                           EntityHandle *new_hs,
02655                                             //                                           int &new_numps,
02656                                             //                                           unsigned char &new_pstat
02657 )
02658 {
02659     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02660     // in this function, so no need to initialize; sharing data does not include
02661     // this proc if shared with only one other
02662 
02663     // Following variables declared here to avoid compiler errors
02664     int new_numps;
02665     unsigned char new_pstat;
02666     std::vector< int > new_ps( MAX_SHARING_PROCS, -1 );
02667     std::vector< EntityHandle > new_hs( MAX_SHARING_PROCS, 0 );
02668 
02669     new_numps        = 0;
02670     ErrorCode result = get_sharing_data( new_h, &new_ps[0], &new_hs[0], new_pstat, new_numps );MB_CHK_SET_ERR( result, "Failed to get sharing data in update_remote_data" );
02671     int num_exist = new_numps;
02672 
02673     // Add new pstat info to the flag
02674     new_pstat |= add_pstat;
02675 
02676     /*
02677     #define plist(str, lst, siz)                                          \
02678         std::cout << str << "(";                                          \
02679         for (int i = 0; i < (int)siz; i++) std::cout << lst[i] << " ";    \
02680         std::cout << ") ";                                                \
02681 
02682         std::cout << "update_remote_data: rank = " << rank() << ", new_h = " << new_h << std::endl;
02683         std::string ostr;
02684         plist("ps", ps, num_ps);
02685         plist("hs", hs, num_ps);
02686         print_pstatus(add_pstat, ostr);
02687         std::cout << ", add_pstat = " << ostr.c_str() << std::endl;
02688         plist("tag_ps", new_ps, new_numps);
02689         plist("tag_hs", new_hs, new_numps);
02690         assert(new_numps <= size());
02691         print_pstatus(new_pstat, ostr);
02692         std::cout << ", tag_pstat=" << ostr.c_str() << std::endl;
02693     */
02694 
02695 #ifndef NDEBUG
02696     {
02697         // Check for duplicates in proc list
02698         std::set< unsigned int > dumprocs;
02699         unsigned int dp = 0;
02700         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
02701             dumprocs.insert( ps[dp] );
02702         assert( dp == dumprocs.size() );
02703     }
02704 #endif
02705 
02706     // If only one sharer and I'm the owner, insert myself in the list;
02707     // otherwise, my data is checked at the end
02708     if( 1 == new_numps && !( new_pstat & PSTATUS_NOT_OWNED ) )
02709     {
02710         new_hs[1] = new_hs[0];
02711         new_ps[1] = new_ps[0];
02712         new_hs[0] = new_h;
02713         new_ps[0] = rank();
02714         new_numps = 2;
02715     }
02716 
02717     // Now put passed-in data onto lists
02718     int idx;
02719     for( int i = 0; i < num_ps; i++ )
02720     {
02721         idx = std::find( &new_ps[0], &new_ps[0] + new_numps, ps[i] ) - &new_ps[0];
02722         if( idx < new_numps )
02723         {
02724             if( !new_hs[idx] && hs[i] )
02725                 // h on list is 0 and passed-in h is non-zero, replace it
02726                 new_hs[idx] = hs[i];
02727             else
02728                 assert( !hs[i] || new_hs[idx] == hs[i] );
02729         }
02730         else
02731         {
02732             if( new_numps + 1 == MAX_SHARING_PROCS )
02733             {
02734                 MB_SET_ERR( MB_FAILURE, "Exceeded MAX_SHARING_PROCS for "
02735                                             << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) ) << ' '
02736                                             << ID_FROM_HANDLE( new_h ) << " in process " << rank() );
02737             }
02738             new_ps[new_numps] = ps[i];
02739             new_hs[new_numps] = hs[i];
02740             new_numps++;
02741         }
02742     }
02743 
02744     // Add myself, if it isn't there already
02745     idx = std::find( &new_ps[0], &new_ps[0] + new_numps, rank() ) - &new_ps[0];
02746     if( idx == new_numps )
02747     {
02748         new_ps[new_numps] = rank();
02749         new_hs[new_numps] = new_h;
02750         new_numps++;
02751     }
02752     else if( !new_hs[idx] && new_numps > 2 )
02753         new_hs[idx] = new_h;
02754 
02755     // Proc list is complete; update for shared, multishared
02756     if( new_numps > 1 )
02757     {
02758         if( new_numps > 2 ) new_pstat |= PSTATUS_MULTISHARED;
02759         new_pstat |= PSTATUS_SHARED;
02760     }
02761 
02762     /*
02763         plist("new_ps", new_ps, new_numps);
02764         plist("new_hs", new_hs, new_numps);
02765         print_pstatus(new_pstat, ostr);
02766         std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
02767         std::cout << std::endl;
02768     */
02769 
02770     result = set_sharing_data( new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0] );MB_CHK_SET_ERR( result, "Failed to set sharing data in update_remote_data" );
02771 
02772     if( new_pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
02773 
02774     return MB_SUCCESS;
02775 }
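
// Illustrative call (hypothetical values, not part of the MOAB source): record that local
// entity 'vert' matches handle 'remote_vert' on rank 'other_rank' and mark it as an
// interface entity; sharedEnts and the sharing tags are updated by the function above.
/*
    int          ps[1] = { other_rank };
    EntityHandle hs[1] = { remote_vert };
    ErrorCode rval = update_remote_data( vert, ps, hs, 1, PSTATUS_INTERFACE );
*/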
02776 
02777 ErrorCode ParallelComm::update_remote_data_old( const EntityHandle new_h,
02778                                                 const int* ps,
02779                                                 const EntityHandle* hs,
02780                                                 const int num_ps,
02781                                                 const unsigned char add_pstat )
02782 {
02783     EntityHandle tag_hs[MAX_SHARING_PROCS];
02784     int tag_ps[MAX_SHARING_PROCS];
02785     unsigned char pstat;
02786     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02787     // in this function, so no need to initialize
02788     unsigned int num_exist;
02789     ErrorCode result = get_sharing_data( new_h, tag_ps, tag_hs, pstat, num_exist );MB_CHK_ERR( result );
02790 
02791 #ifndef NDEBUG
02792     {
02793         // Check for duplicates in proc list
02794         std::set< unsigned int > dumprocs;
02795         unsigned int dp = 0;
02796         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
02797             dumprocs.insert( ps[dp] );
02798         assert( dp == dumprocs.size() );
02799     }
02800 #endif
02801 
02802     // Add any new sharing data
02803     bool changed = false;
02804     int idx;
02805     if( !num_exist )
02806     {
02807         // Just take what caller passed
02808         memcpy( tag_ps, ps, num_ps * sizeof( int ) );
02809         memcpy( tag_hs, hs, num_ps * sizeof( EntityHandle ) );
02810         num_exist = num_ps;
02811         // If it's only one, hopefully I'm not there yet...
02812         assert( "I shouldn't be the only proc there." && ( 1 != num_exist || ps[0] != (int)procConfig.proc_rank() ) );
02813         changed = true;
02814     }
02815     else
02816     {
02817         for( int i = 0; i < num_ps; i++ )
02818         {
02819             idx = std::find( tag_ps, tag_ps + num_exist, ps[i] ) - tag_ps;
02820             if( idx == (int)num_exist )
02821             {
02822                 if( num_exist == MAX_SHARING_PROCS )
02823                 {
02824                     std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) )
02825                               << ' ' << ID_FROM_HANDLE( new_h ) << " in process " << proc_config().proc_rank()
02826                               << std::endl;
02827                     std::cerr.flush();
02828                     MPI_Abort( proc_config().proc_comm(), 66 );
02829                 }
02830 
02831                 // If there's only 1 sharing proc, and it's not me, then
02832                 // we'll end up with 3; add me to the front
02833                 if( !i && num_ps == 1 && num_exist == 1 && ps[0] != (int)procConfig.proc_rank() )
02834                 {
02835                     int j = 1;
02836                     // If I own this entity, put me at front, otherwise after first
02837                     if( !( pstat & PSTATUS_NOT_OWNED ) )
02838                     {
02839                         tag_ps[1] = tag_ps[0];
02840                         tag_hs[1] = tag_hs[0];
02841                         j         = 0;
02842                     }
02843                     tag_ps[j] = procConfig.proc_rank();
02844                     tag_hs[j] = new_h;
02845                     num_exist++;
02846                 }
02847 
02848                 tag_ps[num_exist] = ps[i];
02849                 tag_hs[num_exist] = hs[i];
02850                 num_exist++;
02851                 changed = true;
02852             }
02853             else if( 0 == tag_hs[idx] )
02854             {
02855                 tag_hs[idx] = hs[i];
02856                 changed     = true;
02857             }
02858             else if( 0 != hs[i] )
02859             {
02860                 assert( hs[i] == tag_hs[idx] );
02861             }
02862         }
02863     }
02864 
02865     // Adjust for interface layer if necessary
02866     if( add_pstat & PSTATUS_INTERFACE )
02867     {
02868         idx = std::min_element( tag_ps, tag_ps + num_exist ) - tag_ps;
02869         if( idx )
02870         {
02871             int tag_proc       = tag_ps[idx];
02872             tag_ps[idx]        = tag_ps[0];
02873             tag_ps[0]          = tag_proc;
02874             EntityHandle tag_h = tag_hs[idx];
02875             tag_hs[idx]        = tag_hs[0];
02876             tag_hs[0]          = tag_h;
02877             changed            = true;
02878             if( tag_ps[0] != (int)procConfig.proc_rank() ) pstat |= PSTATUS_NOT_OWNED;
02879         }
02880     }
02881 
02882     if( !changed ) return MB_SUCCESS;
02883 
02884     assert( "interface entities should have > 1 proc" && ( !( add_pstat & PSTATUS_INTERFACE ) || num_exist > 1 ) );
02885     assert( "ghost entities should have > 1 proc" && ( !( add_pstat & PSTATUS_GHOST ) || num_exist > 1 ) );
02886 
02887     // If it's multi-shared and we created the entity in this unpack,
02888     // local handle probably isn't in handle list yet
02889     if( num_exist > 2 )
02890     {
02891         idx = std::find( tag_ps, tag_ps + num_exist, procConfig.proc_rank() ) - tag_ps;
02892         assert( idx < (int)num_exist );
02893         if( !tag_hs[idx] ) tag_hs[idx] = new_h;
02894     }
02895 
02896     int tag_p;
02897     EntityHandle tag_h;
02898 
02899     // Update pstat
02900     pstat |= add_pstat;
02901 
02902     if( num_exist > 2 )
02903         pstat |= ( PSTATUS_MULTISHARED | PSTATUS_SHARED );
02904     else if( num_exist > 0 )
02905         pstat |= PSTATUS_SHARED;
02906 
02907     //    compare_remote_data(new_h, num_ps, hs, ps, add_pstat,
02908     //                        num_exist, tag_hs, tag_ps, pstat);
02909 
02910     // Reset single shared proc/handle if was shared and moving to multi-shared
02911     if( num_exist > 2 && !( pstat & PSTATUS_MULTISHARED ) && ( pstat & PSTATUS_SHARED ) )
02912     {
02913         // Must remove sharedp/h first, which really means set to default value
02914         tag_p  = -1;
02915         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, &tag_p );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
02916         tag_h  = 0;
02917         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, &tag_h );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
02918     }
02919 
02920     // Set sharing tags
02921     if( num_exist > 2 )
02922     {
02923         std::fill( tag_ps + num_exist, tag_ps + MAX_SHARING_PROCS, -1 );
02924         std::fill( tag_hs + num_exist, tag_hs + MAX_SHARING_PROCS, 0 );
02925         result = mbImpl->tag_set_data( sharedps_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedps tag data" );
02926         result = mbImpl->tag_set_data( sharedhs_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag data" );
02927 
02928 #ifndef NDEBUG
02929         {
02930             // Check for duplicates in proc list
02931             std::set< unsigned int > dumprocs;
02932             unsigned int dp = 0;
02933             for( ; dp < num_exist && -1 != tag_ps[dp]; dp++ )
02934                 dumprocs.insert( tag_ps[dp] );
02935             assert( dp == dumprocs.size() );
02936         }
02937 #endif
02938     }
02939     else if( num_exist == 2 || num_exist == 1 )
02940     {
02941         if( tag_ps[0] == (int)procConfig.proc_rank() )
02942         {
02943             assert( 2 == num_exist && tag_ps[1] != (int)procConfig.proc_rank() );
02944             tag_ps[0] = tag_ps[1];
02945             tag_hs[0] = tag_hs[1];
02946         }
02947         assert( tag_ps[0] != -1 && tag_hs[0] != 0 );
02948         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
02949         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
02950     }
02951 
02952     // Now set new pstatus
02953     result = mbImpl->tag_set_data( pstatus_tag(), &new_h, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
02954 
02955     if( pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
02956 
02957     return MB_SUCCESS;
02958 }
02959 
02960 ErrorCode ParallelComm::get_sharing_data( const Range& entities, std::set< int >& procs, int operation )
02961 {
02962     // Get the union or intersection of sharing data for multiple entities
02963     ErrorCode result;
02964     int sp2[MAX_SHARING_PROCS];
02965     int num_ps;
02966     unsigned char pstat;
02967     std::set< int > tmp_procs;
02968     procs.clear();
02969 
02970     for( Range::const_iterator rit = entities.begin(); rit != entities.end(); ++rit )
02971     {
02972         // Get sharing procs
02973         result = get_sharing_data( *rit, sp2, NULL, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_sharing_data" );
02974         if( !( pstat & PSTATUS_SHARED ) && Interface::INTERSECT == operation )
02975         {
02976             procs.clear();
02977             return MB_SUCCESS;
02978         }
02979 
02980         if( rit == entities.begin() )
02981         {
02982             std::copy( sp2, sp2 + num_ps, std::inserter( procs, procs.begin() ) );
02983         }
02984         else
02985         {
02986             std::sort( sp2, sp2 + num_ps );
02987             tmp_procs.clear();
02988             if( Interface::UNION == operation )
02989                 std::set_union( procs.begin(), procs.end(), sp2, sp2 + num_ps,
02990                                 std::inserter( tmp_procs, tmp_procs.end() ) );
02991             else if( Interface::INTERSECT == operation )
02992                 std::set_intersection( procs.begin(), procs.end(), sp2, sp2 + num_ps,
02993                                        std::inserter( tmp_procs, tmp_procs.end() ) );
02994             else
02995             {
02996                 assert( "Unknown operation." && false );
02997                 return MB_FAILURE;
02998             }
02999             procs.swap( tmp_procs );
03000         }
03001         if( Interface::INTERSECT == operation && procs.empty() ) return MB_SUCCESS;
03002     }
03003 
03004     return MB_SUCCESS;
03005 }
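
// Usage sketch (hypothetical, not part of the MOAB source) for the Range-based
// get_sharing_data above; 'pcomm' and 'iface_ents' are assumed to exist in the caller.
/*
    std::set< int > nbr_procs;
    ErrorCode rval = pcomm->get_sharing_data( iface_ents, nbr_procs, Interface::UNION );
    // nbr_procs now holds every rank sharing at least one entity in iface_ents;
    // Interface::INTERSECT would instead keep only the ranks that share all of them.
*/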
03006 
03007 ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity,
03008                                           int* ps,
03009                                           EntityHandle* hs,
03010                                           unsigned char& pstat,
03011                                           unsigned int& num_ps )
03012 {
03013     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
03014     if( pstat & PSTATUS_MULTISHARED )
03015     {
03016         result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
03017         if( hs )
03018         {
03019             result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
03020         }
03021         num_ps = std::find( ps, ps + MAX_SHARING_PROCS, -1 ) - ps;
03022     }
03023     else if( pstat & PSTATUS_SHARED )
03024     {
03025         result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
03026         if( hs )
03027         {
03028             result = mbImpl->tag_get_data( sharedh_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
03029             hs[1] = 0;
03030         }
03031         // Initialize past end of data
03032         ps[1]  = -1;
03033         num_ps = 1;
03034     }
03035     else
03036     {
03037         ps[0] = -1;
03038         if( hs ) hs[0] = 0;
03039         num_ps = 0;
03040     }
03041 
03042     assert( MAX_SHARING_PROCS >= num_ps );
03043 
03044     return MB_SUCCESS;
03045 }
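
// Usage sketch (hypothetical, not part of the MOAB source) for the single-entity
// get_sharing_data above; 'pcomm' is an assumed ParallelComm* and 'ent' a shared entity.
/*
    int procs[MAX_SHARING_PROCS];
    EntityHandle handles[MAX_SHARING_PROCS];
    unsigned char pstatus;
    unsigned int nprocs;
    ErrorCode rval = pcomm->get_sharing_data( ent, procs, handles, pstatus, nprocs );
    // procs[0..nprocs-1] are the sharing ranks, handles[i] is the entity's handle on
    // procs[i], and pstatus carries the PSTATUS_* bit flags tested throughout this file.
*/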
03046 
03047 ErrorCode ParallelComm::find_existing_entity( const bool is_iface,
03048                                               const int owner_p,
03049                                               const EntityHandle owner_h,
03050                                               const int num_ps,
03051                                               const EntityHandle* connect,
03052                                               const int num_connect,
03053                                               const EntityType this_type,
03054                                               std::vector< EntityHandle >& L2hloc,
03055                                               std::vector< EntityHandle >& L2hrem,
03056                                               std::vector< unsigned int >& L2p,
03057                                               EntityHandle& new_h )
03058 {
03059     new_h = 0;
03060     if( !is_iface && num_ps > 2 )
03061     {
03062         for( unsigned int i = 0; i < L2hrem.size(); i++ )
03063         {
03064             if( L2hrem[i] == owner_h && owner_p == (int)L2p[i] )
03065             {
03066                 new_h = L2hloc[i];
03067                 return MB_SUCCESS;
03068             }
03069         }
03070     }
03071 
03072     // If we got here and it's a vertex, we don't need to look further
03073     if( MBVERTEX == this_type || !connect || !num_connect ) return MB_SUCCESS;
03074 
03075     Range tmp_range;
03076     ErrorCode result = mbImpl->get_adjacencies( connect, num_connect, CN::Dimension( this_type ), false, tmp_range );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
03077     if( !tmp_range.empty() )
03078     {
03079         // Found a corresponding entity - return target
03080         new_h = *tmp_range.begin();
03081     }
03082     else
03083     {
03084         new_h = 0;
03085     }
03086 
03087     return MB_SUCCESS;
03088 }
03089 
03090 ErrorCode ParallelComm::get_local_handles( const Range& remote_handles,
03091                                            Range& local_handles,
03092                                            const std::vector< EntityHandle >& new_ents )
03093 {
03094     std::vector< EntityHandle > rh_vec;
03095     rh_vec.reserve( remote_handles.size() );
03096     std::copy( remote_handles.begin(), remote_handles.end(), std::back_inserter( rh_vec ) );
03097     ErrorCode result = get_local_handles( &rh_vec[0], remote_handles.size(), new_ents );
03098     std::copy( rh_vec.begin(), rh_vec.end(), range_inserter( local_handles ) );
03099     return result;
03100 }
03101 
03102 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents )
03103 {
03104     std::vector< EntityHandle > tmp_ents;
03105     std::copy( new_ents.begin(), new_ents.end(), std::back_inserter( tmp_ents ) );
03106     return get_local_handles( from_vec, num_ents, tmp_ents );
03107 }
03108 
03109 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec,
03110                                            int num_ents,
03111                                            const std::vector< EntityHandle >& new_ents )
03112 {
03113     for( int i = 0; i < num_ents; i++ )
03114     {
03115         if( TYPE_FROM_HANDLE( from_vec[i] ) == MBMAXTYPE )
03116         {
03117             assert( ID_FROM_HANDLE( from_vec[i] ) < (int)new_ents.size() );
03118             from_vec[i] = new_ents[ID_FROM_HANDLE( from_vec[i] )];
03119         }
03120     }
03121 
03122     return MB_SUCCESS;
03123 }
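
// Illustrative note (hypothetical sketch, not part of the MOAB source): handles arriving
// with type MBMAXTYPE are not real handles but indices into the newly created entities;
// get_local_handles above replaces them in place and leaves genuine handles untouched.
/*
    int err;
    EntityHandle encoded = CREATE_HANDLE( MBMAXTYPE, 3, err );     // index-encoded "handle"
    get_local_handles( &encoded, 1, new_ents );                    // encoded becomes new_ents[3]
*/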
03124 
03125 /*
03126 template <typename T> void
03127 insert_in_array(T* array, size_t array_size, size_t location, T value)
03128 {
03129   assert(location + 1 < array_size);
03130   for (size_t i = array_size - 1; i > location; i--)
03131     array[i] = array[i - 1];
03132   array[location] = value;
03133 }
03134 */
03135 
03136 ErrorCode ParallelComm::pack_range_map( Range& key_range, EntityHandle val_start, HandleMap& handle_map )
03137 {
03138     for( Range::const_pair_iterator key_it = key_range.const_pair_begin(); key_it != key_range.const_pair_end();
03139          ++key_it )
03140     {
03141         int tmp_num = ( *key_it ).second - ( *key_it ).first + 1;
03142         handle_map.insert( ( *key_it ).first, val_start, tmp_num );
03143         val_start += tmp_num;
03144     }
03145 
03146     return MB_SUCCESS;
03147 }
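
// Minimal sketch (assumed values, not part of the MOAB source) of what pack_range_map
// builds: each contiguous block of key handles maps onto consecutive values from val_start.
/*
    Range keys;
    keys.insert( 100, 103 );            // handles 100..103
    keys.insert( 200, 201 );            // handles 200..201
    HandleMap handle_map;
    pack_range_map( keys, 500, handle_map );
    // handle_map now maps 100..103 -> 500..503 and 200..201 -> 504..505
*/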
03148 
03149 ErrorCode ParallelComm::pack_sets( Range& entities, Buffer* buff, const bool store_remote_handles, const int to_proc )
03150 {
03151     // SETS:
03152     // . #sets
03153     // . for each set:
03154     //   - options[#sets] (unsigned int)
03155     //   - if (unordered) set range
03156     //   - else if ordered
03157     //     . #ents in set
03158     //     . handles[#ents]
03159     //   - #parents
03160     //   - if (#parents) handles[#parents]
03161     //   - #children
03162     //   - if (#children) handles[#children]
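    //
    // Concretely (illustrative walk-through, following the packing code below): for two
    // sets A and B the stream contains, in order, 2 (#sets); options[A], options[B]; the
    // PARALLEL_UNIQUE_ID block (a single 0 when no set has a nonzero unique id); per set,
    // its member count and member handles; then, when remote handles are NOT stored,
    // per-set parent/child counts followed by all parent/child handles (zero counts are
    // packed otherwise); and finally, when remote handles ARE stored, the source set
    // handles as a packed Range.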
03163 
03164     // Now the sets; assume any sets the application wants to pass are in the entities list
03165     ErrorCode result;
03166     Range all_sets = entities.subset_by_type( MBENTITYSET );
03167 
03168     int buff_size = estimate_sets_buffer_size( all_sets, store_remote_handles );
03169     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate sets buffer size" );
03170     buff->check_space( buff_size );
03171 
03172     // Number of sets
03173     PACK_INT( buff->buff_ptr, all_sets.size() );
03174 
03175     // Options for all sets
03176     std::vector< unsigned int > options( all_sets.size() );
03177     Range::iterator rit;
03178     std::vector< EntityHandle > members;
03179     int i;
03180     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03181     {
03182         result = mbImpl->get_meshset_options( *rit, options[i] );MB_CHK_SET_ERR( result, "Failed to get meshset options" );
03183     }
03184     buff->check_space( all_sets.size() * sizeof( unsigned int ) );
03185     PACK_VOID( buff->buff_ptr, &options[0], all_sets.size() * sizeof( unsigned int ) );
03186 
03187     // Pack parallel geometry unique id
03188     if( !all_sets.empty() )
03189     {
03190         Tag uid_tag;
03191         int n_sets  = all_sets.size();
03192         bool b_pack = false;
03193         std::vector< int > id_data( n_sets );
03194         result =
03195             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
03196 
03197         result = mbImpl->tag_get_data( uid_tag, all_sets, &id_data[0] );
03198         if( MB_TAG_NOT_FOUND != result )
03199         {
03200             if( MB_SUCCESS != result ) MB_SET_ERR( result, "Failed to get parallel geometry unique ids" );
03201             for( i = 0; i < n_sets; i++ )
03202             {
03203                 if( id_data[i] != 0 )
03204                 {
03205                     b_pack = true;
03206                     break;
03207                 }
03208             }
03209         }
03210 
03211         if( b_pack )
03212         {  // If any set carries a nonzero unique id, pack the ids for all sets
03213             buff->check_space( ( n_sets + 1 ) * sizeof( int ) );
03214             PACK_INT( buff->buff_ptr, n_sets );
03215             PACK_INTS( buff->buff_ptr, &id_data[0], n_sets );
03216         }
03217         else
03218         {
03219             buff->check_space( sizeof( int ) );
03220             PACK_INT( buff->buff_ptr, 0 );
03221         }
03222     }
03223 
03224     // Vectors/ranges
03225     std::vector< EntityHandle > entities_vec( entities.size() );
03226     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
03227     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03228     {
03229         members.clear();
03230         result = mbImpl->get_entities_by_handle( *rit, members );MB_CHK_SET_ERR( result, "Failed to get entities in ordered set" );
03231         result =
03232             get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
03233         buff->check_space( members.size() * sizeof( EntityHandle ) + sizeof( int ) );
03234         PACK_INT( buff->buff_ptr, members.size() );
03235         PACK_EH( buff->buff_ptr, &members[0], members.size() );
03236     }
03237 
03238     // Pack parent/child sets
03239     if( !store_remote_handles )
03240     {  // Only done when not storing remote handles
03241         // Pack numbers of parents/children
03242         unsigned int tot_pch = 0;
03243         int num_pch;
03244         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
03245         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03246         {
03247             // Pack parents
03248             result = mbImpl->num_parent_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num parents" );
03249             PACK_INT( buff->buff_ptr, num_pch );
03250             tot_pch += num_pch;
03251             result = mbImpl->num_child_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num children" );
03252             PACK_INT( buff->buff_ptr, num_pch );
03253             tot_pch += num_pch;
03254         }
03255 
03256         // Now pack actual parents/children
03257         members.clear();
03258         members.reserve( tot_pch );
03259         std::vector< EntityHandle > tmp_pch;
03260         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03261         {
03262             result = mbImpl->get_parent_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get parents" );
03263             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
03264             tmp_pch.clear();
03265             result = mbImpl->get_child_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get children" );
03266             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
03267             tmp_pch.clear();
03268         }
03269         assert( members.size() == tot_pch );
03270         if( !members.empty() )
03271         {
03272             result = get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc,
03273                                          entities_vec );MB_CHK_SET_ERR( result, "Failed to get remote handles for set parent/child sets" );
03274 #ifndef NDEBUG
03275             // Check that all handles are either sets or maxtype
03276             for( unsigned int __j = 0; __j < members.size(); __j++ )
03277                 assert( ( TYPE_FROM_HANDLE( members[__j] ) == MBMAXTYPE &&
03278                           ID_FROM_HANDLE( members[__j] ) < (int)entities.size() ) ||
03279                         TYPE_FROM_HANDLE( members[__j] ) == MBENTITYSET );
03280 #endif
03281             buff->check_space( members.size() * sizeof( EntityHandle ) );
03282             PACK_EH( buff->buff_ptr, &members[0], members.size() );
03283         }
03284     }
03285     else
03286     {
03287         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
03288         for( rit = all_sets.begin(); rit != all_sets.end(); ++rit )
03289         {
03290             PACK_INT( buff->buff_ptr, 0 );
03291             PACK_INT( buff->buff_ptr, 0 );
03292         }
03293     }
03294 
03295     // Pack the handles
03296     if( store_remote_handles && !all_sets.empty() )
03297     {
03298         buff_size = RANGE_SIZE( all_sets );
03299         buff->check_space( buff_size );
03300         PACK_RANGE( buff->buff_ptr, all_sets );
03301     }
03302 
03303     myDebug->tprintf( 4, "Done packing sets.\n" );
03304 
03305     buff->set_stored_size();
03306 
03307     return MB_SUCCESS;
03308 }
03309 
03310 ErrorCode ParallelComm::unpack_sets( unsigned char*& buff_ptr,
03311                                      std::vector< EntityHandle >& entities,
03312                                      const bool store_remote_handles,
03313                                      const int from_proc )
03314 {
03315     // Now the sets; assume any sets the application wants to pass are in the entities list
03316     ErrorCode result;
03317 
03318     bool no_sets = ( entities.empty() || ( mbImpl->type_from_handle( *entities.rbegin() ) == MBENTITYSET ) );
03319 
03320     Range new_sets;
03321     int num_sets;
03322     UNPACK_INT( buff_ptr, num_sets );
03323 
03324     if( !num_sets ) return MB_SUCCESS;
03325 
03326     int i;
03327     Range::const_iterator rit;
03328     std::vector< EntityHandle > members;
03329     int num_ents;
03330     std::vector< unsigned int > options_vec( num_sets );
03331     // Option value
03332     if( num_sets ) UNPACK_VOID( buff_ptr, &options_vec[0], num_sets * sizeof( unsigned int ) );
03333 
03334     // Unpack parallel geometry unique id
03335     int n_uid;
03336     UNPACK_INT( buff_ptr, n_uid );
03337     if( n_uid > 0 && n_uid != num_sets )
03338     {
03339         std::cerr << "The number of Parallel geometry unique ids should be same." << std::endl;
03340     }
03341 
03342     if( n_uid > 0 )
03343     {  // If parallel geometry unique id is packed
03344         std::vector< int > uids( n_uid );
03345         UNPACK_INTS( buff_ptr, &uids[0], n_uid );
03346 
03347         Tag uid_tag;
03348         result =
03349             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
03350 
03351         // Find existing sets
03352         for( i = 0; i < n_uid; i++ )
03353         {
03354             EntityHandle set_handle;
03355             Range temp_sets;
03356             void* tag_vals[] = { &uids[i] };
03357             if( uids[i] > 0 )
03358             {
03359                 result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &uid_tag, tag_vals, 1, temp_sets );
03360             }
03361             if( !temp_sets.empty() )
03362             {  // Existing set
03363                 set_handle = *temp_sets.begin();
03364             }
03365             else
03366             {  // Create a new set
03367                 result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
03368                 result = mbImpl->tag_set_data( uid_tag, &set_handle, 1, &uids[i] );MB_CHK_SET_ERR( result, "Failed to set parallel geometry unique ids" );
03369             }
03370             new_sets.insert( set_handle );
03371         }
03372     }
03373     else
03374     {
03375         // Create sets
03376         for( i = 0; i < num_sets; i++ )
03377         {
03378             EntityHandle set_handle;
03379             result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
03380 
03381             // Make sure new set handles are monotonically increasing
03382             assert( new_sets.empty() || set_handle > *new_sets.rbegin() );
03383             new_sets.insert( set_handle );
03384         }
03385     }
03386 
03387     std::copy( new_sets.begin(), new_sets.end(), std::back_inserter( entities ) );
03388     // Only need to sort if we came in with no sets on the end
03389     if( !no_sets ) std::sort( entities.begin(), entities.end() );
03390 
03391     for( rit = new_sets.begin(), i = 0; rit != new_sets.end(); ++rit, i++ )
03392     {
03393         // Unpack entities as vector, with length
03394         UNPACK_INT( buff_ptr, num_ents );
03395         members.resize( num_ents );
03396         if( num_ents ) UNPACK_EH( buff_ptr, &members[0], num_ents );
03397         result = get_local_handles( &members[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for ordered set contents" );
03398         result = mbImpl->add_entities( *rit, &members[0], num_ents );MB_CHK_SET_ERR( result, "Failed to add ents to ordered set in unpack" );
03399     }
03400 
03401     std::vector< int > num_pch( 2 * new_sets.size() );
03402     std::vector< int >::iterator vit;
03403     int tot_pch = 0;
03404     for( vit = num_pch.begin(); vit != num_pch.end(); ++vit )
03405     {
03406         UNPACK_INT( buff_ptr, *vit );
03407         tot_pch += *vit;
03408     }
03409 
03410     members.resize( tot_pch );
03411     UNPACK_EH( buff_ptr, &members[0], tot_pch );
03412     result = get_local_handles( &members[0], tot_pch, entities );MB_CHK_SET_ERR( result, "Failed to get local handle for parent/child sets" );
03413 
03414     int num               = 0;
03415     EntityHandle* mem_ptr = &members[0];
03416     for( rit = new_sets.begin(); rit != new_sets.end(); ++rit )
03417     {
03418         // Unpack parents/children
03419         int num_par = num_pch[num++], num_child = num_pch[num++];
03420         if( num_par + num_child )
03421         {
03422             for( i = 0; i < num_par; i++ )
03423             {
03424                 assert( 0 != mem_ptr[i] );
03425                 result = mbImpl->add_parent_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add parent to set in unpack" );
03426             }
03427             mem_ptr += num_par;
03428             for( i = 0; i < num_child; i++ )
03429             {
03430                 assert( 0 != mem_ptr[i] );
03431                 result = mbImpl->add_child_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add child to set in unpack" );
03432             }
03433             mem_ptr += num_child;
03434         }
03435     }
03436 
03437     // Unpack source handles
03438     Range dum_range;
03439     if( store_remote_handles && !new_sets.empty() )
03440     {
03441         UNPACK_RANGE( buff_ptr, dum_range );
03442         result = update_remote_data( new_sets, dum_range, from_proc, 0 );MB_CHK_SET_ERR( result, "Failed to set sharing data for sets" );
03443     }
03444 
03445     myDebug->tprintf( 4, "Done unpacking sets." );
03446 
03447     return MB_SUCCESS;
03448 }
03449 
03450 ErrorCode ParallelComm::pack_adjacencies( Range& /*entities*/,
03451                                           Range::const_iterator& /*start_rit*/,
03452                                           Range& /*whole_range*/,
03453                                           unsigned char*& /*buff_ptr*/,
03454                                           int& /*count*/,
03455                                           const bool /*just_count*/,
03456                                           const bool /*store_handles*/,
03457                                           const int /*to_proc*/ )
03458 {
03459     return MB_FAILURE;
03460 }
03461 
03462 ErrorCode ParallelComm::unpack_adjacencies( unsigned char*& /*buff_ptr*/,
03463                                             Range& /*entities*/,
03464                                             const bool /*store_handles*/,
03465                                             const int /*from_proc*/ )
03466 {
03467     return MB_FAILURE;
03468 }
03469 
03470 ErrorCode ParallelComm::pack_tags( Range& entities,
03471                                    const std::vector< Tag >& src_tags,
03472                                    const std::vector< Tag >& dst_tags,
03473                                    const std::vector< Range >& tag_ranges,
03474                                    Buffer* buff,
03475                                    const bool store_remote_handles,
03476                                    const int to_proc )
03477 {
03478     ErrorCode result;
03479     std::vector< Tag >::const_iterator tag_it, dst_it;
03480     std::vector< Range >::const_iterator rit;
03481     int count = 0;
03482 
03483     for( tag_it = src_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end(); ++tag_it, ++rit )
03484     {
03485         result = packed_tag_size( *tag_it, *rit, count );
03486         if( MB_SUCCESS != result ) return result;
03487     }
03488 
03489     // Number of tags
03490     count += sizeof( int );
03491 
03492     buff->check_space( count );
03493 
03494     PACK_INT( buff->buff_ptr, src_tags.size() );
03495 
03496     std::vector< EntityHandle > entities_vec( entities.size() );
03497     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
03498 
03499     for( tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end();
03500          ++tag_it, ++dst_it, ++rit )
03501     {
03502         result = pack_tag( *tag_it, *dst_it, *rit, entities_vec, buff, store_remote_handles, to_proc );
03503         if( MB_SUCCESS != result ) return result;
03504     }
03505 
03506     myDebug->tprintf( 4, "Done packing tags." );
03507 
03508     buff->set_stored_size();
03509 
03510     return MB_SUCCESS;
03511 }
03512 
03513 ErrorCode ParallelComm::packed_tag_size( Tag tag, const Range& tagged_entities, int& count )
03514 {
03515     // For dense tags, compute size assuming all entities have that tag
03516     // For sparse tags, get number of entities w/ that tag to compute size
03517 
03518     std::vector< int > var_len_sizes;
03519     std::vector< const void* > var_len_values;
03520 
03521     // Default value
03522     count += sizeof( int );
03523     if( NULL != tag->get_default_value() ) count += tag->get_default_value_size();
03524 
03525     // Size, type, data type
03526     count += 3 * sizeof( int );
03527 
03528     // Name
03529     count += sizeof( int );
03530     count += tag->get_name().size();
03531 
03532     // Range of tag
03533     count += sizeof( int ) + tagged_entities.size() * sizeof( EntityHandle );
03534 
03535     if( tag->get_size() == MB_VARIABLE_LENGTH )
03536     {
03537         const int num_ent = tagged_entities.size();
03538         // Send a tag size for each entity
03539         count += num_ent * sizeof( int );
03540         // Send tag data for each entity
03541         var_len_sizes.resize( num_ent );
03542         var_len_values.resize( num_ent );
03543         ErrorCode result =
03544             tag->get_data( sequenceManager, errorHandler, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get lengths of variable-length tag values" );
03545         count += std::accumulate( var_len_sizes.begin(), var_len_sizes.end(), 0 );
03546     }
03547     else
03548     {
03549         // Tag data values for range or vector
03550         count += tagged_entities.size() * tag->get_size();
03551     }
03552 
03553     return MB_SUCCESS;
03554 }
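
// Worked example (assuming a 4-byte int and an 8-byte EntityHandle) of the count computed
// by packed_tag_size above, for a fixed-size double tag named "TEMP" with a default value,
// tagged on 10 entities:
//   default value:    4 + 8      = 12
//   size/type/dtype:  3 * 4      = 12
//   name:             4 + 4      =  8
//   entity handles:   4 + 10 * 8 = 84
//   tag values:       10 * 8     = 80
//   total added to count         = 196 bytes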
03555 
03556 ErrorCode ParallelComm::pack_tag( Tag src_tag,
03557                                   Tag dst_tag,
03558                                   const Range& tagged_entities,
03559                                   const std::vector< EntityHandle >& whole_vec,
03560                                   Buffer* buff,
03561                                   const bool store_remote_handles,
03562                                   const int to_proc )
03563 {
03564     ErrorCode result;
03565     std::vector< int > var_len_sizes;
03566     std::vector< const void* > var_len_values;
03567 
03568     if( src_tag != dst_tag )
03569     {
03570         if( dst_tag->get_size() != src_tag->get_size() ) return MB_TYPE_OUT_OF_RANGE;
03571         if( dst_tag->get_data_type() != src_tag->get_data_type() && dst_tag->get_data_type() != MB_TYPE_OPAQUE &&
03572             src_tag->get_data_type() != MB_TYPE_OPAQUE )
03573             return MB_TYPE_OUT_OF_RANGE;
03574     }
03575 
03576     // Size, type, data type
03577     buff->check_space( 3 * sizeof( int ) );
03578     PACK_INT( buff->buff_ptr, src_tag->get_size() );
03579     TagType this_type;
03580     result = mbImpl->tag_get_type( dst_tag, this_type );
03581     PACK_INT( buff->buff_ptr, (int)this_type );
03582     DataType data_type = src_tag->get_data_type();
03583     PACK_INT( buff->buff_ptr, (int)data_type );
03584     int type_size = TagInfo::size_from_data_type( data_type );
03585 
03586     // Default value
03587     if( NULL == src_tag->get_default_value() )
03588     {
03589         buff->check_space( sizeof( int ) );
03590         PACK_INT( buff->buff_ptr, 0 );
03591     }
03592     else
03593     {
03594         buff->check_space( src_tag->get_default_value_size() );
03595         PACK_BYTES( buff->buff_ptr, src_tag->get_default_value(), src_tag->get_default_value_size() );
03596     }
03597 
03598     // Name
03599     buff->check_space( src_tag->get_name().size() );
03600     PACK_BYTES( buff->buff_ptr, dst_tag->get_name().c_str(), dst_tag->get_name().size() );
03601 
03602     myDebug->tprintf( 4, "Packing tag \"%s\"", src_tag->get_name().c_str() );
03603     if( src_tag != dst_tag ) myDebug->tprintf( 4, " (as tag \"%s\")", dst_tag->get_name().c_str() );
03604     myDebug->tprintf( 4, "\n" );
03605 
03606     // Pack entities
03607     buff->check_space( tagged_entities.size() * sizeof( EntityHandle ) + sizeof( int ) );
03608     PACK_INT( buff->buff_ptr, tagged_entities.size() );
03609     std::vector< EntityHandle > dum_tagged_entities( tagged_entities.size() );
03610     result = get_remote_handles( store_remote_handles, tagged_entities, &dum_tagged_entities[0], to_proc, whole_vec );
03611     if( MB_SUCCESS != result )
03612     {
03613         if( myDebug->get_verbosity() == 3 )
03614         {
03615             std::cerr << "Failed to get remote handles for tagged entities:" << std::endl;
03616             tagged_entities.print( "  " );
03617         }
03618         MB_SET_ERR( result, "Failed to get remote handles for tagged entities" );
03619     }
03620 
03621     PACK_EH( buff->buff_ptr, &dum_tagged_entities[0], dum_tagged_entities.size() );
03622 
03623     const size_t num_ent = tagged_entities.size();
03624     if( src_tag->get_size() == MB_VARIABLE_LENGTH )
03625     {
03626         var_len_sizes.resize( num_ent, 0 );
03627         var_len_values.resize( num_ent, 0 );
03628         result = mbImpl->tag_get_by_ptr( src_tag, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get variable-length tag data in pack_tags" );
03629         buff->check_space( num_ent * sizeof( int ) );
03630         PACK_INTS( buff->buff_ptr, &var_len_sizes[0], num_ent );
03631         for( unsigned int i = 0; i < num_ent; i++ )
03632         {
03633             buff->check_space( var_len_sizes[i] );
03634             PACK_VOID( buff->buff_ptr, var_len_values[i], type_size * var_len_sizes[i] );
03635         }
03636     }
03637     else
03638     {
03639         buff->check_space( num_ent * src_tag->get_size() );
03640         // Should be OK to read directly into buffer, since tags are untyped and
03641         // handled by memcpy
03642         result = mbImpl->tag_get_data( src_tag, tagged_entities, buff->buff_ptr );MB_CHK_SET_ERR( result, "Failed to get tag data in pack_tags" );
03643         buff->buff_ptr += num_ent * src_tag->get_size();
03644         PC( num_ent * src_tag->get_size(), " void" );
03645     }
03646 
03647     return MB_SUCCESS;
03648 }
03649 
03650 ErrorCode ParallelComm::get_tag_send_list( const Range& whole_range,
03651                                            std::vector< Tag >& all_tags,
03652                                            std::vector< Range >& tag_ranges )
03653 {
03654     std::vector< Tag > tmp_tags;
03655     ErrorCode result = mbImpl->tag_get_tags( tmp_tags );MB_CHK_SET_ERR( result, "Failed to get tags in pack_tags" );
03656 
03657     std::vector< Tag >::iterator tag_it;
03658     for( tag_it = tmp_tags.begin(); tag_it != tmp_tags.end(); ++tag_it )
03659     {
03660         std::string tag_name;
03661         result = mbImpl->tag_get_name( *tag_it, tag_name );
03662         if( tag_name.c_str()[0] == '_' && tag_name.c_str()[1] == '_' ) continue;  // Skip internal ("__"-prefixed) tags
03663 
03664         Range tmp_range;
03665         result = ( *tag_it )->get_tagged_entities( sequenceManager, tmp_range );MB_CHK_SET_ERR( result, "Failed to get entities for tag in pack_tags" );
03666         tmp_range = intersect( tmp_range, whole_range );
03667 
03668         if( tmp_range.empty() ) continue;
03669 
03670         // OK, we'll be sending this tag
03671         all_tags.push_back( *tag_it );
03672         tag_ranges.push_back( Range() );
03673         tag_ranges.back().swap( tmp_range );
03674     }
03675 
03676     return MB_SUCCESS;
03677 }
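
// Usage sketch (hypothetical, not part of the MOAB source) of the contract of
// get_tag_send_list above; 'send_ents' is an assumed Range of entities being shipped.
/*
    std::vector< Tag > send_tags;
    std::vector< Range > send_tag_ranges;
    ErrorCode rval = get_tag_send_list( send_ents, send_tags, send_tag_ranges );
    // send_tags[i] is tagged on send_tag_ranges[i], the intersection of that tag's tagged
    // entities with send_ents; tags whose names begin with "__" are treated as internal
    // and are never included.
*/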
03678 
03679 ErrorCode ParallelComm::unpack_tags( unsigned char*& buff_ptr,
03680                                      std::vector< EntityHandle >& entities,
03681                                      const bool /*store_remote_handles*/,
03682                                      const int /*from_proc*/,
03683                                      const MPI_Op* const mpi_op )
03684 {
03685     // Tags
03686     // Get all the tags
03687     // For dense tags, compute size assuming all entities have that tag
03688     // For sparse tags, get number of entities w/ that tag to compute size
03689 
03690     ErrorCode result;
03691 
03692     int num_tags;
03693     UNPACK_INT( buff_ptr, num_tags );
03694     std::vector< const void* > var_len_vals;
03695     std::vector< unsigned char > dum_vals;
03696     std::vector< EntityHandle > dum_ehvals;
03697 
03698     for( int i = 0; i < num_tags; i++ )
03699     {
03700         // Tag handle
03701         Tag tag_handle;
03702 
03703         // Size, data type
03704         int tag_size, tag_data_type, tag_type;
03705         UNPACK_INT( buff_ptr, tag_size );
03706         UNPACK_INT( buff_ptr, tag_type );
03707         UNPACK_INT( buff_ptr, tag_data_type );
03708 
03709         // Default value
03710         int def_val_size;
03711         UNPACK_INT( buff_ptr, def_val_size );
03712         void* def_val_ptr = NULL;
03713         if( def_val_size )
03714         {
03715             def_val_ptr = buff_ptr;
03716             buff_ptr += def_val_size;
03717             UPC( tag_size, " void" );
03718         }
03719 
03720         // Name
03721         int name_len;
03722         UNPACK_INT( buff_ptr, name_len );
03723         std::string tag_name( reinterpret_cast< char* >( buff_ptr ), name_len );
03724         buff_ptr += name_len;
03725         UPC( 64, " chars" );
03726 
03727         myDebug->tprintf( 4, "Unpacking tag %s\n", tag_name.c_str() );
03728 
03729         // Create the tag
03730         if( tag_size == MB_VARIABLE_LENGTH )
03731             result = mbImpl->tag_get_handle( tag_name.c_str(), def_val_size, (DataType)tag_data_type, tag_handle,
03732                                              MB_TAG_VARLEN | MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
03733         else
03734             result = mbImpl->tag_get_handle( tag_name.c_str(), tag_size, (DataType)tag_data_type, tag_handle,
03735                                              MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
03736         if( MB_SUCCESS != result ) return result;
03737 
03738         // Get handles and convert to local handles
03739         int num_ents;
03740         UNPACK_INT( buff_ptr, num_ents );
03741         std::vector< EntityHandle > dum_ents( num_ents );
03742         UNPACK_EH( buff_ptr, &dum_ents[0], num_ents );
03743 
03744         // In this case handles are indices into new entity range; need to convert
03745         // to local handles
03746         result = get_local_handles( &dum_ents[0], num_ents, entities );MB_CHK_SET_ERR( result, "Unable to convert to local handles" );
03747 
03748         // If it's a handle type, also convert tag vals in-place in buffer
03749         if( MB_TYPE_HANDLE == tag_type )
03750         {
03751             dum_ehvals.resize( num_ents );
03752             UNPACK_EH( buff_ptr, &dum_ehvals[0], num_ents );
03753             result = get_local_handles( &dum_ehvals[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for tag vals" );
03754         }
03755 
03756         DataType data_type;
03757         mbImpl->tag_get_data_type( tag_handle, data_type );
03758         int type_size = TagInfo::size_from_data_type( data_type );
03759 
03760         if( !dum_ents.empty() )
03761         {
03762             if( tag_size == MB_VARIABLE_LENGTH )
03763             {
03764                 // Be careful of alignment here. If the integers are aligned
03765                 // in the buffer, we can use them directly. Otherwise we must
03766                 // copy them.
03767                 std::vector< int > var_lengths( num_ents );
03768                 UNPACK_INTS( buff_ptr, &var_lengths[0], num_ents );
03769                 UPC( sizeof( int ) * num_ents, " void" );
03770 
03771                 // Get pointers into buffer for each tag value
03772                 var_len_vals.resize( num_ents );
03773                 for( std::vector< EntityHandle >::size_type j = 0; j < (std::vector< EntityHandle >::size_type)num_ents;
03774                      j++ )
03775                 {
03776                     var_len_vals[j] = buff_ptr;
03777                     buff_ptr += var_lengths[j] * type_size;
03778                     UPC( var_lengths[j], " void" );
03779                 }
03780                 result =
03781                     mbImpl->tag_set_by_ptr( tag_handle, &dum_ents[0], num_ents, &var_len_vals[0], &var_lengths[0] );MB_CHK_SET_ERR( result, "Failed to set tag data when unpacking variable-length tag" );
03782             }
03783             else
03784             {
03785                 // Get existing values of dst tag
03786                 dum_vals.resize( tag_size * num_ents );
03787                 if( mpi_op )
03788                 {
03789                     int tag_length;
03790                     result = mbImpl->tag_get_length( tag_handle, tag_length );MB_CHK_SET_ERR( result, "Failed to get tag length" );
03791                     result = mbImpl->tag_get_data( tag_handle, &dum_ents[0], num_ents, &dum_vals[0] );MB_CHK_SET_ERR( result, "Failed to get existing value of dst tag on entities" );
03792                     result = reduce_void( tag_data_type, *mpi_op, tag_length * num_ents, &dum_vals[0], buff_ptr );MB_CHK_SET_ERR( result, "Failed to perform mpi op on dst tags" );
03793                 }
03794                 result = mbImpl->tag_set_data( tag_handle, &dum_ents[0], num_ents, buff_ptr );MB_CHK_SET_ERR( result, "Failed to set range-based tag data when unpacking tag" );
03795                 buff_ptr += num_ents * tag_size;
03796                 UPC( num_ents * tag_size, " void" );
03797             }
03798         }
03799     }
03800 
03801     myDebug->tprintf( 4, "Done unpacking tags.\n" );
03802 
03803     return MB_SUCCESS;
03804 }
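// Rough sketch of the per-tag wire layout consumed above (inferred from the
// UNPACK_* calls; the packing side elsewhere in this file must mirror it):
//
//   int           tag_size        (bytes, or MB_VARIABLE_LENGTH)
//   int           tag_type        (storage/creation flags)
//   int           tag_data_type   (DataType)
//   int           def_val_size    (0 if the tag has no default value)
//   byte[]        default value   (def_val_size bytes)
//   int           name_len
//   char[]        tag name        (name_len characters, not NUL-terminated)
//   int           num_ents
//   EntityHandle[num_ents]        entity handles (indices into the new-entity range)
//   fixed-length tags:    num_ents * tag_size bytes of values
//   variable-length tags: int[num_ents] per-entity lengths, then the packed values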
03805 
03806 template < class T >
03807 T LAND( const T& arg1, const T& arg2 )
03808 {
03809     return arg1 && arg2;
03810 }
03811 template < class T >
03812 T LOR( const T& arg1, const T& arg2 )
03813 {
03814     return arg1 || arg2;
03815 }
03816 template < class T >
03817 T LXOR( const T& arg1, const T& arg2 )
03818 {
03819     return ( ( arg1 && !arg2 ) || ( !arg1 && arg2 ) );
03820 }
03821 template < class T >
03822 T MAX( const T& arg1, const T& arg2 )
03823 {
03824     return ( arg1 > arg2 ? arg1 : arg2 );
03825 }
03826 template < class T >
03827 T MIN( const T& arg1, const T& arg2 )
03828 {
03829     return ( arg1 < arg2 ? arg1 : arg2 );
03830 }
03831 template < class T >
03832 T ADD( const T& arg1, const T& arg2 )
03833 {
03834     return arg1 + arg2;
03835 }
03836 template < class T >
03837 T MULT( const T& arg1, const T& arg2 )
03838 {
03839     return arg1 * arg2;
03840 }
03841 
03842 template < class T >
03843 ErrorCode ParallelComm::reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals )
03844 {
03845     T* old_tmp = reinterpret_cast< T* >( old_vals );
03846     // T *new_tmp = reinterpret_cast<T*>(new_vals);
03847     // The new_vals pointer may not be suitably aligned for T; some compilers optimize assuming alignment, so copy into an aligned buffer first
03848 
03849     std::vector< T > new_values;
03850     new_values.resize( num_ents );
03851     memcpy( &new_values[0], new_vals, num_ents * sizeof( T ) );
03852     T* new_tmp = &new_values[0];
03853 
03854     if( mpi_op == MPI_SUM )
03855         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, ADD< T > );
03856     else if( mpi_op == MPI_PROD )
03857         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MULT< T > );
03858     else if( mpi_op == MPI_MAX )
03859         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MAX< T > );
03860     else if( mpi_op == MPI_MIN )
03861         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MIN< T > );
03862     else if( mpi_op == MPI_LAND )
03863         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LAND< T > );
03864     else if( mpi_op == MPI_LOR )
03865         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LOR< T > );
03866     else if( mpi_op == MPI_LXOR )
03867         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LXOR< T > );
03868     else if( mpi_op == MPI_BAND || mpi_op == MPI_BOR || mpi_op == MPI_BXOR )
03869     {
03870         std::cerr << "Bitwise operations not allowed in tag reductions." << std::endl;
03871         return MB_FAILURE;
03872     }
03873     else if( mpi_op != MPI_OP_NULL )
03874     {
03875         std::cerr << "Unknown MPI operation type." << std::endl;
03876         return MB_TYPE_OUT_OF_RANGE;
03877     }
03878 
03879     // copy now the result back where it should be
03880     memcpy( new_vals, new_tmp, num_ents * sizeof( T ) );
03881     std::vector< T >().swap( new_values );  // swap with an empty vector to release the allocated memory
03882 
03883     return MB_SUCCESS;
03884 }
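// A minimal sketch (disabled) of the element-wise combination performed above,
// shown on plain local arrays; the array names and values are hypothetical and
// only the MPI_SUM branch is illustrated.
#if 0
static void example_reduce_sum()
{
    double old_vals[3] = { 1.0, 2.0, 3.0 };     // existing (destination) values
    double new_vals[3] = { 10.0, 20.0, 30.0 };  // incoming values from the buffer
    // Same operation as the MPI_SUM branch: new_vals[k] = old_vals[k] + new_vals[k]
    std::transform( old_vals, old_vals + 3, new_vals, new_vals, ADD< double > );
    // new_vals now holds { 11.0, 22.0, 33.0 }
}
#endif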
03885 
03886 ErrorCode ParallelComm::reduce_void( int tag_data_type,
03887                                      const MPI_Op mpi_op,
03888                                      int num_ents,
03889                                      void* old_vals,
03890                                      void* new_vals )
03891 {
03892     ErrorCode result;
03893     switch( tag_data_type )
03894     {
03895         case MB_TYPE_INTEGER:
03896             result = reduce< int >( mpi_op, num_ents, old_vals, new_vals );
03897             break;
03898         case MB_TYPE_DOUBLE:
03899             result = reduce< double >( mpi_op, num_ents, old_vals, new_vals );
03900             break;
03901         case MB_TYPE_BIT:
03902             result = reduce< unsigned char >( mpi_op, num_ents, old_vals, new_vals );
03903             break;
03904         default:
03905             result = MB_SUCCESS;
03906             break;
03907     }
03908 
03909     return result;
03910 }
03911 
03912 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, int resolve_dim, int shared_dim, const Tag* id_tag )
03913 {
03914     ErrorCode result;
03915     Range proc_ents;
03916 
03917     // Check for structured mesh; if so, resolve shared vertices through the structured-mesh (Scd) interface
03918     ScdInterface* scdi;
03919     result = mbImpl->query_interface( scdi );
03920     if( scdi )
03921     {
03922         result = scdi->tag_shared_vertices( this, this_set );
03923         if( MB_SUCCESS == result )
03924         {
03925             myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
03926             return result;
03927         }
03928     }
03929 
03930     if( 0 == this_set )
03931     {
03932         // Get the entities in the partition sets
03933         for( Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit )
03934         {
03935             Range tmp_ents;
03936             result = mbImpl->get_entities_by_handle( *rit, tmp_ents, true );
03937             if( MB_SUCCESS != result ) return result;
03938             proc_ents.merge( tmp_ents );
03939         }
03940     }
03941     else
03942     {
03943         result = mbImpl->get_entities_by_handle( this_set, proc_ents, true );
03944         if( MB_SUCCESS != result ) return result;
03945     }
03946 
03947     // Resolve dim is maximal dim of entities in proc_ents
03948     if( -1 == resolve_dim )
03949     {
03950         if( !proc_ents.empty() ) resolve_dim = mbImpl->dimension_from_handle( *proc_ents.rbegin() );
03951     }
03952 
03953     // proc_ents should all be of the same dimension
03954     if( resolve_dim > shared_dim &&
03955         mbImpl->dimension_from_handle( *proc_ents.rbegin() ) != mbImpl->dimension_from_handle( *proc_ents.begin() ) )
03956     {
03957         Range::iterator lower = proc_ents.lower_bound( CN::TypeDimensionMap[0].first ),
03958                         upper = proc_ents.upper_bound( CN::TypeDimensionMap[resolve_dim - 1].second );
03959         proc_ents.erase( lower, upper );
03960     }
03961 
03962     // Must call even if we don't have any entities, to make sure
03963     // collective comm'n works
03964     return resolve_shared_ents( this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag );
03965 }
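// A minimal usage sketch (disabled): how an application might invoke the
// overload above after loading a partitioned mesh. The names mb, comm and
// part_set are hypothetical, and the default GLOBAL_ID tag is assumed.
#if 0
static void example_resolve_shared( Interface* mb, MPI_Comm comm, EntityHandle part_set )
{
    ParallelComm pcomm( mb, comm );
    // Resolve a 3D element-based partition, sharing entities of dimension 2 and lower
    ErrorCode rval = pcomm.resolve_shared_ents( part_set, 3, 2, NULL );
    if( MB_SUCCESS != rval )
    {
        // handle the error
    }
}
#endif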
03966 
03967 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set,
03968                                              Range& proc_ents,
03969                                              int resolve_dim,
03970                                              int shared_dim,
03971                                              Range* skin_ents,
03972                                              const Tag* id_tag )
03973 {
03974 #ifdef MOAB_HAVE_MPE
03975     if( myDebug->get_verbosity() == 2 )
03976     {
03977         define_mpe();
03978         MPE_Log_event( RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents." );
03979     }
03980 #endif
03981 
03982     ErrorCode result;
03983     myDebug->tprintf( 1, "Resolving shared entities.\n" );
03984 
03985     if( resolve_dim < shared_dim )
03986     {
03987         MB_SET_ERR( MB_FAILURE, "MOAB does not support vertex-based partitions, only element-based ones" );
03988     }
03989 
03990     if( -1 == shared_dim )
03991     {
03992         if( !proc_ents.empty() )
03993             shared_dim = mbImpl->dimension_from_handle( *proc_ents.begin() ) - 1;
03994         else if( resolve_dim == 3 )
03995             shared_dim = 2;
03996     }
03997     int max_global_resolve_dim = -1;
03998     int err = MPI_Allreduce( &resolve_dim, &max_global_resolve_dim, 1, MPI_INT, MPI_MAX, proc_config().proc_comm() );
03999     if( MPI_SUCCESS != err )
04000     {
04001         MB_SET_ERR( MB_FAILURE, "Unable to guess global resolve_dim" );
04002     }
04003     if( shared_dim < 0 || resolve_dim < 0 )
04004     {
04005         // MB_SET_ERR(MB_FAILURE, "Unable to guess shared_dim or resolve_dim");
04006         resolve_dim = max_global_resolve_dim;
04007         shared_dim  = resolve_dim - 1;
04008     }
04009 
04010     if( resolve_dim < 0 || shared_dim < 0 ) return MB_SUCCESS;
04011     // no task has any mesh, get out
04012 
04013     // Get the skin entities by dimension
04014     Range tmp_skin_ents[4];
04015 
04016     // Get the entities to be skinned
04017     // Find the skin
04018     int skin_dim = resolve_dim - 1;
04019     if( !skin_ents )
04020     {
04021         skin_ents              = tmp_skin_ents;
04022         skin_ents[resolve_dim] = proc_ents;
04023         Skinner skinner( mbImpl );
04024         result =
04025             skinner.find_skin( this_set, skin_ents[skin_dim + 1], false, skin_ents[skin_dim], NULL, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
04026         myDebug->tprintf( 1, "Found skin:   skin_dim: %d resolve_dim: %d , now resolving.\n", skin_dim, resolve_dim );
04027         myDebug->tprintf( 3, "skin_ents[0].size(): %d skin_ents[1].size(): %d  \n", (int)skin_ents[0].size(),
04028                           (int)skin_ents[1].size() );
04029         // Get entities adjacent to skin ents from shared_dim down to zero
04030         for( int this_dim = skin_dim - 1; this_dim >= 0; this_dim-- )
04031         {
04032             result =
04033                 mbImpl->get_adjacencies( skin_ents[skin_dim], this_dim, true, skin_ents[this_dim], Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
04034 
04035             if( this_set && skin_dim == 2 && this_dim == 1 )
04036             {
04037                 result = mbImpl->add_entities( this_set, skin_ents[this_dim] );MB_CHK_ERR( result );
04038             }
04039         }
04040     }
04041     else if( skin_ents[resolve_dim].empty() )
04042         skin_ents[resolve_dim] = proc_ents;
04043 
04044     // Global id tag
04045     Tag gid_tag;
04046     if( id_tag )
04047         gid_tag = *id_tag;
04048     else
04049     {
04050         bool tag_created = false;
04051         int def_val      = -1;
04052         result = mbImpl->tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE | MB_TAG_CREAT,
04053                                          &def_val, &tag_created );
04054         if( MB_ALREADY_ALLOCATED != result && MB_SUCCESS != result )
04055         {
04056             MB_SET_ERR( result, "Failed to create/get gid tag handle" );
04057         }
04058         else if( tag_created )
04059         {
04060             // Just created it, so we need global ids
04061             result = assign_global_ids( this_set, skin_dim + 1, true, true, true );MB_CHK_SET_ERR( result, "Failed to assign global ids" );
04062         }
04063     }
04064 
04065     DataType tag_type;
04066     result = mbImpl->tag_get_data_type( gid_tag, tag_type );MB_CHK_SET_ERR( result, "Failed to get tag data type" );
04067     int bytes_per_tag;
04068     result = mbImpl->tag_get_bytes( gid_tag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed to get number of bytes per tag" );
04069     // On 64-bit platforms, long and int have different sizes; on 32-bit they do not
04070     // (if sizeof(long) is 8, assume a 64-bit machine)
04071 
04072     // Get gids for skin ents in a vector, to pass to gs
04073     std::vector< long > lgid_data( skin_ents[0].size() );
04074     // The id is stored as either a long or an int
04075     // (8 or 4 bytes, respectively, on a 64-bit machine)
04076     if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
04077     {  // It is a special id tag
04078         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &lgid_data[0] );MB_CHK_SET_ERR( result, "Couldn't get gid tag for skin vertices" );
04079     }
04080     else if( 4 == bytes_per_tag )
04081     {  // Must be GLOBAL_ID tag or 32 bits ...
04082         std::vector< int > gid_data( lgid_data.size() );
04083         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &gid_data[0] );MB_CHK_SET_ERR( result, "Failed to get gid tag for skin vertices" );
04084         std::copy( gid_data.begin(), gid_data.end(), lgid_data.begin() );
04085     }
04086     else
04087     {
04088         // Not supported flag
04089         MB_SET_ERR( MB_FAILURE, "Unsupported id tag" );
04090     }
04091 
04092     // Put handles in vector for passing to gs setup
04093     std::vector< Ulong > handle_vec;  // Assumes that we can do conversion from Ulong to EntityHandle
04094     std::copy( skin_ents[0].begin(), skin_ents[0].end(), std::back_inserter( handle_vec ) );
04095 
04096 #ifdef MOAB_HAVE_MPE
04097     if( myDebug->get_verbosity() == 2 )
04098     {
04099         MPE_Log_event( SHAREDV_START, procConfig.proc_rank(), "Creating crystal router." );
04100     }
04101 #endif
04102 
04103     // Get a crystal router
04104     gs_data::crystal_data* cd = procConfig.crystal_router();
04105 
04106     /*
04107     // Get total number of entities; will overshoot highest global id, but
04108     // that's OK
04109     int num_total[2] = {0, 0}, num_local[2] = {0, 0};
04110     result = mbImpl->get_number_entities_by_dimension(this_set, 0, num_local);
04111     if (MB_SUCCESS != result)return result;
04112     int failure = MPI_Allreduce(num_local, num_total, 1,
04113     MPI_INT, MPI_SUM, procConfig.proc_comm());
04114     if (failure) {
04115       MB_SET_ERR(MB_FAILURE, "Allreduce for total number of shared ents failed");
04116     }
04117     */
04118     // Call gather-scatter to get shared ids & procs
04119     gs_data* gsd = new gs_data();
04120     // assert(sizeof(ulong_) == sizeof(EntityHandle));
04121     result = gsd->initialize( skin_ents[0].size(), &lgid_data[0], &handle_vec[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
04122 
04123     // Get shared proc tags
04124     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
04125     result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
04126 
04127     // Load shared verts into a tuple, then sort by index
04128     TupleList shared_verts;
04129     shared_verts.initialize( 2, 0, 1, 0, skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 ) );
04130     shared_verts.enableWriteAccess();
04131 
04132     unsigned int i = 0, j = 0;
04133     for( unsigned int p = 0; p < gsd->nlinfo->_np; p++ )
04134         for( unsigned int np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
04135         {
04136             shared_verts.vi_wr[i++] = gsd->nlinfo->_sh_ind[j];
04137             shared_verts.vi_wr[i++] = gsd->nlinfo->_target[p];
04138             shared_verts.vul_wr[j]  = gsd->nlinfo->_ulabels[j];
04139             j++;
04140             shared_verts.inc_n();
04141         }
04142 
04143     myDebug->tprintf( 3, " shared verts size %d \n", (int)shared_verts.get_n() );
04144 
04145     int max_size = skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 );
04146     moab::TupleList::buffer sort_buffer;
04147     sort_buffer.buffer_init( max_size );
04148     shared_verts.sort( 0, &sort_buffer );
04149     sort_buffer.reset();
04150 
04151     // Set sharing procs and handles tags on skin ents
04152     int maxp = -1;
04153     std::vector< int > sharing_procs( MAX_SHARING_PROCS );
04154     std::fill( sharing_procs.begin(), sharing_procs.end(), maxp );
04155     j = 0;
04156     i = 0;
04157 
04158     // Get ents shared by 1 or n procs
04159     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
04160     Range proc_verts;
04161     result = mbImpl->get_adjacencies( proc_ents, 0, false, proc_verts, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get proc_verts" );
04162 
04163     myDebug->print( 3, " resolve shared ents:  proc verts ", proc_verts );
04164     result = tag_shared_verts( shared_verts, skin_ents, proc_nvecs, proc_verts );MB_CHK_SET_ERR( result, "Failed to tag shared verts" );
04165 
04166 #ifdef MOAB_HAVE_MPE
04167     if( myDebug->get_verbosity() == 2 )
04168     {
04169         MPE_Log_event( SHAREDV_END, procConfig.proc_rank(), "Finished tag_shared_verts." );
04170     }
04171 #endif
04172 
04173     // Get entities shared by 1 or n procs
04174     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );MB_CHK_SET_ERR( result, "Failed to tag shared entities" );
04175 
04176     shared_verts.reset();
04177 
04178     if( myDebug->get_verbosity() > 0 )
04179     {
04180         for( std::map< std::vector< int >, std::vector< EntityHandle > >::const_iterator mit = proc_nvecs.begin();
04181              mit != proc_nvecs.end(); ++mit )
04182         {
04183             myDebug->tprintf( 1, "Iface: " );
04184             for( std::vector< int >::const_iterator vit = ( mit->first ).begin(); vit != ( mit->first ).end(); ++vit )
04185                 myDebug->printf( 1, " %d", *vit );
04186             myDebug->print( 1, "\n" );
04187         }
04188     }
04189 
04190     // Create the sets for each interface; store them as tags on
04191     // the interface instance
04192     Range iface_sets;
04193     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
04194 
04195     // Establish comm procs and buffers for them
04196     std::set< unsigned int > procs;
04197     result = get_interface_procs( procs, true );MB_CHK_SET_ERR( result, "Failed to get interface procs" );
04198 
04199 #ifndef NDEBUG
04200     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Shared handle check failed after interface vertex exchange" );
04201 #endif
04202 
04203     // Resolve shared entity remote handles; implemented in ghost cell exchange
04204     // code because it's so similar
04205     result = exchange_ghost_cells( -1, -1, 0, 0, true, true );MB_CHK_SET_ERR( result, "Failed to resolve shared entity remote handles" );
04206 
04207     // Now build parent/child links for interface sets
04208     result = create_iface_pc_links();MB_CHK_SET_ERR( result, "Failed to create interface parent/child links" );
04209 
04210     gsd->reset();
04211     delete gsd;
04212 
04213 #ifdef MOAB_HAVE_MPE
04214     if( myDebug->get_verbosity() == 2 )
04215     {
04216         MPE_Log_event( RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents." );
04217     }
04218 #endif
04219 
04220     // std::ostringstream ent_str;
04221     // ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
04222     // mbImpl->write_mesh(ent_str.str().c_str());
04223 
04224     // Done
04225     return result;
04226 }
04227 
04228 void ParallelComm::define_mpe()
04229 {
04230 #ifdef MOAB_HAVE_MPE
04231     if( myDebug->get_verbosity() == 2 )
04232     {
04233         // Define mpe states used for logging
04234         int success;
04235         MPE_Log_get_state_eventIDs( &IFACE_START, &IFACE_END );
04236         MPE_Log_get_state_eventIDs( &GHOST_START, &GHOST_END );
04237         MPE_Log_get_state_eventIDs( &SHAREDV_START, &SHAREDV_END );
04238         MPE_Log_get_state_eventIDs( &RESOLVE_START, &RESOLVE_END );
04239         MPE_Log_get_state_eventIDs( &ENTITIES_START, &ENTITIES_END );
04240         MPE_Log_get_state_eventIDs( &RHANDLES_START, &RHANDLES_END );
04241         MPE_Log_get_state_eventIDs( &OWNED_START, &OWNED_END );
04242         success = MPE_Describe_state( IFACE_START, IFACE_END, "Resolve interface ents", "green" );
04243         assert( MPE_LOG_OK == success );
04244         success = MPE_Describe_state( GHOST_START, GHOST_END, "Exchange ghost ents", "red" );
04245         assert( MPE_LOG_OK == success );
04246         success = MPE_Describe_state( SHAREDV_START, SHAREDV_END, "Resolve interface vertices", "blue" );
04247         assert( MPE_LOG_OK == success );
04248         success = MPE_Describe_state( RESOLVE_START, RESOLVE_END, "Resolve shared ents", "purple" );
04249         assert( MPE_LOG_OK == success );
04250         success = MPE_Describe_state( ENTITIES_START, ENTITIES_END, "Exchange shared ents", "yellow" );
04251         assert( MPE_LOG_OK == success );
04252         success = MPE_Describe_state( RHANDLES_START, RHANDLES_END, "Remote handles", "cyan" );
04253         assert( MPE_LOG_OK == success );
04254         success = MPE_Describe_state( OWNED_START, OWNED_END, "Exchange owned ents", "black" );
04255         assert( MPE_LOG_OK == success );
04256     }
04257 #endif
04258 }
04259 
04260 ErrorCode ParallelComm::resolve_shared_ents( ParallelComm** pc,
04261                                              const unsigned int np,
04262                                              EntityHandle this_set,
04263                                              const int part_dim )
04264 {
04265     std::vector< Range > verts( np );
04266     int tot_verts = 0;
04267     unsigned int p, i, j, v;
04268     ErrorCode rval;
04269     for( p = 0; p < np; p++ )
04270     {
04271         Skinner skinner( pc[p]->get_moab() );
04272         Range part_ents, skin_ents;
04273         rval = pc[p]->get_moab()->get_entities_by_dimension( this_set, part_dim, part_ents );
04274         if( MB_SUCCESS != rval ) return rval;
04275         rval = skinner.find_skin( this_set, part_ents, false, skin_ents, 0, true, true, true );
04276         if( MB_SUCCESS != rval ) return rval;
04277         rval = pc[p]->get_moab()->get_adjacencies( skin_ents, 0, true, verts[p], Interface::UNION );
04278         if( MB_SUCCESS != rval ) return rval;
04279         tot_verts += verts[p].size();
04280     }
04281 
04282     TupleList shared_ents;
04283     shared_ents.initialize( 2, 0, 1, 0, tot_verts );
04284     shared_ents.enableWriteAccess();
04285 
04286     i = 0;
04287     j = 0;
04288     std::vector< int > gids;
04289     Range::iterator rit;
04290     Tag gid_tag;
04291     for( p = 0; p < np; p++ )
04292     {
04293         gid_tag = pc[p]->get_moab()->globalId_tag();
04294 
04295         gids.resize( verts[p].size() );
04296         rval = pc[p]->get_moab()->tag_get_data( gid_tag, verts[p], &gids[0] );
04297         if( MB_SUCCESS != rval ) return rval;
04298 
04299         for( v = 0, rit = verts[p].begin(); v < gids.size(); v++, ++rit )
04300         {
04301             shared_ents.vi_wr[i++] = gids[v];
04302             shared_ents.vi_wr[i++] = p;
04303             shared_ents.vul_wr[j]  = *rit;
04304             j++;
04305             shared_ents.inc_n();
04306         }
04307     }
04308 
04309     moab::TupleList::buffer sort_buffer;
04310     sort_buffer.buffer_init( tot_verts );
04311     shared_ents.sort( 0, &sort_buffer );
04312     sort_buffer.reset();
04313 
04314     j = 0;
04315     i = 0;
04316     std::vector< EntityHandle > handles;
04317     std::vector< int > procs;
04318 
04319     while( i < shared_ents.get_n() )
04320     {
04321         handles.clear();
04322         procs.clear();
04323 
04324         // Count & accumulate sharing procs
04325         int this_gid = shared_ents.vi_rd[j];
04326         while( i < shared_ents.get_n() && shared_ents.vi_rd[j] == this_gid )
04327         {
04328             j++;
04329             procs.push_back( shared_ents.vi_rd[j++] );
04330             handles.push_back( shared_ents.vul_rd[i++] );
04331         }
04332         if( 1 == procs.size() ) continue;
04333 
04334         for( v = 0; v < procs.size(); v++ )
04335         {
04336             rval = pc[procs[v]]->update_remote_data( handles[v], &procs[0], &handles[0], procs.size(),
04337                                                      ( procs[0] == (int)pc[procs[v]]->rank()
04338                                                            ? PSTATUS_INTERFACE
04339                                                            : ( PSTATUS_NOT_OWNED | PSTATUS_INTERFACE ) ) );
04340             if( MB_SUCCESS != rval ) return rval;
04341         }
04342     }
04343 
04344     std::set< unsigned int > psets;
04345     for( p = 0; p < np; p++ )
04346     {
04347         rval = pc[p]->create_interface_sets( this_set, part_dim, part_dim - 1 );
04348         if( MB_SUCCESS != rval ) return rval;
04349         // Establish comm procs and buffers for them
04350         psets.clear();
04351         rval = pc[p]->get_interface_procs( psets, true );
04352         if( MB_SUCCESS != rval ) return rval;
04353     }
04354 
04355     shared_ents.reset();
04356 
04357     return MB_SUCCESS;
04358 }
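// Worked example of the sorted-tuple scan above (illustrative values): after
// sort( 0, ... ), tuples with the same global id are adjacent, e.g.
//   (gid=7, proc=0, handle=h0), (gid=7, proc=2, handle=h2)
// so the inner while loop collects procs = {0, 2} and handles = {h0, h2} for
// that vertex and then calls update_remote_data() on both ParallelComm
// instances; vertices listed by only one proc are skipped.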
04359 
04360 ErrorCode ParallelComm::tag_iface_entities()
04361 {
04362     ErrorCode result = MB_SUCCESS;
04363     Range iface_ents, tmp_ents, rmv_ents;
04364     std::vector< unsigned char > pstat;
04365     unsigned char set_pstat;
04366     Range::iterator rit2;
04367     unsigned int i;
04368 
04369     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
04370     {
04371         iface_ents.clear();
04372 
04373         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get interface set contents" );
04374         pstat.resize( iface_ents.size() );
04375         result = mbImpl->tag_get_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set entities" );
04376         result = mbImpl->tag_get_data( pstatus_tag(), &( *rit ), 1, &set_pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set" );
04377         rmv_ents.clear();
04378         for( rit2 = iface_ents.begin(), i = 0; rit2 != iface_ents.end(); ++rit2, i++ )
04379         {
04380             if( !( pstat[i] & PSTATUS_INTERFACE ) )
04381             {
04382                 rmv_ents.insert( *rit2 );
04383                 pstat[i] = 0x0;
04384             }
04385         }
04386         result = mbImpl->remove_entities( *rit, rmv_ents );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
04387 
04388         if( !( set_pstat & PSTATUS_NOT_OWNED ) ) continue;
04389         // If we're here, we need to set the notowned status on (remaining) set contents
04390 
04391         // Remove rmv_ents from the contents list
04392         iface_ents = subtract( iface_ents, rmv_ents );
04393         // Compress the pstat vector (move the 0x0 entries to the end; only the first iface_ents.size() entries are used below)
04394         std::remove_if( pstat.begin(), pstat.end(),
04395                         std::bind( std::equal_to< unsigned char >(), std::placeholders::_1, 0x0 ) );
04396         // std::bind2nd(std::equal_to<unsigned char>(), 0x0));
04397         // https://stackoverflow.com/questions/32739018/a-replacement-for-stdbind2nd
04398         // Fold the not_owned bit into remaining values
04399         unsigned int sz = iface_ents.size();
04400         for( i = 0; i < sz; i++ )
04401             pstat[i] |= PSTATUS_NOT_OWNED;
04402 
04403         // Set the tag on the entities
04404         result = mbImpl->tag_set_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus values for interface set entities" );
04405     }
04406 
04407     return MB_SUCCESS;
04408 }
04409 
04410 ErrorCode ParallelComm::set_pstatus_entities( Range& pstatus_ents,
04411                                               unsigned char pstatus_val,
04412                                               bool lower_dim_ents,
04413                                               bool verts_too,
04414                                               int operation )
04415 {
04416     std::vector< unsigned char > pstatus_vals( pstatus_ents.size() );
04417     Range all_ents, *range_ptr = &pstatus_ents;
04418     ErrorCode result;
04419     if( lower_dim_ents || verts_too )
04420     {
04421         all_ents      = pstatus_ents;
04422         range_ptr     = &all_ents;
04423         int start_dim = ( lower_dim_ents ? mbImpl->dimension_from_handle( *pstatus_ents.rbegin() ) - 1 : 0 );
04424         for( ; start_dim >= 0; start_dim-- )
04425         {
04426             result = mbImpl->get_adjacencies( all_ents, start_dim, true, all_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get adjacencies for pstatus entities" );
04427         }
04428     }
04429     if( Interface::UNION == operation )
04430     {
04431         result = mbImpl->tag_get_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
04432         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
04433             pstatus_vals[i] |= pstatus_val;
04434     }
04435     else
04436     {
04437         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
04438             pstatus_vals[i] = pstatus_val;
04439     }
04440     result = mbImpl->tag_set_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
04441 
04442     return MB_SUCCESS;
04443 }
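// A minimal usage sketch (disabled): OR an extra pstatus bit onto a range of
// entities, their lower-dimensional adjacencies and their vertices. The range
// name ghosts and the calling context are hypothetical.
#if 0
static ErrorCode example_mark_ghosts( ParallelComm& pcomm, Range& ghosts )
{
    // OR PSTATUS_GHOST into whatever bits the entities already carry
    return pcomm.set_pstatus_entities( ghosts, PSTATUS_GHOST, true, true, Interface::UNION );
}
#endif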
04444 
04445 ErrorCode ParallelComm::set_pstatus_entities( EntityHandle* pstatus_ents,
04446                                               int num_ents,
04447                                               unsigned char pstatus_val,
04448                                               bool lower_dim_ents,
04449                                               bool verts_too,
04450                                               int operation )
04451 {
04452     std::vector< unsigned char > pstatus_vals( num_ents );
04453     ErrorCode result;
04454     if( lower_dim_ents || verts_too )
04455     {
04456         // In this case, call the range-based version
04457         Range tmp_range;
04458         std::copy( pstatus_ents, pstatus_ents + num_ents, range_inserter( tmp_range ) );
04459         return set_pstatus_entities( tmp_range, pstatus_val, lower_dim_ents, verts_too, operation );
04460     }
04461 
04462     if( Interface::UNION == operation )
04463     {
04464         result = mbImpl->tag_get_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
04465         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
04466             pstatus_vals[i] |= pstatus_val;
04467     }
04468     else
04469     {
04470         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
04471             pstatus_vals[i] = pstatus_val;
04472     }
04473     result = mbImpl->tag_set_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
04474 
04475     return MB_SUCCESS;
04476 }
04477 
04478 static size_t choose_owner_idx( const std::vector< unsigned >& proc_list )
04479 {
04480     // Try to assign owners randomly so we get a good distribution
04481     // (note: all procs must compute the same seed so they choose the same owner)
04482     unsigned val = 0;
04483     for( size_t i = 0; i < proc_list.size(); i++ )
04484         val ^= proc_list[i];
04485     srand( (int)( val ) );
04486     return rand() % proc_list.size();
04487 }
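// Worked example of the scheme above (illustrative values): for a sharing list
// {2, 5, 7} every proc computes val = 2 ^ 5 ^ 7 = 0, seeds rand() with 0, and
// therefore draws the same rand() % 3 index, so all three procs agree on the
// owner without any extra communication.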
04488 
04489 struct set_tuple
04490 {
04491     unsigned idx;
04492     unsigned proc;
04493     EntityHandle handle;
04494     inline bool operator<( set_tuple other ) const
04495     {
04496         return ( idx == other.idx ) ? ( proc < other.proc ) : ( idx < other.idx );
04497     }
04498 };
04499 
04500 ErrorCode ParallelComm::resolve_shared_sets( EntityHandle file, const Tag* idtag )
04501 {
04502     // Find all sets with any of the following tags:
04503     const char* const shared_set_tag_names[] = { GEOM_DIMENSION_TAG_NAME, MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME,
04504                                                  NEUMANN_SET_TAG_NAME, PARALLEL_PARTITION_TAG_NAME };
04505     int num_tags                             = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
04506     Range candidate_sets;
04507     ErrorCode result = MB_FAILURE;
04508 
04509     // If we're not given an ID tag to use to globally identify sets,
04510     // then fall back to using known tag values
04511     if( !idtag )
04512     {
04513         Tag gid, tag;
04514         gid = mbImpl->globalId_tag();
04515         if( NULL != gid ) result = mbImpl->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tag );
04516         if( MB_SUCCESS == result )
04517         {
04518             for( int d = 0; d < 4; d++ )
04519             {
04520                 candidate_sets.clear();
04521                 const void* vals[] = { &d };
04522                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, vals, 1, candidate_sets );
04523                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, gid );
04524             }
04525         }
04526 
04527         for( int i = 1; i < num_tags; i++ )
04528         {
04529             result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag );
04530             if( MB_SUCCESS == result )
04531             {
04532                 candidate_sets.clear();
04533                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets );
04534                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, tag );
04535             }
04536         }
04537 
04538         return MB_SUCCESS;
04539     }
04540 
04541     for( int i = 0; i < num_tags; i++ )
04542     {
04543         Tag tag;
04544         result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag, MB_TAG_ANY );
04545         if( MB_SUCCESS != result ) continue;
04546 
04547         mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets, Interface::UNION );
04548     }
04549 
04550     // Find any additional sets that contain shared entities
04551     Range::iterator hint = candidate_sets.begin();
04552     Range all_sets;
04553     mbImpl->get_entities_by_type( file, MBENTITYSET, all_sets );
04554     all_sets           = subtract( all_sets, candidate_sets );
04555     Range::iterator it = all_sets.begin();
04556     while( it != all_sets.end() )
04557     {
04558         Range contents;
04559         mbImpl->get_entities_by_handle( *it, contents );
04560         contents.erase( contents.lower_bound( MBENTITYSET ), contents.end() );
04561         filter_pstatus( contents, PSTATUS_SHARED, PSTATUS_OR );
04562         if( contents.empty() )
04563         {
04564             ++it;
04565         }
04566         else
04567         {
04568             hint = candidate_sets.insert( hint, *it );
04569             it   = all_sets.erase( it );
04570         }
04571     }
04572 
04573     // Find any additional sets that contain or are parents of potential shared sets
04574     Range prev_list = candidate_sets;
04575     while( !prev_list.empty() )
04576     {
04577         it = all_sets.begin();
04578         Range new_list;
04579         hint = new_list.begin();
04580         while( it != all_sets.end() )
04581         {
04582             Range contents;
04583             mbImpl->get_entities_by_type( *it, MBENTITYSET, contents );
04584             if( !intersect( prev_list, contents ).empty() )
04585             {
04586                 hint = new_list.insert( hint, *it );
04587                 it   = all_sets.erase( it );
04588             }
04589             else
04590             {
04591                 contents.clear();  // reset before collecting children; clearing new_list here would drop already-queued sets and invalidate 'hint'
04592                 mbImpl->get_child_meshsets( *it, contents );
04593                 if( !intersect( prev_list, contents ).empty() )
04594                 {
04595                     hint = new_list.insert( hint, *it );
04596                     it   = all_sets.erase( it );
04597                 }
04598                 else
04599                 {
04600                     ++it;
04601                 }
04602             }
04603         }
04604 
04605         candidate_sets.merge( new_list );
04606         prev_list.swap( new_list );
04607     }
04608 
04609     return resolve_shared_sets( candidate_sets, *idtag );
04610 }
04611 
04612 #ifndef NDEBUG
04613 bool is_sorted_unique( std::vector< unsigned >& v )
04614 {
04615     for( size_t i = 1; i < v.size(); i++ )
04616         if( v[i - 1] >= v[i] ) return false;
04617     return true;
04618 }
04619 #endif
04620 
04621 ErrorCode ParallelComm::resolve_shared_sets( Range& sets, Tag idtag )
04622 {
04623     ErrorCode result;
04624     const unsigned rk = proc_config().proc_rank();
04625     MPI_Comm cm       = proc_config().proc_comm();
04626 
04627     // Build sharing list for all sets
04628 
04629     // Get ids for sets in a vector, to pass to gs
04630     std::vector< long > larray;  // Allocate sufficient space for longs
04631     std::vector< Ulong > handles;
04632     Range tmp_sets;
04633     // The id tag can be 4 or 8 bytes wide;
04634     // based on that, convert to int or to long, similarly to what we do
04635     // when resolving shared vertices.
04636     // This code must also work on 32-bit builds, where long is 4 bytes,
04637     // so test for the 4-byte case first.
04638     DataType tag_type;
04639     result = mbImpl->tag_get_data_type( idtag, tag_type );MB_CHK_SET_ERR( result, "Failed getting tag data type" );
04640     int bytes_per_tag;
04641     result = mbImpl->tag_get_bytes( idtag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed getting number of bytes per tag" );
04642     // On 64-bit platforms, long and int have different sizes; on 32-bit they do not
04643     // (if sizeof(long) is 8, assume a 64-bit machine)
04644 
04645     for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
04646     {
04647         if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
04648         {  // It is a special id tag
04649             long dum;
04650             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
04651             if( MB_SUCCESS == result )
04652             {
04653                 larray.push_back( dum );
04654                 handles.push_back( *rit );
04655                 tmp_sets.insert( tmp_sets.end(), *rit );
04656             }
04657         }
04658         else if( 4 == bytes_per_tag )
04659         {  // Must be GLOBAL_ID tag or MATERIAL_ID, etc
04660             int dum;
04661             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
04662             if( MB_SUCCESS == result )
04663             {
04664                 larray.push_back( dum );
04665                 handles.push_back( *rit );
04666                 tmp_sets.insert( tmp_sets.end(), *rit );
04667             }
04668         }
04669     }
04670 
04671     const size_t nsets = handles.size();
04672 
04673     // Get handle array for sets
04674     // Note: the assertion below does not hold on 64-bit Windows, where EntityHandle is 64 bits but unsigned long is only 32
04675     // assert(sizeof(EntityHandle) <= sizeof(unsigned long));
04676 
04677     // Do communication of data
04678     gs_data::crystal_data* cd = procConfig.crystal_router();
04679     gs_data* gsd              = new gs_data();
04680     result                    = gsd->initialize( nsets, &larray[0], &handles[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
04681 
04682     // Convert from global IDs grouped by process rank to list
04683     // of <idx, rank> pairs so that we can sort primarily
04684     // by idx and secondarily by rank (we want lists of procs for each
04685     // idx, not lists of indices for each proc).
04686     size_t ntuple = 0;
04687     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
04688         ntuple += gsd->nlinfo->_nshared[p];
04689     std::vector< set_tuple > tuples;
04690     tuples.reserve( ntuple );
04691     size_t j = 0;
04692     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
04693     {
04694         for( unsigned np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
04695         {
04696             set_tuple t;
04697             t.idx    = gsd->nlinfo->_sh_ind[j];
04698             t.proc   = gsd->nlinfo->_target[p];
04699             t.handle = gsd->nlinfo->_ulabels[j];
04700             tuples.push_back( t );
04701             j++;
04702         }
04703     }
04704     std::sort( tuples.begin(), tuples.end() );
04705 
04706     // Release crystal router stuff
04707     gsd->reset();
04708     delete gsd;
04709 
04710     // Storing sharing data for each set
04711     size_t ti    = 0;
04712     unsigned idx = 0;
04713     std::vector< unsigned > procs;
04714     Range::iterator si = tmp_sets.begin();
04715     while( si != tmp_sets.end() && ti < tuples.size() )
04716     {
04717         assert( idx <= tuples[ti].idx );
04718         if( idx < tuples[ti].idx ) si += ( tuples[ti].idx - idx );
04719         idx = tuples[ti].idx;
04720 
04721         procs.clear();
04722         size_t ti_init = ti;
04723         while( ti < tuples.size() && tuples[ti].idx == idx )
04724         {
04725             procs.push_back( tuples[ti].proc );
04726             ++ti;
04727         }
04728         assert( is_sorted_unique( procs ) );
04729 
04730         result = sharedSetData->set_sharing_procs( *si, procs );
04731         if( MB_SUCCESS != result )
04732         {
04733             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04734             std::cerr.flush();
04735             MPI_Abort( cm, 1 );
04736         }
04737 
04738         // Add this proc to the list of sharing procs in the correct position,
04739         // so that all procs select the owner based on the same list
04740         std::vector< unsigned >::iterator it = std::lower_bound( procs.begin(), procs.end(), rk );
04741         assert( it == procs.end() || *it > rk );
04742         procs.insert( it, rk );
04743         size_t owner_idx = choose_owner_idx( procs );
04744         EntityHandle owner_handle;
04745         if( procs[owner_idx] == rk )
04746             owner_handle = *si;
04747         else if( procs[owner_idx] > rk )
04748             owner_handle = tuples[ti_init + owner_idx - 1].handle;
04749         else
04750             owner_handle = tuples[ti_init + owner_idx].handle;
04751         result = sharedSetData->set_owner( *si, procs[owner_idx], owner_handle );
04752         if( MB_SUCCESS != result )
04753         {
04754             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04755             std::cerr.flush();
04756             MPI_Abort( cm, 1 );
04757         }
04758 
04759         ++si;
04760         ++idx;
04761     }
04762 
04763     return MB_SUCCESS;
04764 }
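// Worked example of the owner lookup above (illustrative values): with rk = 3
// and remote sharing procs {1, 5}, procs becomes {1, 3, 5}. If
// choose_owner_idx() returns 2 (proc 5), the owner handle is
// tuples[ti_init + 2 - 1], because the local rank contributes no tuple of its
// own to the list received from the crystal router.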
04765 // populate sets with ghost entities, if necessary
04766 ErrorCode ParallelComm::augment_default_sets_with_ghosts( EntityHandle file_set )
04767 {
04768     // Gather all the default sets we are interested in: material, neumann, etc.
04769     // We skip geometry sets, because they are not uniquely identified by their tag value;
04770     // another tag (e.g. a category tag) may be added for them later.
04771 
04772     if( procConfig.proc_size() < 2 ) return MB_SUCCESS;  // nothing to do in a serial run
04773     const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
04774                                                  PARALLEL_PARTITION_TAG_NAME };
04775 
04776     int num_tags = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
04777 
04778     Range* rangeSets = new Range[num_tags];
04779     Tag* tags        = new Tag[num_tags + 1];  // one extra for global id tag, which is an int, so far
04780 
04781     int my_rank   = rank();
04782     int** tagVals = new int*[num_tags];
04783     for( int i = 0; i < num_tags; i++ )
04784         tagVals[i] = NULL;
04785     ErrorCode rval;
04786 
04787     // For each tag we keep a local map from the tag value to the set carrying that value.
04788     // We assume the tag values are unique across sets; otherwise we would not
04789     // know which set to add the entity to.
04790 
04791     typedef std::map< int, EntityHandle > MVal;
04792     typedef std::map< int, EntityHandle >::iterator itMVal;
04793     MVal* localMaps = new MVal[num_tags];
04794 
04795     for( int i = 0; i < num_tags; i++ )
04796     {
04797 
04798         rval = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tags[i], MB_TAG_ANY );
04799         if( MB_SUCCESS != rval ) continue;
04800         rval = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &( tags[i] ), 0, 1, rangeSets[i],
04801                                                      Interface::UNION );MB_CHK_SET_ERR( rval, "can't get sets with a tag" );
04802 
04803         if( rangeSets[i].size() > 0 )
04804         {
04805             tagVals[i] = new int[rangeSets[i].size()];
04806             // fill up with the tag values
04807             rval = mbImpl->tag_get_data( tags[i], rangeSets[i], tagVals[i] );MB_CHK_SET_ERR( rval, "can't get set tag values" );
04808             // now for inverse mapping:
04809             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
04810             {
04811                 localMaps[i][tagVals[i][j]] = rangeSets[i][j];
04812             }
04813         }
04814     }
04815     // get the global id tag too
04816     tags[num_tags] = mbImpl->globalId_tag();
04817 
04818     TupleList remoteEnts;
04819     // Tuple layout: processor to send to, tag type, tag value, remote handle
04820     //   tag type: 0 - material
04821     //             1 - dirichlet
04822     //             2 - neumann
04823     //             3 - partition
04824     int initialSize = (int)sharedEnts.size();  // estimate: on average, each shared entity
04825     // will be sent to one processor, for one tag;
04826     // we actually send only entities that are owned locally, and among those
04827     // only the ones that carry a special tag (material, neumann, etc.);
04828     // if we exceed this capacity, the tuple list is resized
04829     remoteEnts.initialize( 3, 0, 1, 0, initialSize );
04830     remoteEnts.enableWriteAccess();
04831 
04832     // Now, for each owned entity, get the remote handle(s) and proc(s), and check whether it
04833     // belongs to one of the sets; if so, create a tuple and append it
04834 
04835     std::set< EntityHandle > own_and_sha;
04836     int ir = 0, jr = 0;
04837     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
04838     {
04839         // ghosted eh
04840         EntityHandle geh = *vit;
04841         if( own_and_sha.find( geh ) != own_and_sha.end() )  // already encountered
04842             continue;
04843         int procs[MAX_SHARING_PROCS];
04844         EntityHandle handles[MAX_SHARING_PROCS];
04845         int nprocs;
04846         unsigned char pstat;
04847         rval = get_sharing_data( geh, procs, handles, pstat, nprocs );
04848         if( rval != MB_SUCCESS )
04849         {
04850             for( int i = 0; i < num_tags; i++ )
04851                 delete[] tagVals[i];
04852             delete[] tagVals;
04853 
04854             MB_CHK_SET_ERR( rval, "Failed to get sharing data" );
04855         }
04856         if( pstat & PSTATUS_NOT_OWNED ) continue;  // we will send info only for entities that we own
04857         own_and_sha.insert( geh );
04858         for( int i = 0; i < num_tags; i++ )
04859         {
04860             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
04861             {
04862                 EntityHandle specialSet = rangeSets[i][j];  // this set has tag i, value tagVals[i][j];
04863                 if( mbImpl->contains_entities( specialSet, &geh, 1 ) )
04864                 {
04865                     // this ghosted entity is in a special set, so form the tuple
04866                     // to send to the processors that do not own this
04867                     for( int k = 0; k < nprocs; k++ )
04868                     {
04869                         if( procs[k] != my_rank )
04870                         {
04871                             if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
04872                             {
04873                                 // resize, so we do not overflow
04874                                 int oldSize = remoteEnts.get_max();
04875                                 // increase the capacity by 50%
04876                                 remoteEnts.resize( oldSize + oldSize / 2 + 1 );
04877                             }
04878                             remoteEnts.vi_wr[ir++]  = procs[k];       // send to proc
04879                             remoteEnts.vi_wr[ir++]  = i;              // for the tags [i] (0-3)
04880                             remoteEnts.vi_wr[ir++]  = tagVals[i][j];  // actual value of the tag
04881                             remoteEnts.vul_wr[jr++] = handles[k];
04882                             remoteEnts.inc_n();
04883                         }
04884                     }
04885                 }
04886             }
04887         }
04888         // if the local entity has a global id, send it too, so we avoid
04889         // another "exchange_tags" for global id
04890         int gid;
04891         rval = mbImpl->tag_get_data( tags[num_tags], &geh, 1, &gid );MB_CHK_SET_ERR( rval, "Failed to get global id" );
04892         if( gid != 0 )
04893         {
04894             for( int k = 0; k < nprocs; k++ )
04895             {
04896                 if( procs[k] != my_rank )
04897                 {
04898                     if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
04899                     {
04900                         // resize, so we do not overflow
04901                         int oldSize = remoteEnts.get_max();
04902                         // increase the capacity by 50%
04903                         remoteEnts.resize( oldSize + oldSize / 2 + 1 );
04904                     }
04905                     remoteEnts.vi_wr[ir++]  = procs[k];  // send to proc
04906                     remoteEnts.vi_wr[ir++]  = num_tags;  // for the tags [j] (4)
04907                     remoteEnts.vi_wr[ir++]  = gid;       // actual value of the tag
04908                     remoteEnts.vul_wr[jr++] = handles[k];
04909                     remoteEnts.inc_n();
04910                 }
04911             }
04912         }
04913     }
04914 
04915 #ifndef NDEBUG
04916     if( my_rank == 1 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 1, before augment routing" );
04917     MPI_Barrier( procConfig.proc_comm() );
04918     int sentEnts = remoteEnts.get_n();
04919     assert( ( sentEnts == jr ) && ( 3 * sentEnts == ir ) );
04920 #endif
04921     // Exchange the info now, routing each tuple to its destination processor
04922     gs_data::crystal_data* cd = this->procConfig.crystal_router();
04923     // All communication happens here; no other mpi calls
04924     // Also, this is a collective call
04925     rval = cd->gs_transfer( 1, remoteEnts, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
04926 #ifndef NDEBUG
04927     if( my_rank == 0 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 0, after augment routing" );
04928     MPI_Barrier( procConfig.proc_comm() );
04929 #endif
04930 
04931     // now process the data received from other processors
04932     int received = remoteEnts.get_n();
04933     for( int i = 0; i < received; i++ )
04934     {
04935         // int from = ents_to_delete.vi_rd[i];
04936         EntityHandle geh = (EntityHandle)remoteEnts.vul_rd[i];
04937         int from_proc    = remoteEnts.vi_rd[3 * i];
04938         if( my_rank == from_proc )
04939             std::cout << " unexpected receive from my rank " << my_rank << " while augmenting sets with ghosts\n";
04940         int tag_type = remoteEnts.vi_rd[3 * i + 1];
04941         assert( ( 0 <= tag_type ) && ( tag_type <= num_tags ) );
04942         int value = remoteEnts.vi_rd[3 * i + 2];
04943         if( tag_type == num_tags )
04944         {
04945             // it is global id
04946             rval = mbImpl->tag_set_data( tags[num_tags], &geh, 1, &value );MB_CHK_SET_ERR( rval, "Error in setting gid tag" );
04947         }
04948         else
04949         {
04950             // now, based on value and tag type, see if we have that value in the map
04951             MVal& lmap = localMaps[tag_type];
04952             itMVal itm = lmap.find( value );
04953             if( itm == lmap.end() )
04954             {
04955                 // the value was not found yet in the local map, so we have to create the set
04956                 EntityHandle newSet;
04957                 rval = mbImpl->create_meshset( MESHSET_SET, newSet );MB_CHK_SET_ERR( rval, "can't create new set" );
04958                 lmap[value] = newSet;
04959                 // set the tag value
04960                 rval = mbImpl->tag_set_data( tags[tag_type], &newSet, 1, &value );MB_CHK_SET_ERR( rval, "can't set tag for new set" );
04961 
04962                 // we also need to add the newly created set to the file set, if not null
04963                 if( file_set )
04964                 {
04965                     rval = mbImpl->add_entities( file_set, &newSet, 1 );MB_CHK_SET_ERR( rval, "can't add new set to the file set" );
04966                 }
04967             }
04968             // add the entity to the set pointed to by the map
04969             rval = mbImpl->add_entities( lmap[value], &geh, 1 );MB_CHK_SET_ERR( rval, "can't add ghost ent to the set" );
04970         }
04971     }
04972 
04973     for( int i = 0; i < num_tags; i++ )
04974         delete[] tagVals[i];
04975     delete[] tagVals;
04976     delete[] rangeSets;
04977     delete[] tags;
04978     delete[] localMaps;
04979     return MB_SUCCESS;
04980 }
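// A minimal usage sketch (disabled): typical call order, after ghost entities
// have been exchanged, so the ghosts also appear in the local material /
// dirichlet / neumann / partition sets. The file_set handle and the ghosting
// parameters are hypothetical.
#if 0
static ErrorCode example_augment( ParallelComm& pcomm, EntityHandle file_set )
{
    // One layer of vertex-bridged ghost elements of dimension 3
    ErrorCode rval = pcomm.exchange_ghost_cells( 3, 0, 1, 0, true, true );
    if( MB_SUCCESS != rval ) return rval;
    return pcomm.augment_default_sets_with_ghosts( file_set );
}
#endif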
04981 ErrorCode ParallelComm::create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim )
04982 {
04983     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
04984 
04985     // Build up the list of shared entities
04986     int procs[MAX_SHARING_PROCS];
04987     EntityHandle handles[MAX_SHARING_PROCS];
04988     ErrorCode result;
04989     int nprocs;
04990     unsigned char pstat;
04991     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
04992     {
04993         if( shared_dim != -1 && mbImpl->dimension_from_handle( *vit ) > shared_dim ) continue;
04994         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
04995         std::sort( procs, procs + nprocs );
04996         std::vector< int > tmp_procs( procs, procs + nprocs );
04997         assert( tmp_procs.size() != 2 );
04998         proc_nvecs[tmp_procs].push_back( *vit );
04999     }
05000 
05001     Skinner skinner( mbImpl );
05002     Range skin_ents[4];
05003     result = mbImpl->get_entities_by_dimension( this_set, resolve_dim, skin_ents[resolve_dim] );MB_CHK_SET_ERR( result, "Failed to get skin entities by dimension" );
05004     result =
05005         skinner.find_skin( this_set, skin_ents[resolve_dim], false, skin_ents[resolve_dim - 1], 0, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
05006     if( shared_dim > 1 )
05007     {
05008         result = mbImpl->get_adjacencies( skin_ents[resolve_dim - 1], resolve_dim - 2, true, skin_ents[resolve_dim - 2],
05009                                           Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
05010     }
05011 
05012     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );
05013 
05014     return create_interface_sets( proc_nvecs );
05015 }
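
// Minimal usage sketch (illustrative, with assumed 'mb' and 'fileSet'): assuming
// shared entities have already been resolved on a 3D mesh, interface sets for
// the shared skin can be rebuilt explicitly with this overload:
//
//   ParallelComm pcomm( &mb, MPI_COMM_WORLD );
//   ErrorCode rval = pcomm.resolve_shared_ents( fileSet, 3 /*resolve_dim*/, 2 /*shared_dim*/ );
//   if( MB_SUCCESS == rval ) rval = pcomm.create_interface_sets( fileSet, 3, 2 );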
05016 
05017 ErrorCode ParallelComm::create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
05018 {
05019     if( proc_nvecs.empty() ) return MB_SUCCESS;
05020 
05021     int proc_ids[MAX_SHARING_PROCS];
05022     EntityHandle proc_handles[MAX_SHARING_PROCS];
05023     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
05024     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in create_interface_sets" );
05025     Range::iterator rit;
05026 
05027     // Create interface sets, tag them, and tag their contents with iface set tag
05028     std::vector< unsigned char > pstatus;
05029     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator vit = proc_nvecs.begin();
05030          vit != proc_nvecs.end(); ++vit )
05031     {
05032         // Create the set
05033         EntityHandle new_set;
05034         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
05035         interfaceSets.insert( new_set );
05036 
05037         // Add entities
05038         assert( !vit->second.empty() );
05039         result = mbImpl->add_entities( new_set, &( vit->second )[0], ( vit->second ).size() );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
05040         // Tag set with the proc rank(s)
05041         if( vit->first.size() == 1 )
05042         {
05043             assert( ( vit->first )[0] != (int)procConfig.proc_rank() );
05044             result = mbImpl->tag_set_data( shp_tag, &new_set, 1, &( vit->first )[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
05045             proc_handles[0] = 0;
05046             result          = mbImpl->tag_set_data( shh_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
05047         }
05048         else
05049         {
05050             // Pad tag data out to MAX_SHARING_PROCS with -1
05051             if( vit->first.size() > MAX_SHARING_PROCS )
05052             {
05053                 std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_set ) )
05054                           << ' ' << ID_FROM_HANDLE( new_set ) << " on process " << proc_config().proc_rank()
05055                           << std::endl;
05056                 std::cerr.flush();
05057                 MPI_Abort( proc_config().proc_comm(), 66 );
05058             }
05059             // assert(vit->first.size() <= MAX_SHARING_PROCS);
05060             std::copy( vit->first.begin(), vit->first.end(), proc_ids );
05061             std::fill( proc_ids + vit->first.size(), proc_ids + MAX_SHARING_PROCS, -1 );
05062             result = mbImpl->tag_set_data( shps_tag, &new_set, 1, proc_ids );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
05063             unsigned int ind = std::find( proc_ids, proc_ids + vit->first.size(), procConfig.proc_rank() ) - proc_ids;
05064             assert( ind < vit->first.size() );
05065             std::fill( proc_handles, proc_handles + MAX_SHARING_PROCS, 0 );
05066             proc_handles[ind] = new_set;
05067             result            = mbImpl->tag_set_data( shhs_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
05068         }
05069 
05070         // Get the owning proc, then set the pstatus tag on iface set
05071         int min_proc       = ( vit->first )[0];
05072         unsigned char pval = ( PSTATUS_SHARED | PSTATUS_INTERFACE );
05073         if( min_proc < (int)procConfig.proc_rank() ) pval |= PSTATUS_NOT_OWNED;
05074         if( vit->first.size() > 1 ) pval |= PSTATUS_MULTISHARED;
05075         result = mbImpl->tag_set_data( pstat_tag, &new_set, 1, &pval );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
05076 
05077         // Tag the vertices with the same thing
05078         pstatus.clear();
05079         std::vector< EntityHandle > verts;
05080         for( std::vector< EntityHandle >::iterator v2it = ( vit->second ).begin(); v2it != ( vit->second ).end();
05081              ++v2it )
05082             if( mbImpl->type_from_handle( *v2it ) == MBVERTEX ) verts.push_back( *v2it );
05083         pstatus.resize( verts.size(), pval );
05084         if( !verts.empty() )
05085         {
05086             result = mbImpl->tag_set_data( pstat_tag, &verts[0], verts.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set vertices with pstatus" );
05087         }
05088     }
05089 
05090     return MB_SUCCESS;
05091 }
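
// Worked example of the pstatus logic above (illustrative): for a multi-shared
// interface set whose sorted sharing list is {2, 5, 7}, rank 2 ends up with
// PSTATUS_SHARED | PSTATUS_INTERFACE | PSTATUS_MULTISHARED, while ranks 5 and 7
// additionally carry PSTATUS_NOT_OWNED, since the lowest rank in the list is
// treated as the owner. In sketch form, with 'sharing_ranks' standing in for the
// sorted map key used above:
//
//   unsigned char pval = PSTATUS_SHARED | PSTATUS_INTERFACE;
//   if( sharing_ranks.front() < my_rank ) pval |= PSTATUS_NOT_OWNED;  // not the minimum rank
//   if( sharing_ranks.size() > 1 ) pval |= PSTATUS_MULTISHARED;       // 3 or more sharing procs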
05092 
05093 ErrorCode ParallelComm::create_iface_pc_links()
05094 {
05095     // Now that we've resolved the entities in the iface sets,
05096     // set parent/child links between the iface sets
05097 
05098     // First tag all entities in the iface sets
05099     Tag tmp_iface_tag;
05100     EntityHandle tmp_iface_set = 0;
05101     ErrorCode result           = mbImpl->tag_get_handle( "__tmp_iface", 1, MB_TYPE_HANDLE, tmp_iface_tag,
05102                                                          MB_TAG_DENSE | MB_TAG_CREAT, &tmp_iface_set );MB_CHK_SET_ERR( result, "Failed to create temporary interface set tag" );
05103 
05104     Range iface_ents;
05105     std::vector< EntityHandle > tag_vals;
05106     Range::iterator rit;
05107 
05108     for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
05109     {
05110         // tag entities with interface set
05111         iface_ents.clear();
05112         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in interface set" );
05113 
05114         if( iface_ents.empty() ) continue;
05115 
05116         tag_vals.resize( iface_ents.size() );
05117         std::fill( tag_vals.begin(), tag_vals.end(), *rit );
05118         result = mbImpl->tag_set_data( tmp_iface_tag, iface_ents, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to tag iface entities with interface set" );
05119     }
05120 
05121     // Now go back through interface sets and add parent/child links
05122     Range tmp_ents2;
05123     for( int d = 2; d >= 0; d-- )
05124     {
05125         for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
05126         {
05127             // Get entities on this interface
05128             iface_ents.clear();
05129             result = mbImpl->get_entities_by_handle( *rit, iface_ents, true );MB_CHK_SET_ERR( result, "Failed to get entities by handle" );
05130             if( iface_ents.empty() || mbImpl->dimension_from_handle( *iface_ents.rbegin() ) != d ) continue;
05131 
05132             // Get higher-dimensional entities and their interface sets
05133             result = mbImpl->get_adjacencies( &( *iface_ents.begin() ), 1, d + 1, false, tmp_ents2 );MB_CHK_SET_ERR( result, "Failed to get adjacencies for interface sets" );
05134             tag_vals.resize( tmp_ents2.size() );
05135             result = mbImpl->tag_get_data( tmp_iface_tag, tmp_ents2, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to get tmp iface tag for interface sets" );
05136 
05137             // Go through and for any on interface make it a parent
05138             EntityHandle last_set = 0;
05139             for( unsigned int i = 0; i < tag_vals.size(); i++ )
05140             {
05141                 if( tag_vals[i] && tag_vals[i] != last_set )
05142                 {
05143                     result = mbImpl->add_parent_child( tag_vals[i], *rit );MB_CHK_SET_ERR( result, "Failed to add parent/child link for interface set" );
05144                     last_set = tag_vals[i];
05145                 }
05146             }
05147         }
05148     }
05149 
05150     // Delete the temporary tag
05151     result = mbImpl->tag_delete( tmp_iface_tag );MB_CHK_SET_ERR( result, "Failed to delete tmp iface tag" );
05152 
05153     return MB_SUCCESS;
05154 }
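
// The linking above works through the temporary dense handle tag: every entity
// in an interface set points back at its set, so a d-dimensional set finds its
// (d+1)-dimensional parents with one adjacency query plus one tag_get_data call.
// For a typical 3D partition the resulting hierarchy is face set -> edge set ->
// vertex set, and can be queried afterwards with the standard set API, e.g.
// (illustrative, 'edge_iface_set' assumed):
//
//   std::vector< EntityHandle > parents;
//   ErrorCode rval = mb.get_parent_meshsets( edge_iface_set, parents );
//   // 'parents' would then hold the face interface set(s) adjacent to those edges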
05155 
05156 ErrorCode ParallelComm::get_proc_nvecs( int resolve_dim,
05157                                         int shared_dim,
05158                                         Range* skin_ents,
05159                                         std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
05160 {
05161     // Set sharing procs tags on other skin ents
05162     ErrorCode result;
05163     const EntityHandle* connect;
05164     int num_connect;
05165     std::set< int > sharing_procs;
05166     std::vector< EntityHandle > dum_connect;
05167     std::vector< int > sp_vec;
05168 
05169     for( int d = 3; d > 0; d-- )
05170     {
05171         if( resolve_dim == d ) continue;
05172 
05173         for( Range::iterator rit = skin_ents[d].begin(); rit != skin_ents[d].end(); ++rit )
05174         {
05175             // Get connectivity
05176             result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect );MB_CHK_SET_ERR( result, "Failed to get connectivity on non-vertex skin entities" );
05177 
05178             int op = ( resolve_dim < shared_dim ? Interface::UNION : Interface::INTERSECT );
05179             result = get_sharing_data( connect, num_connect, sharing_procs, op );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_proc_nvecs" );
05180             if( sharing_procs.empty() ||
05181                 ( sharing_procs.size() == 1 && *sharing_procs.begin() == (int)procConfig.proc_rank() ) )
05182                 continue;
05183 
05184             // Need to specify sharing data correctly for entities or they will
05185             // end up in a different interface set than corresponding vertices
05186             if( sharing_procs.size() == 2 )
05187             {
05188                 std::set< int >::iterator it = sharing_procs.find( proc_config().proc_rank() );
05189                 assert( it != sharing_procs.end() );
05190                 sharing_procs.erase( it );
05191             }
05192 
05193             // Intersection is the owning proc(s) for this skin ent
05194             sp_vec.clear();
05195             std::copy( sharing_procs.begin(), sharing_procs.end(), std::back_inserter( sp_vec ) );
05196             assert( sp_vec.size() != 2 );
05197             proc_nvecs[sp_vec].push_back( *rit );
05198         }
05199     }
05200 
05201 #ifndef NDEBUG
05202     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05203     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05204          mit != proc_nvecs.end(); ++mit )
05205     {
05206         std::vector< EntityHandle > tmp_vec = ( mit->second );
05207         std::sort( tmp_vec.begin(), tmp_vec.end() );
05208         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05209         assert( vit == tmp_vec.end() );
05210     }
05211 #endif
05212 
05213     return MB_SUCCESS;
05214 }
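
// Example of the UNION/INTERSECT choice above (illustrative): a skin edge whose
// two vertices are shared with procs {1, 3} and {1, 4} respectively is itself
// shared only with proc 1 when resolve_dim >= shared_dim (INTERSECT of the
// vertex sharing lists); the UNION {1, 3, 4} is used only when
// resolve_dim < shared_dim.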
05215 
05216 // Overloaded form of tag_shared_verts
05217 // Tuple coming in is of form (arbitrary value, remoteProc, localHandle, remoteHandle)
05218 // It will also check for duplicate entries in the list if the list is sorted
05219 ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents,
05220                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
05221                                           Range& /*proc_verts*/,
05222                                           unsigned int i_extra )
05223 {
05224     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
05225     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
05226 
05227     unsigned int j = 0, i = 0;
05228     std::vector< int > sharing_procs, sharing_procs2, tag_procs;
05229     std::vector< EntityHandle > sharing_handles, sharing_handles2, tag_lhandles, tag_rhandles;
05230     std::vector< unsigned char > pstatus;
05231 
05232     // We're on tuple j/2
05233     if( i_extra ) i += i_extra;
05234     while( j < 2 * shared_ents.get_n() )
05235     {
05236         // Count & accumulate sharing procs
05237         EntityHandle this_ent = shared_ents.vul_rd[j], other_ent = 0;
05238         int other_proc = -1;
05239         while( j < 2 * shared_ents.get_n() && shared_ents.vul_rd[j] == this_ent )
05240         {
05241             j++;
05242             // Shouldn't have same proc
05243             assert( shared_ents.vi_rd[i] != (int)procConfig.proc_rank() );
05244             // Grab the remote data if it's not a duplicate
05245             if( shared_ents.vul_rd[j] != other_ent || shared_ents.vi_rd[i] != other_proc )
05246             {
05247                 assert( 0 != shared_ents.vul_rd[j] );
05248                 sharing_procs.push_back( shared_ents.vi_rd[i] );
05249                 sharing_handles.push_back( shared_ents.vul_rd[j] );
05250             }
05251             other_proc = shared_ents.vi_rd[i];
05252             other_ent  = shared_ents.vul_rd[j];
05253             j++;
05254             i += 1 + i_extra;
05255         }
05256 
05257         if( sharing_procs.size() > 1 )
05258         {
05259             // Add current proc/handle to list
05260             sharing_procs.push_back( procConfig.proc_rank() );
05261             sharing_handles.push_back( this_ent );
05262 
05263             // Sort sharing_procs and sharing_handles such that
05264             // sharing_procs is in ascending order. Use temporary
05265             // lists and binary search to re-order sharing_handles.
05266             sharing_procs2 = sharing_procs;
05267             std::sort( sharing_procs2.begin(), sharing_procs2.end() );
05268             sharing_handles2.resize( sharing_handles.size() );
05269             for( size_t k = 0; k < sharing_handles.size(); k++ )
05270             {
05271                 size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
05272                              sharing_procs2.begin();
05273                 sharing_handles2[idx] = sharing_handles[k];
05274             }
05275             sharing_procs.swap( sharing_procs2 );
05276             sharing_handles.swap( sharing_handles2 );
05277         }
05278 
05279         assert( sharing_procs.size() != 2 );
05280         proc_nvecs[sharing_procs].push_back( this_ent );
05281 
05282         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
05283         if( sharing_procs.size() == 1 )
05284         {
05285             tag_procs.push_back( sharing_procs[0] );
05286             tag_lhandles.push_back( this_ent );
05287             tag_rhandles.push_back( sharing_handles[0] );
05288             pstatus.push_back( share_flag );
05289         }
05290         else
05291         {
05292             // Pad lists
05293             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
05294             if( sharing_procs.size() > MAX_SHARING_PROCS )
05295             {
05296                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
05297                           << proc_config().proc_rank() << std::endl;
05298                 std::cerr.flush();
05299                 MPI_Abort( proc_config().proc_comm(), 66 );
05300             }
05301             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
05302             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
05303             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
05304             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
05305             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05306             sharedEnts.insert( this_ent );
05307         }
05308 
05309         // Reset sharing proc(s) tags
05310         sharing_procs.clear();
05311         sharing_handles.clear();
05312     }
05313 
05314     if( !tag_procs.empty() )
05315     {
05316         result = mbImpl->tag_set_data( shp_tag, &tag_lhandles[0], tag_procs.size(), &tag_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
05317         result = mbImpl->tag_set_data( shh_tag, &tag_lhandles[0], tag_procs.size(), &tag_rhandles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
05318         result = mbImpl->tag_set_data( pstat_tag, &tag_lhandles[0], tag_procs.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05319         for( std::vector< EntityHandle >::iterator vvt = tag_lhandles.begin(); vvt != tag_lhandles.end(); vvt++ )
05320             sharedEnts.insert( *vvt );
05321     }
05322 
05323 #ifndef NDEBUG
05324     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05325     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05326          mit != proc_nvecs.end(); ++mit )
05327     {
05328         std::vector< EntityHandle > tmp_vec = ( mit->second );
05329         std::sort( tmp_vec.begin(), tmp_vec.end() );
05330         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05331         assert( vit == tmp_vec.end() );
05332     }
05333 #endif
05334 
05335     return MB_SUCCESS;
05336 }
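
// The proc/handle reordering used above, as a standalone sketch (illustrative):
// with parallel arrays procs = {7, 2, 5} and handles = {h7, h2, h5}, sorting the
// procs and re-placing each handle via binary search yields {2, 5, 7} and
// {h2, h5, h7}, keeping the sharedps/sharedhs tags aligned entry by entry:
//
//   std::vector< int > sorted_procs( procs );
//   std::sort( sorted_procs.begin(), sorted_procs.end() );
//   std::vector< EntityHandle > sorted_handles( handles.size() );
//   for( size_t k = 0; k < handles.size(); k++ )
//   {
//       size_t idx = std::lower_bound( sorted_procs.begin(), sorted_procs.end(), procs[k] ) -
//                    sorted_procs.begin();
//       sorted_handles[idx] = handles[k];
//   }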
05337 
05338 ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents,
05339                                           Range* skin_ents,
05340                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
05341                                           Range& /*proc_verts*/ )
05342 {
05343     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
05344     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
05345 
05346     unsigned int j = 0, i = 0;
05347     std::vector< int > sharing_procs, sharing_procs2;
05348     std::vector< EntityHandle > sharing_handles, sharing_handles2, skin_verts( skin_ents[0].size() );
05349     for( Range::iterator rit = skin_ents[0].begin(); rit != skin_ents[0].end(); ++rit, i++ )
05350         skin_verts[i] = *rit;
05351     i = 0;
05352 
05353     while( j < 2 * shared_ents.get_n() )
05354     {
05355         // Count & accumulate sharing procs
05356         int this_idx          = shared_ents.vi_rd[j];
05357         EntityHandle this_ent = skin_verts[this_idx];
05358         while( j < 2 * shared_ents.get_n() && shared_ents.vi_rd[j] == this_idx )
05359         {
05360             j++;
05361             // Shouldn't have same proc
05362             assert( shared_ents.vi_rd[j] != (int)procConfig.proc_rank() );
05363             sharing_procs.push_back( shared_ents.vi_rd[j++] );
05364             sharing_handles.push_back( shared_ents.vul_rd[i++] );
05365         }
05366 
05367         if( sharing_procs.size() > 1 )
05368         {
05369             // Add current proc/handle to list
05370             sharing_procs.push_back( procConfig.proc_rank() );
05371             sharing_handles.push_back( this_ent );
05372         }
05373 
05374         // Sort sharing_procs and sharing_handles such that
05375         // sharing_procs is in ascending order. Use temporary
05376         // lists and binary search to re-order sharing_handles.
05377         sharing_procs2 = sharing_procs;
05378         std::sort( sharing_procs2.begin(), sharing_procs2.end() );
05379         sharing_handles2.resize( sharing_handles.size() );
05380         for( size_t k = 0; k < sharing_handles.size(); k++ )
05381         {
05382             size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
05383                          sharing_procs2.begin();
05384             sharing_handles2[idx] = sharing_handles[k];
05385         }
05386         sharing_procs.swap( sharing_procs2 );
05387         sharing_handles.swap( sharing_handles2 );
05388 
05389         assert( sharing_procs.size() != 2 );
05390         proc_nvecs[sharing_procs].push_back( this_ent );
05391 
05392         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
05393         if( sharing_procs.size() == 1 )
05394         {
05395             result = mbImpl->tag_set_data( shp_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
05396             result = mbImpl->tag_set_data( shh_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
05397             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &share_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05398             sharedEnts.insert( this_ent );
05399         }
05400         else
05401         {
05402             // Pad lists
05403             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
05404             if( sharing_procs.size() > MAX_SHARING_PROCS )
05405             {
05406                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
05407                           << proc_config().proc_rank() << std::endl;
05408                 std::cerr.flush();
05409                 MPI_Abort( proc_config().proc_comm(), 66 );
05410             }
05411             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
05412             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
05413             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
05414             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
05415             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05416             sharedEnts.insert( this_ent );
05417         }
05418 
05419         // Reset sharing proc(s) tags
05420         sharing_procs.clear();
05421         sharing_handles.clear();
05422     }
05423 
05424 #ifndef NDEBUG
05425     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05426     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05427          mit != proc_nvecs.end(); ++mit )
05428     {
05429         std::vector< EntityHandle > tmp_vec = ( mit->second );
05430         std::sort( tmp_vec.begin(), tmp_vec.end() );
05431         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05432         assert( vit == tmp_vec.end() );
05433     }
05434 #endif
05435 
05436     return MB_SUCCESS;
05437 }
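
// Tuple layout consumed by this overload (as read above): shared_ents.vi_rd
// holds (local skin-vertex index, sharing proc) pairs sorted by index, and
// shared_ents.vul_rd holds one remote vertex handle per pair; e.g. a vertex at
// skin index 12 shared with ranks 3 and 8 contributes the int pairs
// (12, 3), (12, 8) and two consecutive remote handles.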
05438 
05439 //! Get processors with which this processor communicates; sets are sorted by processor
05440 ErrorCode ParallelComm::get_interface_procs( std::set< unsigned int >& procs_set, bool get_buffs )
05441 {
05442     // Make sure the sharing procs vector is empty
05443     procs_set.clear();
05444 
05445     // Pre-load vector of single-proc tag values
05446     unsigned int i, j;
05447     std::vector< int > iface_proc( interfaceSets.size() );
05448     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), interfaceSets, &iface_proc[0] );MB_CHK_SET_ERR( result, "Failed to get iface_proc for iface sets" );
05449 
05450     // Get sharing procs either from single-proc vector or by getting
05451     // multi-proc tag value
05452     int tmp_iface_procs[MAX_SHARING_PROCS];
05453     std::fill( tmp_iface_procs, tmp_iface_procs + MAX_SHARING_PROCS, -1 );
05454     Range::iterator rit;
05455     for( rit = interfaceSets.begin(), i = 0; rit != interfaceSets.end(); ++rit, i++ )
05456     {
05457         if( -1 != iface_proc[i] )
05458         {
05459             assert( iface_proc[i] != (int)procConfig.proc_rank() );
05460             procs_set.insert( (unsigned int)iface_proc[i] );
05461         }
05462         else
05463         {
05464             // Get the sharing_procs tag
05465             result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, tmp_iface_procs );MB_CHK_SET_ERR( result, "Failed to get iface_procs for iface set" );
05466             for( j = 0; j < MAX_SHARING_PROCS; j++ )
05467             {
05468                 if( -1 != tmp_iface_procs[j] && tmp_iface_procs[j] != (int)procConfig.proc_rank() )
05469                     procs_set.insert( (unsigned int)tmp_iface_procs[j] );
05470                 else if( -1 == tmp_iface_procs[j] )
05471                 {
05472                     std::fill( tmp_iface_procs, tmp_iface_procs + j, -1 );
05473                     break;
05474                 }
05475             }
05476         }
05477     }
05478 
05479     if( get_buffs )
05480     {
05481         for( std::set< unsigned int >::iterator sit = procs_set.begin(); sit != procs_set.end(); ++sit )
05482             get_buffers( *sit );
05483     }
05484 
05485     return MB_SUCCESS;
05486 }
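
// Minimal usage sketch (illustrative): list the neighbor ranks this proc shares
// an interface with, assuming shared entities have already been resolved:
//
//   std::set< unsigned int > neighbors;
//   ErrorCode rval = pcomm.get_interface_procs( neighbors, false );
//   if( MB_SUCCESS == rval )
//       for( std::set< unsigned int >::iterator sit = neighbors.begin(); sit != neighbors.end(); ++sit )
//           std::cout << "rank " << pcomm.proc_config().proc_rank() << " neighbors rank " << *sit << "\n";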
05487 
05488 ErrorCode ParallelComm::get_pstatus( EntityHandle entity, unsigned char& pstatus_val )
05489 {
05490     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstatus_val );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
05491     return result;
05492 }
05493 
05494 ErrorCode ParallelComm::get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents )
05495 {
05496     Range ents;
05497     ErrorCode result;
05498 
05499     if( -1 == dim )
05500     {
05501         result = mbImpl->get_entities_by_handle( 0, ents );MB_CHK_SET_ERR( result, "Failed to get all entities" );
05502     }
05503     else
05504     {
05505         result = mbImpl->get_entities_by_dimension( 0, dim, ents );MB_CHK_SET_ERR( result, "Failed to get entities of dimension " << dim );
05506     }
05507 
05508     std::vector< unsigned char > pstatus( ents.size() );
05509     result = mbImpl->tag_get_data( pstatus_tag(), ents, &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
05510     Range::iterator rit = ents.begin();
05511     int i               = 0;
05512     if( pstatus_val )
05513     {
05514         for( ; rit != ents.end(); i++, ++rit )
05515         {
05516             if( pstatus[i] & pstatus_val && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
05517                 pstatus_ents.insert( *rit );
05518         }
05519     }
05520     else
05521     {
05522         for( ; rit != ents.end(); i++, ++rit )
05523         {
05524             if( !pstatus[i] && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
05525                 pstatus_ents.insert( *rit );
05526         }
05527     }
05528 
05529     return MB_SUCCESS;
05530 }
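
// Minimal usage sketch (illustrative): gather the vertices this proc does not
// own; note the test above uses a bitwise AND, so any matching bit selects the
// entity, while a zero pstatus_val instead returns purely local entities:
//
//   Range not_owned_verts;
//   ErrorCode rval = pcomm.get_pstatus_entities( 0 /*vertices*/, PSTATUS_NOT_OWNED, not_owned_verts );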
05531 
05532 ErrorCode ParallelComm::check_global_ids( EntityHandle this_set,
05533                                           const int dimension,
05534                                           const int start_id,
05535                                           const bool largest_dim_only,
05536                                           const bool parallel,
05537                                           const bool owned_only )
05538 {
05539     // Global id tag
05540     Tag gid_tag = mbImpl->globalId_tag();
05541     int def_val = -1;
05542     Range dum_range;
05543 
05544     void* tag_ptr    = &def_val;
05545     ErrorCode result = mbImpl->get_entities_by_type_and_tag( this_set, MBVERTEX, &gid_tag, &tag_ptr, 1, dum_range );MB_CHK_SET_ERR( result, "Failed to get entities by MBVERTEX type and gid tag" );
05546 
05547     if( !dum_range.empty() )
05548     {
05549         // Just created it, so we need global ids
05550         result = assign_global_ids( this_set, dimension, start_id, largest_dim_only, parallel, owned_only );MB_CHK_SET_ERR( result, "Failed assigning global ids" );
05551     }
05552 
05553     return MB_SUCCESS;
05554 }
05555 
05556 bool ParallelComm::is_iface_proc( EntityHandle this_set, int to_proc )
05557 {
05558     int sharing_procs[MAX_SHARING_PROCS];
05559     std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
05560     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), &this_set, 1, sharing_procs );
05561     if( MB_SUCCESS == result && to_proc == sharing_procs[0] ) return true;
05562 
05563     result = mbImpl->tag_get_data( sharedps_tag(), &this_set, 1, sharing_procs );
05564     if( MB_SUCCESS != result ) return false;
05565 
05566     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
05567     {
05568         if( to_proc == sharing_procs[i] )
05569             return true;
05570         else if( -1 == sharing_procs[i] )
05571             return false;
05572     }
05573 
05574     return false;
05575 }
05576 
05577 ErrorCode ParallelComm::filter_pstatus( Range& ents,
05578                                         unsigned char pstat,
05579                                         unsigned char op,
05580                                         int to_proc,
05581                                         Range* returned_ents )
05582 {
05583     Range tmp_ents;
05584 
05585     // assert(!ents.empty());
05586     if( ents.empty() )
05587     {
05588         if( returned_ents ) returned_ents->clear();
05589         return MB_SUCCESS;
05590     }
05591 
05592     // Put into tmp_ents any entities which are not owned locally or
05593     // who are already shared with to_proc
05594     std::vector< unsigned char > shared_flags( ents.size() ), shared_flags2;
05595     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), ents, &shared_flags[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus flag" );
05596     Range::const_iterator rit, hint = tmp_ents.begin();
05597
05598     int i;
05599     if( op == PSTATUS_OR )
05600     {
05601         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05602         {
05603             if( ( ( shared_flags[i] & ~pstat ) ^ shared_flags[i] ) & pstat )
05604             {
05605                 hint = tmp_ents.insert( hint, *rit );
05606                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05607             }
05608         }
05609     }
05610     else if( op == PSTATUS_AND )
05611     {
05612         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05613         {
05614             if( ( shared_flags[i] & pstat ) == pstat )
05615             {
05616                 hint = tmp_ents.insert( hint, *rit );
05617                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05618             }
05619         }
05620     }
05621     else if( op == PSTATUS_NOT )
05622     {
05623         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05624         {
05625             if( !( shared_flags[i] & pstat ) )
05626             {
05627                 hint = tmp_ents.insert( hint, *rit );
05628                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05629             }
05630         }
05631     }
05632     else
05633     {
05634         assert( false );
05635         return MB_FAILURE;
05636     }
05637 
05638     if( -1 != to_proc )
05639     {
05640         int sharing_procs[MAX_SHARING_PROCS];
05641         std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
05642         Range tmp_ents2;
05643         hint = tmp_ents2.begin();
05644 
05645         for( rit = tmp_ents.begin(), i = 0; rit != tmp_ents.end(); ++rit, i++ )
05646         {
05647             // We need to check sharing procs
05648             if( shared_flags2[i] & PSTATUS_MULTISHARED )
05649             {
05650                 result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag" );
05651                 assert( -1 != sharing_procs[0] );
05652                 for( unsigned int j = 0; j < MAX_SHARING_PROCS; j++ )
05653                 {
05654                     // If to_proc shares this entity, add it to list
05655                     if( sharing_procs[j] == to_proc )
05656                     {
05657                         hint = tmp_ents2.insert( hint, *rit );
05658                     }
05659                     else if( -1 == sharing_procs[j] )
05660                         break;
05661 
05662                     sharing_procs[j] = -1;
05663                 }
05664             }
05665             else if( shared_flags2[i] & PSTATUS_SHARED )
05666             {
05667                 result = mbImpl->tag_get_data( sharedp_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedp tag" );
05668                 assert( -1 != sharing_procs[0] );
05669                 if( sharing_procs[0] == to_proc ) hint = tmp_ents2.insert( hint, *rit );
05670                 sharing_procs[0] = -1;
05671             }
05672             else
05673                 assert( "should never get here" && false );
05674         }
05675 
05676         tmp_ents.swap( tmp_ents2 );
05677     }
05678 
05679     if( returned_ents )
05680         returned_ents->swap( tmp_ents );
05681     else
05682         ents.swap( tmp_ents );
05683 
05684     return MB_SUCCESS;
05685 }
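
// Minimal usage sketch (illustrative): starting from all local 2D entities, keep
// only those that are both shared and owned by this proc; 'mb' and 'pcomm' are
// assumed to exist:
//
//   Range faces;
//   ErrorCode rval = mb.get_entities_by_dimension( 0, 2, faces );
//   if( MB_SUCCESS == rval ) rval = pcomm.filter_pstatus( faces, PSTATUS_SHARED, PSTATUS_AND );
//   if( MB_SUCCESS == rval ) rval = pcomm.filter_pstatus( faces, PSTATUS_NOT_OWNED, PSTATUS_NOT );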
05686 
05687 ErrorCode ParallelComm::exchange_ghost_cells( int ghost_dim,
05688                                               int bridge_dim,
05689                                               int num_layers,
05690                                               int addl_ents,
05691                                               bool store_remote_handles,
05692                                               bool wait_all,
05693                                               EntityHandle* file_set )
05694 {
05695 #ifdef MOAB_HAVE_MPE
05696     if( myDebug->get_verbosity() == 2 )
05697     {
05698         if( !num_layers )
05699             MPE_Log_event( IFACE_START, procConfig.proc_rank(), "Starting interface exchange." );
05700         else
05701             MPE_Log_event( GHOST_START, procConfig.proc_rank(), "Starting ghost exchange." );
05702     }
05703 #endif
05704 
05705     myDebug->tprintf( 1, "Entering exchange_ghost_cells with num_layers = %d\n", num_layers );
05706     if( myDebug->get_verbosity() == 4 )
05707     {
05708         msgs.clear();
05709         msgs.reserve( MAX_SHARING_PROCS );
05710     }
05711 
05712     // If we're only finding out about existing ents, we have to be storing
05713     // remote handles too
05714     assert( num_layers > 0 || store_remote_handles );
05715 
05716     const bool is_iface = !num_layers;
05717 
05718     // Get the b-dimensional interface(s) shared with each communicating proc, where b = bridge_dim
05719 
05720     int success;
05721     ErrorCode result = MB_SUCCESS;
05722     int incoming1 = 0, incoming2 = 0;
05723 
05724     reset_all_buffers();
05725 
05726     // When this function is called, buffProcs should already have any
05727     // communicating procs
05728 
05729     //===========================================
05730     // Post ghost irecv's for ghost entities from all communicating procs
05731     //===========================================
05732 #ifdef MOAB_HAVE_MPE
05733     if( myDebug->get_verbosity() == 2 )
05734     {
05735         MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." );
05736     }
05737 #endif
05738 
05739     // Index reqs the same as buffer/sharing procs indices
05740     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL ),
05741         recv_remoteh_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05742     std::vector< unsigned int >::iterator proc_it;
05743     int ind, p;
05744     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05745     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
05746     {
05747         incoming1++;
05748         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
05749                            MB_MESG_ENTS_SIZE, incoming1 );
05750         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
05751                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
05752         if( success != MPI_SUCCESS )
05753         {
05754             MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" );
05755         }
05756     }
05757 
05758     //===========================================
05759     // Get entities to be sent to neighbors
05760     //===========================================
05761     Range sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
05762     TupleList entprocs;
05763     int dum_ack_buff;
05764     result = get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents, allsent, entprocs );MB_CHK_SET_ERR( result, "get_sent_ents failed" );
05765 
05766     // augment file set with the entities to be sent
05767     // we might have created new entities if addl_ents>0, edges and/or faces
05768     if( addl_ents > 0 && file_set && !allsent.empty() )
05769     {
05770         result = mbImpl->add_entities( *file_set, allsent );MB_CHK_SET_ERR( result, "Failed to add new sub-entities to set" );
05771     }
05772     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
05773                       (unsigned long)allsent.size() );
05774 
05775     //===========================================
05776     // Pack and send ents from this proc to others
05777     //===========================================
05778     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
05779     {
05780         myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", sent_ents[p].compactness(),
05781                           (unsigned long)sent_ents[p].size() );
05782 
05783         // Reserve space on front for size and for initial buff size
05784         localOwnedBuffs[p]->reset_buffer( sizeof( int ) );
05785 
05786         // Entities
05787         result = pack_entities( sent_ents[p], localOwnedBuffs[p], store_remote_handles, buffProcs[p], is_iface,
05788                                 &entprocs, &allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
05789 
05790         if( myDebug->get_verbosity() == 4 )
05791         {
05792             msgs.resize( msgs.size() + 1 );
05793             msgs.back() = new Buffer( *localOwnedBuffs[p] );
05794         }
05795 
05796         // Send the buffer (size stored in front in send_buffer)
05797         result = send_buffer( *proc_it, localOwnedBuffs[p], MB_MESG_ENTS_SIZE, sendReqs[3 * p],
05798                               recv_ent_reqs[3 * p + 2], &dum_ack_buff, incoming1, MB_MESG_REMOTEH_SIZE,
05799                               ( !is_iface && store_remote_handles ?  // this used for ghosting only
05800                                     localOwnedBuffs[p]
05801                                                                   : NULL ),
05802                               &recv_remoteh_reqs[3 * p], &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost exchange" );
05803     }
05804 
05805     entprocs.reset();
05806 
05807     //===========================================
05808     // Receive/unpack new entities
05809     //===========================================
05810     // Number of incoming messages for ghosts is the number of procs we
05811     // communicate with; for iface, it's the number of those with lower rank
05812     MPI_Status status;
05813     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
05814     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
05815     std::vector< std::vector< int > > L1p( buffProcs.size() );
05816     std::vector< EntityHandle > L2hloc, L2hrem;
05817     std::vector< unsigned int > L2p;
05818     std::vector< EntityHandle > new_ents;
05819 
05820     while( incoming1 )
05821     {
05822         // Wait for all recvs of ghost ents before proceeding to sending remote handles,
05823         // because some procs may have sent entities owned by me to a third proc
05824         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
05825 
05826         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &ind, &status );
05827         if( MPI_SUCCESS != success )
05828         {
05829             MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" );
05830         }
05831 
05832         PRINT_DEBUG_RECD( status );
05833 
05834         // OK, received something; decrement incoming counter
05835         incoming1--;
05836         bool done = false;
05837 
05838         // In case ind is for ack, we need index of one before it
05839         unsigned int base_ind = 3 * ( ind / 3 );
05840         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 3], recv_ent_reqs[base_ind + 1],
05841                               recv_ent_reqs[base_ind + 2], incoming1, localOwnedBuffs[ind / 3], sendReqs[base_ind + 1],
05842                               sendReqs[base_ind + 2], done,
05843                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind / 3] : NULL ),
05844                               MB_MESG_REMOTEH_SIZE,  // maybe base_ind+1?
05845                               &recv_remoteh_reqs[base_ind + 1], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
05846 
05847         if( done )
05848         {
05849             if( myDebug->get_verbosity() == 4 )
05850             {
05851                 msgs.resize( msgs.size() + 1 );
05852                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 3] );
05853             }
05854 
05855             // Message completely received - process buffer that was sent
05856             remoteOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
05857             result = unpack_entities( remoteOwnedBuffs[ind / 3]->buff_ptr, store_remote_handles, ind / 3, is_iface,
05858                                       L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );
05859             if( MB_SUCCESS != result )
05860             {
05861                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
05862                 print_buffer( remoteOwnedBuffs[ind / 3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 3], false );
05863                 return result;
05864             }
05865 
05866             if( recv_ent_reqs.size() != 3 * buffProcs.size() )
05867             {
05868                 // Post irecv's for remote handles from new proc; shouldn't be iface,
05869                 // since we know about all procs we share with
05870                 assert( !is_iface );
05871                 recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05872                 for( unsigned int i = recv_ent_reqs.size(); i < 3 * buffProcs.size(); i += 3 )
05873                 {
05874                     localOwnedBuffs[i / 3]->reset_buffer();
05875                     incoming2++;
05876                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 3], localOwnedBuffs[i / 3]->mem_ptr,
05877                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
05878                     success = MPI_Irecv( localOwnedBuffs[i / 3]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
05879                                          buffProcs[i / 3], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
05880                                          &recv_remoteh_reqs[i] );
05881                     if( success != MPI_SUCCESS )
05882                     {
05883                         MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" );
05884                     }
05885                 }
05886                 recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05887                 sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05888             }
05889         }
05890     }
05891 
05892     // Add requests for any new addl procs
05893     if( recv_ent_reqs.size() != 3 * buffProcs.size() )
05894     {
05895         // Shouldn't get here...
05896         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in ghost exchange" );
05897     }
05898 
05899 #ifdef MOAB_HAVE_MPE
05900     if( myDebug->get_verbosity() == 2 )
05901     {
05902         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange." );
05903     }
05904 #endif
05905 
05906     if( is_iface )
05907     {
05908         // Need to check over entities I sent and make sure I received
05909         // handles for them from all expected procs; if not, need to clean
05910         // them up
05911         result = check_clean_iface( allsent );
05912         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05913 
05914         // Now set the shared/interface tag on non-vertex entities on interface
05915         result = tag_iface_entities();MB_CHK_SET_ERR( result, "Failed to tag iface entities" );
05916 
05917 #ifndef NDEBUG
05918         result = check_sent_ents( allsent );
05919         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05920         result = check_all_shared_handles( true );
05921         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05922 #endif
05923 
05924 #ifdef MOAB_HAVE_MPE
05925         if( myDebug->get_verbosity() == 2 )
05926         {
05927             MPE_Log_event( IFACE_END, procConfig.proc_rank(), "Ending interface exchange." );
05928         }
05929 #endif
05930 
05931         //===========================================
05932         // Wait if requested
05933         //===========================================
05934         if( wait_all )
05935         {
05936             if( myDebug->get_verbosity() == 5 )
05937             {
05938                 success = MPI_Barrier( procConfig.proc_comm() );
05939             }
05940             else
05941             {
05942                 MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05943                 success = MPI_Waitall( 3 * buffProcs.size(), &recv_ent_reqs[0], mult_status );
05944                 if( MPI_SUCCESS != success )
05945                 {
05946                     MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" );
05947                 }
05948                 success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05949                 if( MPI_SUCCESS != success )
05950                 {
05951                     MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" );
05952                 }
05953                 /*success = MPI_Waitall(3*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
05954                 if (MPI_SUCCESS != success) {
05955                   MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
05956                 }*/
05957             }
05958         }
05959 
05960         myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
05961         myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==true \n" );
05962 
05963         return MB_SUCCESS;
05964     }
05965 
05966     // we still need to wait on sendReqs, if they are not fulfilled yet
05967     if( wait_all )
05968     {
05969         if( myDebug->get_verbosity() == 5 )
05970         {
05971             success = MPI_Barrier( procConfig.proc_comm() );
05972         }
05973         else
05974         {
05975             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05976             success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05977             if( MPI_SUCCESS != success )
05978             {
05979                 MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" );
05980             }
05981         }
05982     }
05983     //===========================================
05984     // Send local handles for new ghosts to owner, then add
05985     // those to ghost list for that owner
05986     //===========================================
05987     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
05988     {
05989 
05990         // Reserve space on front for size and for initial buff size
05991         remoteOwnedBuffs[p]->reset_buffer( sizeof( int ) );
05992 
05993         result = pack_remote_handles( L1hloc[p], L1hrem[p], L1p[p], *proc_it, remoteOwnedBuffs[p] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
05994         remoteOwnedBuffs[p]->set_stored_size();
05995 
05996         if( myDebug->get_verbosity() == 4 )
05997         {
05998             msgs.resize( msgs.size() + 1 );
05999             msgs.back() = new Buffer( *remoteOwnedBuffs[p] );
06000         }
06001         result = send_buffer( buffProcs[p], remoteOwnedBuffs[p], MB_MESG_REMOTEH_SIZE, sendReqs[3 * p],
06002                               recv_remoteh_reqs[3 * p + 2], &dum_ack_buff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
06003     }
06004 
06005     //===========================================
06006     // Process remote handles of my ghosteds
06007     //===========================================
06008     while( incoming2 )
06009     {
06010         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
06011         success = MPI_Waitany( 3 * buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status );
06012         if( MPI_SUCCESS != success )
06013         {
06014             MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" );
06015         }
06016 
06017         // OK, received something; decrement incoming counter
06018         incoming2--;
06019 
06020         PRINT_DEBUG_RECD( status );
06021 
06022         bool done             = false;
06023         unsigned int base_ind = 3 * ( ind / 3 );
06024         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 3], recv_remoteh_reqs[base_ind + 1],
06025                               recv_remoteh_reqs[base_ind + 2], incoming2, remoteOwnedBuffs[ind / 3],
06026                               sendReqs[base_ind + 1], sendReqs[base_ind + 2], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
06027         if( done )
06028         {
06029             // Incoming remote handles
06030             if( myDebug->get_verbosity() == 4 )
06031             {
06032                 msgs.resize( msgs.size() + 1 );
06033                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 3] );
06034             }
06035             localOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
06036             result =
06037                 unpack_remote_handles( buffProcs[ind / 3], localOwnedBuffs[ind / 3]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
06038         }
06039     }
06040 
06041 #ifdef MOAB_HAVE_MPE
06042     if( myDebug->get_verbosity() == 2 )
06043     {
06044         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
06045         MPE_Log_event( GHOST_END, procConfig.proc_rank(), "Ending ghost exchange (still doing checks)." );
06046     }
06047 #endif
06048 
06049     //===========================================
06050     // Wait if requested
06051     //===========================================
06052     if( wait_all )
06053     {
06054         if( myDebug->get_verbosity() == 5 )
06055         {
06056             success = MPI_Barrier( procConfig.proc_comm() );
06057         }
06058         else
06059         {
06060             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
06061             success = MPI_Waitall( 3 * buffProcs.size(), &recv_remoteh_reqs[0], mult_status );
06062             if( MPI_SUCCESS == success ) success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
06063         }
06064         if( MPI_SUCCESS != success )
06065         {
06066             MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" );
06067         }
06068     }
06069 
06070 #ifndef NDEBUG
06071     result = check_sent_ents( allsent );MB_CHK_SET_ERR( result, "Failed check on shared entities" );
06072     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Failed check on all shared handles" );
06073 #endif
06074 
06075     if( file_set && !new_ents.empty() )
06076     {
06077         result = mbImpl->add_entities( *file_set, &new_ents[0], new_ents.size() );MB_CHK_SET_ERR( result, "Failed to add new entities to set" );
06078     }
06079 
06080     myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
06081     myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==false \n" );
06082 
06083     return MB_SUCCESS;
06084 }
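
// Minimal usage sketch (illustrative): exchange one layer of 3D ghost elements
// bridged through vertices, storing remote handles; passing num_layers = 0
// would instead perform the interface-only exchange handled by the is_iface
// branch above:
//
//   ErrorCode rval = pcomm.exchange_ghost_cells( 3 /*ghost_dim*/, 0 /*bridge_dim*/,
//                                                1 /*num_layers*/, 0 /*addl_ents*/,
//                                                true /*store_remote_handles*/ );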
06085 
06086 ErrorCode ParallelComm::send_buffer( const unsigned int to_proc,
06087                                      Buffer* send_buff,
06088                                      int mesg_tag,
06089                                      MPI_Request& send_req,
06090                                      MPI_Request& ack_req,
06091                                      int* ack_buff,
06092                                      int& this_incoming,
06093                                      int next_mesg_tag,
06094                                      Buffer* next_recv_buff,
06095                                      MPI_Request* next_recv_req,
06096                                      int* next_incoming )
06097 {
06098     ErrorCode result = MB_SUCCESS;
06099     int success;
06100 
06101     // If small message, post recv for remote handle message
06102     if( send_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE && next_recv_buff )
06103     {
06104         ( *next_incoming )++;
06105         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, next_mesg_tag,
06106                            *next_incoming );
06107         success = MPI_Irecv( next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, to_proc, next_mesg_tag,
06108                              procConfig.proc_comm(), next_recv_req );
06109         if( success != MPI_SUCCESS )
06110         {
06111             MB_SET_ERR( MB_FAILURE, "Failed to post irecv for next message in ghost exchange" );
06112         }
06113     }
06114     // If large, we'll need an ack before sending the rest
06115     else if( send_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
06116     {
06117         this_incoming++;
06118         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff, sizeof( int ), mesg_tag - 1,
06119                            this_incoming );
06120         success = MPI_Irecv( (void*)ack_buff, sizeof( int ), MPI_UNSIGNED_CHAR, to_proc, mesg_tag - 1,
06121                              procConfig.proc_comm(), &ack_req );
06122         if( success != MPI_SUCCESS )
06123         {
06124             MB_SET_ERR( MB_FAILURE, "Failed to post irecv for entity ack in ghost exchange" );
06125         }
06126     }
06127 
06128     // Send the buffer
06129     PRINT_DEBUG_ISEND( procConfig.proc_rank(), to_proc, send_buff->mem_ptr, mesg_tag,
06130                        std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ) );
06131     assert( 0 <= send_buff->get_stored_size() && send_buff->get_stored_size() <= (int)send_buff->alloc_size );
06132     success = MPI_Isend( send_buff->mem_ptr, std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ),
06133                          MPI_UNSIGNED_CHAR, to_proc, mesg_tag, procConfig.proc_comm(), &send_req );
06134     if( success != MPI_SUCCESS ) return MB_FAILURE;
06135 
06136     return result;
06137 }
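
// Editorial sketch (not part of ParallelComm, never called): the two-phase send
// protocol implemented by send_buffer/recv_buffer, reduced to plain MPI with
// blocking calls for brevity. The first chunk (at most one initial buffer's
// worth) is always sent eagerly on 'tag'; if the payload is larger, the sender
// waits for an ack on 'tag - 1' and ships the remainder on 'tag + 1'. The
// function name and init_size are hypothetical; the real code keeps everything
// nonblocking and tracks requests per neighbor.
static void sketch_two_phase_send( unsigned char* buf, int size, int to_proc, int tag, MPI_Comm comm )
{
    const int init_size = 1024;  // stands in for INITIAL_BUFF_SIZE
    MPI_Request first_req;
    // Phase 1: the receiver has pre-posted an irecv of init_size bytes on 'tag'
    MPI_Isend( buf, std::min( size, init_size ), MPI_UNSIGNED_CHAR, to_proc, tag, comm, &first_req );
    if( size > init_size )
    {
        // Phase 2: wait for the ack (sent on tag - 1), then ship the tail on tag + 1
        unsigned char ack[sizeof( int )];
        MPI_Recv( ack, sizeof( int ), MPI_UNSIGNED_CHAR, to_proc, tag - 1, comm, MPI_STATUS_IGNORE );
        MPI_Request second_req;
        MPI_Isend( buf + init_size, size - init_size, MPI_UNSIGNED_CHAR, to_proc, tag + 1, comm, &second_req );
        MPI_Wait( &second_req, MPI_STATUS_IGNORE );
    }
    MPI_Wait( &first_req, MPI_STATUS_IGNORE );
}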
06138 
06139 ErrorCode ParallelComm::recv_buffer( int mesg_tag_expected,
06140                                      const MPI_Status& mpi_status,
06141                                      Buffer* recv_buff,
06142                                      MPI_Request& recv_req,
06143                                      MPI_Request& /*ack_recvd_req*/,
06144                                      int& this_incoming,
06145                                      Buffer* send_buff,
06146                                      MPI_Request& send_req,
06147                                      MPI_Request& sent_ack_req,
06148                                      bool& done,
06149                                      Buffer* next_buff,
06150                                      int next_tag,
06151                                      MPI_Request* next_req,
06152                                      int* next_incoming )
06153 {
06154     // Process a received message; if there will be more coming,
06155     // post a receive for 2nd part then send an ack message
06156     int from_proc = mpi_status.MPI_SOURCE;
06157     int success;
06158 
06159     // Set the buff_ptr on the recv_buffer; needs to point beyond any
06160     // valid data already in the buffer
06161     recv_buff->reset_ptr( std::min( recv_buff->get_stored_size(), (int)recv_buff->alloc_size ) );
06162 
06163     if( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
06164     {
06165         // 1st message & large - allocate buffer, post irecv for 2nd message,
06166         // then send ack
06167         recv_buff->reserve( recv_buff->get_stored_size() );
06168         assert( recv_buff->alloc_size > INITIAL_BUFF_SIZE );
06169 
06170         // Will expect a 2nd message
06171         this_incoming++;
06172 
06173         PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr + INITIAL_BUFF_SIZE,
06174                            recv_buff->get_stored_size() - INITIAL_BUFF_SIZE, mesg_tag_expected + 1, this_incoming );
06175         success = MPI_Irecv( recv_buff->mem_ptr + INITIAL_BUFF_SIZE, recv_buff->get_stored_size() - INITIAL_BUFF_SIZE,
06176                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &recv_req );
06177         if( success != MPI_SUCCESS )
06178         {
06179             MB_SET_ERR( MB_FAILURE, "Failed to post 2nd iRecv in ghost exchange" );
06180         }
06181 
06182         // Send ack; the payload contents don't matter
06183         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr, mesg_tag_expected - 1,
06184                            sizeof( int ) );
06185         success = MPI_Isend( recv_buff->mem_ptr, sizeof( int ), MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected - 1,
06186                              procConfig.proc_comm(), &sent_ack_req );
06187         if( success != MPI_SUCCESS )
06188         {
06189             MB_SET_ERR( MB_FAILURE, "Failed to send ack in ghost exchange" );
06190         }
06191     }
06192     else if( mpi_status.MPI_TAG == mesg_tag_expected - 1 )
06193     {
06194         // Got an ack back, send the 2nd half of message
06195 
06196         // Should be a large message if we got this
06197         assert( *( (size_t*)send_buff->mem_ptr ) > INITIAL_BUFF_SIZE );
06198 
06199         // Post irecv for next message, then send 2nd message
06200         if( next_buff )
06201         {
06202             // We'll expect a return message
06203             ( *next_incoming )++;
06204             PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, next_buff->mem_ptr, INITIAL_BUFF_SIZE, next_tag,
06205                                *next_incoming );
06206 
06207             success = MPI_Irecv( next_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc, next_tag,
06208                                  procConfig.proc_comm(), next_req );
06209             if( success != MPI_SUCCESS )
06210             {
06211                 MB_SET_ERR( MB_FAILURE, "Failed to post next irecv in ghost exchange" );
06212             }
06213         }
06214 
06215         // Send 2nd message
06216         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, send_buff->mem_ptr + INITIAL_BUFF_SIZE,
06217                            mesg_tag_expected + 1, send_buff->get_stored_size() - INITIAL_BUFF_SIZE );
06218 
06219         assert( send_buff->get_stored_size() - INITIAL_BUFF_SIZE < send_buff->alloc_size &&
06220                 0 <= send_buff->get_stored_size() );
06221         success = MPI_Isend( send_buff->mem_ptr + INITIAL_BUFF_SIZE, send_buff->get_stored_size() - INITIAL_BUFF_SIZE,
06222                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &send_req );
06223         if( success != MPI_SUCCESS )
06224         {
06225             MB_SET_ERR( MB_FAILURE, "Failed to send 2nd message in ghost exchange" );
06226         }
06227     }
06228     else if( ( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE ) ||
06229              mpi_status.MPI_TAG == mesg_tag_expected + 1 )
06230     {
06231         // Message completely received - signal that we're done
06232         done = true;
06233     }
06234 
06235     return MB_SUCCESS;
06236 }
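
// Editorial sketch (not part of ParallelComm, never called): the receiving half
// of the two-phase protocol handled by recv_buffer above, again in plain MPI and
// with blocking calls. It assumes, as the Buffer class does, that the total
// message size is recorded at the front of the first chunk; that is modeled here
// as a leading int, which is an assumption of this sketch.
static void sketch_two_phase_recv( std::vector< unsigned char >& buf, int from_proc, int tag, MPI_Comm comm )
{
    const int init_size = 1024;  // stands in for INITIAL_BUFF_SIZE
    buf.resize( init_size );
    // The first chunk arrives on 'tag' (the real code pre-posts this as an irecv)
    MPI_Recv( &buf[0], init_size, MPI_UNSIGNED_CHAR, from_proc, tag, comm, MPI_STATUS_IGNORE );

    const int total_size = *reinterpret_cast< int* >( &buf[0] );  // total size stored at the front (assumption)
    if( total_size > init_size )
    {
        // Large message: grow the buffer, ack on tag - 1, then receive the tail on tag + 1
        buf.resize( total_size );
        unsigned char ack[sizeof( int )] = { 0 };  // ack payload contents are irrelevant
        MPI_Send( ack, sizeof( int ), MPI_UNSIGNED_CHAR, from_proc, tag - 1, comm );
        MPI_Recv( &buf[init_size], total_size - init_size, MPI_UNSIGNED_CHAR, from_proc, tag + 1, comm,
                  MPI_STATUS_IGNORE );
    }
}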
06237 
06238 struct ProcList
06239 {
06240     int procs[MAX_SHARING_PROCS];
06241 };
06242 static bool operator<( const ProcList& a, const ProcList& b )
06243 {
06244     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
06245     {
06246         if( a.procs[i] < b.procs[i] )
06247             return true;
06248         else if( b.procs[i] < a.procs[i] )
06249             return false;
06250         else if( a.procs[i] < 0 )
06251             return false;
06252     }
06253     return false;
06254 }
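
// Editorial example (never called): the sentinel-terminated comparison above
// orders proc lists lexicographically and treats the first -1 as end-of-list,
// so a shorter list sorts before any longer list sharing the same prefix. This
// is what lets ProcList serve as a std::map key in check_clean_iface below.
static bool sketch_proclist_ordering()
{
    ProcList a, b;
    std::fill( a.procs, a.procs + MAX_SHARING_PROCS, -1 );
    std::fill( b.procs, b.procs + MAX_SHARING_PROCS, -1 );
    a.procs[0] = 2;                  // shared with proc 2 only
    b.procs[0] = 2;
    b.procs[1] = 5;                  // shared with procs 2 and 5
    return ( a < b ) && !( b < a );  // true: {2} precedes {2,5}
}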
06255 
06256 ErrorCode ParallelComm::check_clean_iface( Range& allsent )
06257 {
06258     // allsent is all entities I think are on interface; go over them, looking
06259     // for zero-valued handles, and fix any I find
06260 
06261     // Keep lists of entities for which the sharing data changed, grouped
06262     // by set of sharing procs.
06263     typedef std::map< ProcList, Range > procmap_t;
06264     procmap_t old_procs, new_procs;
06265 
06266     ErrorCode result = MB_SUCCESS;
06267     Range::iterator rit;
06268     Range::reverse_iterator rvit;
06269     unsigned char pstatus;
06270     int nump;
06271     ProcList sharedp;
06272     EntityHandle sharedh[MAX_SHARING_PROCS];
06273     for( rvit = allsent.rbegin(); rvit != allsent.rend(); ++rvit )
06274     {
06275         result = get_sharing_data( *rvit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
06276         assert( "Should be shared with at least one other proc" &&
06277                 ( nump > 1 || sharedp.procs[0] != (int)procConfig.proc_rank() ) );
06278         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
06279 
06280         // Look for first null handle in list
06281         int idx = std::find( sharedh, sharedh + nump, (EntityHandle)0 ) - sharedh;
06282         if( idx == nump ) continue;  // All handles are valid
06283 
06284         ProcList old_list( sharedp );
06285         std::sort( old_list.procs, old_list.procs + nump );
06286         old_procs[old_list].insert( *rvit );
06287 
06288         // Remove null handles and corresponding proc ranks from lists
06289         int new_nump       = idx;
06290         bool removed_owner = !idx;
06291         for( ++idx; idx < nump; ++idx )
06292         {
06293             if( sharedh[idx] )
06294             {
06295                 sharedh[new_nump]       = sharedh[idx];
06296                 sharedp.procs[new_nump] = sharedp.procs[idx];
06297                 ++new_nump;
06298             }
06299         }
06300         sharedp.procs[new_nump] = -1;
06301 
06302         if( removed_owner && new_nump > 1 )
06303         {
06304             // The proc that we choose as the entity owner isn't sharing the
06305             // entity (doesn't have a copy of it). We need to pick a different
06306             // owner. Choose the proc with lowest rank.
06307             idx = std::min_element( sharedp.procs, sharedp.procs + new_nump ) - sharedp.procs;
06308             std::swap( sharedp.procs[0], sharedp.procs[idx] );
06309             std::swap( sharedh[0], sharedh[idx] );
06310             if( sharedp.procs[0] == (int)proc_config().proc_rank() ) pstatus &= ~PSTATUS_NOT_OWNED;
06311         }
06312 
06313         result = set_sharing_data( *rvit, pstatus, nump, new_nump, sharedp.procs, sharedh );MB_CHK_SET_ERR( result, "Failed to set sharing data in check_clean_iface" );
06314 
06315         if( new_nump > 1 )
06316         {
06317             if( new_nump == 2 )
06318             {
06319                 if( sharedp.procs[1] != (int)proc_config().proc_rank() )
06320                 {
06321                     assert( sharedp.procs[0] == (int)proc_config().proc_rank() );
06322                     sharedp.procs[0] = sharedp.procs[1];
06323                 }
06324                 sharedp.procs[1] = -1;
06325             }
06326             else
06327             {
06328                 std::sort( sharedp.procs, sharedp.procs + new_nump );
06329             }
06330             new_procs[sharedp].insert( *rvit );
06331         }
06332     }
06333 
06334     if( old_procs.empty() )
06335     {
06336         assert( new_procs.empty() );
06337         return MB_SUCCESS;
06338     }
06339 
06340     // Update interface sets
06341     procmap_t::iterator pmit;
06342     // std::vector<unsigned char> pstatus_list;
06343     rit = interface_sets().begin();
06344     while( rit != interface_sets().end() )
06345     {
06346         result = get_sharing_data( *rit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data for interface set" );
06347         assert( nump != 2 );
06348         std::sort( sharedp.procs, sharedp.procs + nump );
06349         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
06350 
06351         pmit = old_procs.find( sharedp );
06352         if( pmit != old_procs.end() )
06353         {
06354             result = mbImpl->remove_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
06355         }
06356 
06357         pmit = new_procs.find( sharedp );
06358         if( pmit == new_procs.end() )
06359         {
06360             int count;
06361             result = mbImpl->get_number_entities_by_handle( *rit, count );MB_CHK_SET_ERR( result, "Failed to get number of entities in interface set" );
06362             if( !count )
06363             {
06364                 result = mbImpl->delete_entities( &*rit, 1 );MB_CHK_SET_ERR( result, "Failed to delete entities from interface set" );
06365                 rit = interface_sets().erase( rit );
06366             }
06367             else
06368             {
06369                 ++rit;
06370             }
06371         }
06372         else
06373         {
06374             result = mbImpl->add_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
06375 
06376             // Remove those that we've processed so that we know which ones
06377             // are new.
06378             new_procs.erase( pmit );
06379             ++rit;
06380         }
06381     }
06382 
06383     // Create interface sets for new proc id combinations
06384     std::fill( sharedh, sharedh + MAX_SHARING_PROCS, 0 );
06385     for( pmit = new_procs.begin(); pmit != new_procs.end(); ++pmit )
06386     {
06387         EntityHandle new_set;
06388         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
06389         interfaceSets.insert( new_set );
06390 
06391         // Add entities
06392         result = mbImpl->add_entities( new_set, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
06393         // Tag set with the proc rank(s)
06394         assert( pmit->first.procs[0] >= 0 );
06395         pstatus = PSTATUS_SHARED | PSTATUS_INTERFACE;
06396         if( pmit->first.procs[1] == -1 )
06397         {
06398             int other = pmit->first.procs[0];
06399             assert( other != (int)procConfig.proc_rank() );
06400             result = mbImpl->tag_set_data( sharedp_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06401             sharedh[0] = 0;
06402             result     = mbImpl->tag_set_data( sharedh_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with handles" );
06403             if( other < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
06404         }
06405         else
06406         {
06407             result = mbImpl->tag_set_data( sharedps_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06408             result = mbImpl->tag_set_data( sharedhs_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with handles" );
06409             pstatus |= PSTATUS_MULTISHARED;
06410             if( pmit->first.procs[0] < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
06411         }
06412 
06413         result = mbImpl->tag_set_data( pstatus_tag(), &new_set, 1, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
06414 
06415         // Set pstatus on all interface entities in set
06416         result = mbImpl->tag_clear_data( pstatus_tag(), pmit->second, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface entities with pstatus" );
06417     }
06418 
06419     return MB_SUCCESS;
06420 }
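
// Editorial sketch (never called): the core clean-up step of check_clean_iface
// isolated onto plain parallel arrays. Entries whose remote handle is zero are
// dropped along with the matching proc rank, and the proc list is re-terminated
// with -1; re-selecting the owner when entry 0 was dropped is left to the
// caller, as in the loop above.
static int sketch_compact_sharing_lists( EntityHandle* handles, int* procs, int nump )
{
    int new_nump = 0;
    for( int i = 0; i < nump; i++ )
    {
        if( handles[i] )  // keep only entries with a valid remote handle
        {
            handles[new_nump] = handles[i];
            procs[new_nump]   = procs[i];
            ++new_nump;
        }
    }
    if( new_nump < nump ) procs[new_nump] = -1;  // keep the sentinel convention
    return new_nump;
}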
06421 
06422 ErrorCode ParallelComm::set_sharing_data( EntityHandle ent,
06423                                           unsigned char pstatus,
06424                                           int old_nump,
06425                                           int new_nump,
06426                                           int* ps,
06427                                           EntityHandle* hs )
06428 {
06429     // If new_nump is less than 3, the entity is no longer multishared
06430     if( old_nump > 2 && ( pstatus & PSTATUS_MULTISHARED ) && new_nump < 3 )
06431     {
06432         // Unset multishared flag
06433         pstatus ^= PSTATUS_MULTISHARED;
06434     }
06435 
06436     // Check for consistency in input data
06437     // DBG: consistency conditions checked by the assert below
06438     /* bool con1 = ( ( new_nump == 2 && ( pstatus & PSTATUS_SHARED ) && !( pstatus & PSTATUS_MULTISHARED ) ) ||
06439                      ( new_nump > 2 && ( pstatus & PSTATUS_SHARED ) && ( pstatus & PSTATUS_MULTISHARED ) ) );
06440        bool con2 = ( !( pstatus & PSTATUS_GHOST ) || ( pstatus & PSTATUS_SHARED ) );
06441        bool con3 = ( new_nump < 3 || ( ( pstatus & PSTATUS_NOT_OWNED ) && ps[0] != (int)rank() ) ||
06442                      ( !( pstatus & PSTATUS_NOT_OWNED ) && ps[0] == (int)rank() ) );
06443        std::cout << "current rank = " << rank() << std::endl;
06444        std::cout << "condition 1::" << con1 << std::endl;
06445        std::cout << "condition 2::" << con2 << std::endl;
06446        std::cout << "condition 3::" << con3 << std::endl; */
06447 
06448     assert( new_nump > 1 &&
06449             ( ( new_nump == 2 && pstatus & PSTATUS_SHARED &&
06450                 !( pstatus & PSTATUS_MULTISHARED ) ) ||  // If <= 2 must not be multishared
06451               ( new_nump > 2 && pstatus & PSTATUS_SHARED &&
06452                 pstatus & PSTATUS_MULTISHARED ) ) &&                         // If > 2 procs, must be multishared
06453             ( !( pstatus & PSTATUS_GHOST ) || pstatus & PSTATUS_SHARED ) &&  // If ghost, it must also be shared
06454             ( new_nump < 3 ||
06455               ( pstatus & PSTATUS_NOT_OWNED && ps[0] != (int)rank() ) ||      // I'm not owner and first proc not me
06456               ( !( pstatus & PSTATUS_NOT_OWNED ) && ps[0] == (int)rank() ) )  // I'm owner and first proc is me
06457     );
06458 
06459 #ifndef NDEBUG
06460     {
06461         // Check for duplicates in proc list
06462         std::set< unsigned int > dumprocs;
06463         int dp = 0;
06464         for( ; dp < old_nump && -1 != ps[dp]; dp++ )
06465             dumprocs.insert( ps[dp] );
06466         assert( dp == (int)dumprocs.size() );
06467     }
06468 #endif
06469 
06470     ErrorCode result;
06471     // Reset any old data that needs to be
06472     if( old_nump > 2 && new_nump < 3 )
06473     {
06474         // Need to remove multishared tags
06475         result = mbImpl->tag_delete_data( sharedps_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:1" );
06476         result = mbImpl->tag_delete_data( sharedhs_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:2" );
06477         //    if (new_nump < 2)
06478         //      pstatus = 0x0;
06479         //    else if (ps[0] != (int)proc_config().proc_rank())
06480         //      pstatus |= PSTATUS_NOT_OWNED;
06481     }
06482     else if( ( old_nump < 3 && new_nump > 2 ) || ( old_nump > 1 && new_nump == 1 ) )
06483     {
06484         // Reset sharedp and sharedh tags
06485         int tmp_p          = -1;
06486         EntityHandle tmp_h = 0;
06487         result             = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, &tmp_p );MB_CHK_SET_ERR( result, "set_sharing_data:3" );
06488         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, &tmp_h );MB_CHK_SET_ERR( result, "set_sharing_data:4" );
06489     }
06490 
06491     assert( "check for multishared/owner I'm first proc" &&
06492             ( !( pstatus & PSTATUS_MULTISHARED ) || ( pstatus & ( PSTATUS_NOT_OWNED | PSTATUS_GHOST ) ) ||
06493               ( ps[0] == (int)rank() ) ) &&
06494             "interface entities should have > 1 proc" && ( !( pstatus & PSTATUS_INTERFACE ) || new_nump > 1 ) &&
06495             "ghost entities should have > 1 proc" && ( !( pstatus & PSTATUS_GHOST ) || new_nump > 1 ) );
06496 
06497     // Now set new data
06498     if( new_nump > 2 )
06499     {
06500         result = mbImpl->tag_set_data( sharedps_tag(), &ent, 1, ps );MB_CHK_SET_ERR( result, "set_sharing_data:5" );
06501         result = mbImpl->tag_set_data( sharedhs_tag(), &ent, 1, hs );MB_CHK_SET_ERR( result, "set_sharing_data:6" );
06502     }
06503     else
06504     {
06505         unsigned int j = ( ps[0] == (int)procConfig.proc_rank() ? 1 : 0 );
06506         assert( -1 != ps[j] );
06507         result = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, ps + j );MB_CHK_SET_ERR( result, "set_sharing_data:7" );
06508         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, hs + j );MB_CHK_SET_ERR( result, "set_sharing_data:8" );
06509     }
06510 
06511     result = mbImpl->tag_set_data( pstatus_tag(), &ent, 1, &pstatus );MB_CHK_SET_ERR( result, "set_sharing_data:9" );
06512 
06513     if( old_nump > 1 && new_nump < 2 ) sharedEnts.erase( ent );
06514 
06515     return result;
06516 }
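
// Editorial sketch (never called): the invariants enforced by the large assert
// in set_sharing_data, written out as a predicate for readability. It uses the
// same PSTATUS_* flags; 'ps' is the -1 terminated proc list and 'my_rank' this
// processor's rank.
static bool sketch_sharing_state_consistent( unsigned char pstatus, int nump, const int* ps, int my_rank )
{
    // with exactly 2 procs the entity is shared but not multishared; with more it is both
    const bool count_ok = ( nump == 2 && ( pstatus & PSTATUS_SHARED ) && !( pstatus & PSTATUS_MULTISHARED ) ) ||
                          ( nump > 2 && ( pstatus & PSTATUS_SHARED ) && ( pstatus & PSTATUS_MULTISHARED ) );
    // a ghost entity must also be shared
    const bool ghost_ok = !( pstatus & PSTATUS_GHOST ) || ( pstatus & PSTATUS_SHARED );
    // for multishared entities the owner is listed first
    const bool owner_ok = nump < 3 || ( ( pstatus & PSTATUS_NOT_OWNED ) ? ps[0] != my_rank : ps[0] == my_rank );
    return nump > 1 && count_ok && ghost_ok && owner_ok;
}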
06517 
06518 ErrorCode ParallelComm::get_sent_ents( const bool is_iface,
06519                                        const int bridge_dim,
06520                                        const int ghost_dim,
06521                                        const int num_layers,
06522                                        const int addl_ents,
06523                                        Range* sent_ents,
06524                                        Range& allsent,
06525                                        TupleList& entprocs )
06526 {
06527     ErrorCode result;
06528     unsigned int ind;
06529     std::vector< unsigned int >::iterator proc_it;
06530     Range tmp_range;
06531 
06532     // Done in a separate loop over procs because sometimes later procs
06533     // need to add info to earlier procs' messages
06534     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
06535     {
06536         if( !is_iface )
06537         {
06538             result =
06539                 get_ghosted_entities( bridge_dim, ghost_dim, buffProcs[ind], num_layers, addl_ents, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get ghost layers" );
06540         }
06541         else
06542         {
06543             result = get_iface_entities( buffProcs[ind], -1, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get interface layers" );
06544         }
06545 
06546         // Filter out entities already shared with destination
06547         tmp_range.clear();
06548         result = filter_pstatus( sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
06549         if( !tmp_range.empty() ) sent_ents[ind] = subtract( sent_ents[ind], tmp_range );
06550 
06551         allsent.merge( sent_ents[ind] );
06552     }
06553 
06554     //===========================================
06555     // Need to get procs each entity is sent to
06556     //===========================================
06557 
06558     // Get the total # of proc/handle pairs
06559     int npairs = 0;
06560     for( ind = 0; ind < buffProcs.size(); ind++ )
06561         npairs += sent_ents[ind].size();
06562 
06563     // Allocate a TupleList of that size
06564     entprocs.initialize( 1, 0, 1, 0, npairs );
06565     entprocs.enableWriteAccess();
06566 
06567     // Put the proc/handle pairs in the list
06568     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
06569     {
06570         for( Range::iterator rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); ++rit )
06571         {
06572             entprocs.vi_wr[entprocs.get_n()]  = *proc_it;
06573             entprocs.vul_wr[entprocs.get_n()] = *rit;
06574             entprocs.inc_n();
06575         }
06576     }
06577     // Sort by handle
06578     moab::TupleList::buffer sort_buffer;
06579     sort_buffer.buffer_init( npairs );
06580     entprocs.sort( 1, &sort_buffer );
06581 
06582     entprocs.disableWriteAccess();
06583     sort_buffer.reset();
06584 
06585     return MB_SUCCESS;
06586 }
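
// Editorial sketch (never called): the entprocs TupleList built above is, in
// effect, a flat list of (entity handle, destination proc) pairs sorted by
// handle, so that all destinations of a given entity are contiguous. A plain
// STL equivalent, with hypothetical argument types, looks like this:
static void sketch_build_sorted_entprocs( const std::vector< EntityHandle >& ents,
                                          const std::vector< unsigned int >& dest_procs,
                                          std::vector< std::pair< EntityHandle, unsigned int > >& pairs )
{
    pairs.clear();
    for( size_t i = 0; i < ents.size() && i < dest_procs.size(); i++ )
        pairs.push_back( std::make_pair( ents[i], dest_procs[i] ) );
    std::sort( pairs.begin(), pairs.end() );  // handle is the primary sort key
}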
06587 
06588 ErrorCode ParallelComm::exchange_ghost_cells( ParallelComm** pcs,
06589                                               unsigned int num_procs,
06590                                               int ghost_dim,
06591                                               int bridge_dim,
06592                                               int num_layers,
06593                                               int addl_ents,
06594                                               bool store_remote_handles,
06595                                               EntityHandle* file_sets )
06596 {
06597     // Static version of function, exchanging info through buffers rather
06598     // than through messages
06599 
06600     // If we're only finding out about existing ents, we have to be storing
06601     // remote handles too
06602     assert( num_layers > 0 || store_remote_handles );
06603 
06604     const bool is_iface = !num_layers;
06605 
06606     unsigned int ind;
06607     ParallelComm* pc;
06608     ErrorCode result = MB_SUCCESS;
06609 
06610     std::vector< Error* > ehs( num_procs );
06611     for( unsigned int i = 0; i < num_procs; i++ )
06612     {
06613         result = pcs[i]->get_moab()->query_interface( ehs[i] );
06614         assert( MB_SUCCESS == result );
06615     }
06616 
06617     // When this function is called, buffProcs should already have any
06618     // communicating procs
06619 
06620     //===========================================
06621     // Get entities to be sent to neighbors
06622     //===========================================
06623 
06624     // Done in a separate loop over procs because sometimes later procs
06625     // need to add info to earlier procs' messages
06626     Range sent_ents[MAX_SHARING_PROCS][MAX_SHARING_PROCS], allsent[MAX_SHARING_PROCS];
06627 
06631     TupleList entprocs[MAX_SHARING_PROCS];
06632     for( unsigned int p = 0; p < num_procs; p++ )
06633     {
06634         pc     = pcs[p];
06635         result = pc->get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents[p], allsent[p],
06636                                     entprocs[p] );MB_CHK_SET_ERR( result, "p = " << p << ", get_sent_ents failed" );
06637 
06638         //===========================================
06639         // Pack entities into buffers
06640         //===========================================
06641         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
06642         {
06643             // Entities
06644             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06645             result = pc->pack_entities( sent_ents[p][ind], pc->localOwnedBuffs[ind], store_remote_handles,
06646                                         pc->buffProcs[ind], is_iface, &entprocs[p], &allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", packing entities failed" );
06647         }
06648 
06649         entprocs[p].reset();
06650     }
06651 
06652     //===========================================
06653     // Receive/unpack new entities
06654     //===========================================
06655     // Number of incoming messages for ghosts is the number of procs we
06656     // communicate with; for iface, it's the number of those with lower rank
06657     std::vector< std::vector< EntityHandle > > L1hloc[MAX_SHARING_PROCS], L1hrem[MAX_SHARING_PROCS];
06658     std::vector< std::vector< int > > L1p[MAX_SHARING_PROCS];
06659     std::vector< EntityHandle > L2hloc[MAX_SHARING_PROCS], L2hrem[MAX_SHARING_PROCS];
06660     std::vector< unsigned int > L2p[MAX_SHARING_PROCS];
06661     std::vector< EntityHandle > new_ents[MAX_SHARING_PROCS];
06662 
06663     for( unsigned int p = 0; p < num_procs; p++ )
06664     {
06665         L1hloc[p].resize( pcs[p]->buffProcs.size() );
06666         L1hrem[p].resize( pcs[p]->buffProcs.size() );
06667         L1p[p].resize( pcs[p]->buffProcs.size() );
06668     }
06669 
06670     for( unsigned int p = 0; p < num_procs; p++ )
06671     {
06672         pc = pcs[p];
06673 
06674         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
06675         {
06676             // Incoming ghost entities; unpack; returns entities received
06677             // both from sending proc and from owning proc (which may be different)
06678 
06679             // Buffer could be empty, which means there isn't any message to
06680             // unpack (due to this comm proc getting added as a result of indirect
06681             // communication); just skip this unpack
06682             if( pc->localOwnedBuffs[ind]->get_stored_size() == 0 ) continue;
06683 
06684             unsigned int to_p = pc->buffProcs[ind];
06685             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06686             result = pcs[to_p]->unpack_entities( pc->localOwnedBuffs[ind]->buff_ptr, store_remote_handles, ind,
06687                                                  is_iface, L1hloc[to_p], L1hrem[to_p], L1p[to_p], L2hloc[to_p],
06688                                                  L2hrem[to_p], L2p[to_p], new_ents[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack entities" );
06689         }
06690     }
06691 
06692     if( is_iface )
06693     {
06694         // Need to check over entities I sent and make sure I received
06695         // handles for them from all expected procs; if not, need to clean
06696         // them up
06697         for( unsigned int p = 0; p < num_procs; p++ )
06698         {
06699             result = pcs[p]->check_clean_iface( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06700         }
06701 
06702 #ifndef NDEBUG
06703         for( unsigned int p = 0; p < num_procs; p++ )
06704         {
06705             result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06706         }
06707         result = check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
06708 #endif
06709         return MB_SUCCESS;
06710     }
06711 
06712     //===========================================
06713     // Send local handles for new ghosts to owner, then add
06714     // those to ghost list for that owner
06715     //===========================================
06716     std::vector< unsigned int >::iterator proc_it;
06717     for( unsigned int p = 0; p < num_procs; p++ )
06718     {
06719         pc = pcs[p];
06720 
06721         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
06722         {
06723             // Skip if iface layer and higher-rank proc
06724             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06725             result = pc->pack_remote_handles( L1hloc[p][ind], L1hrem[p][ind], L1p[p][ind], *proc_it,
06726                                               pc->localOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to pack remote handles" );
06727         }
06728     }
06729 
06730     //===========================================
06731     // Process remote handles of my ghosteds
06732     //===========================================
06733     for( unsigned int p = 0; p < num_procs; p++ )
06734     {
06735         pc = pcs[p];
06736 
06737         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
06738         {
06739             // Incoming remote handles
06740             unsigned int to_p = pc->buffProcs[ind];
06741             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06742             result = pcs[to_p]->unpack_remote_handles( p, pc->localOwnedBuffs[ind]->buff_ptr, L2hloc[to_p],
06743                                                        L2hrem[to_p], L2p[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack remote handles" );
06744         }
06745     }
06746 
06747 #ifndef NDEBUG
06748     for( unsigned int p = 0; p < num_procs; p++ )
06749     {
06750         result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06751     }
06752 
06753     result = ParallelComm::check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
06754 #endif
06755 
06756     if( file_sets )
06757     {
06758         for( unsigned int p = 0; p < num_procs; p++ )
06759         {
06760             if( new_ents[p].empty() ) continue;
06761             result = pcs[p]->get_moab()->add_entities( file_sets[p], &new_ents[p][0], new_ents[p].size() );MB_CHK_SET_ERR( result, "p = " << p << ", failed to add new entities to set" );
06762         }
06763     }
06764 
06765     return MB_SUCCESS;
06766 }
06767 
06768 ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& exchange_procs )
06769 {
06770     // Set buffers
06771     int n_proc = exchange_procs.size();
06772     for( int i = 0; i < n_proc; i++ )
06773         get_buffers( exchange_procs[i] );
06774     reset_all_buffers();
06775 
06776     // Post ghost irecv's for entities from all communicating procs
06777     // Index requests the same as buffer/sharing procs indices
06778     int success;
06779     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06780     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06781     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06782 
06783     int incoming = 0;
06784     for( int i = 0; i < n_proc; i++ )
06785     {
06786         int ind = get_buffers( exchange_procs[i] );
06787         incoming++;
06788         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
06789                            MB_MESG_ENTS_SIZE, incoming );
06790         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06791                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
06792         if( success != MPI_SUCCESS )
06793         {
06794             MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" );
06795         }
06796     }
06797 
06798     return MB_SUCCESS;
06799 }
06800 
06801 ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs )
06802 {
06803     // Set buffers
06804     int num = shared_procs.size();
06805     for( int i = 0; i < num; i++ )
06806         get_buffers( shared_procs[i] );
06807     reset_all_buffers();
06808     num = remoteOwnedBuffs.size();
06809     for( int i = 0; i < num; i++ )
06810         remoteOwnedBuffs[i]->set_stored_size();
06811     num = localOwnedBuffs.size();
06812     for( int i = 0; i < num; i++ )
06813         localOwnedBuffs[i]->set_stored_size();
06814 
06815     // Post ghost irecv's for entities from all communicating procs
06816     // Index requests the same as buffer/sharing procs indices
06817     int success;
06818     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06819     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06820     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06821 
06822     int incoming                           = 0;
06823     std::set< unsigned int >::iterator it  = recv_procs.begin();
06824     std::set< unsigned int >::iterator eit = recv_procs.end();
06825     for( ; it != eit; ++it )
06826     {
06827         int ind = get_buffers( *it );
06828         incoming++;
06829         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
06830                            MB_MESG_ENTS_SIZE, incoming );
06831         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06832                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
06833         if( success != MPI_SUCCESS )
06834         {
06835             MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" );
06836         }
06837     }
06838 
06839     return MB_SUCCESS;
06840 }
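
// Editorial sketch (never called): the post-then-drain pattern behind the
// post_irecv variants above, in plain MPI. One initial-size receive is
// pre-posted per neighbor, and completions are then consumed in arrival order
// with MPI_Waitany; buffer growth and the multi-part protocol are omitted.
static void sketch_post_and_drain( const std::vector< int >& neighbors,
                                   std::vector< std::vector< unsigned char > >& bufs,
                                   int tag, MPI_Comm comm )
{
    if( neighbors.empty() ) return;
    const int init_size = 1024;  // stands in for INITIAL_BUFF_SIZE
    bufs.resize( neighbors.size() );
    std::vector< MPI_Request > reqs( neighbors.size(), MPI_REQUEST_NULL );
    for( size_t i = 0; i < neighbors.size(); i++ )
    {
        bufs[i].resize( init_size );
        MPI_Irecv( &bufs[i][0], init_size, MPI_UNSIGNED_CHAR, neighbors[i], tag, comm, &reqs[i] );
    }
    // Consume completions in whatever order messages arrive
    for( size_t remaining = neighbors.size(); remaining > 0; remaining-- )
    {
        int idx;
        MPI_Status status;
        MPI_Waitany( (int)reqs.size(), &reqs[0], &idx, &status );
        // bufs[idx] now holds the first chunk from neighbors[idx]; unpack it here,
        // and for large messages continue with the ack/second-chunk protocol above
        (void)status;
    }
}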
06841 
06842 ErrorCode ParallelComm::exchange_owned_meshs( std::vector< unsigned int >& exchange_procs,
06843                                               std::vector< Range* >& exchange_ents,
06844                                               std::vector< MPI_Request >& recv_ent_reqs,
06845                                               std::vector< MPI_Request >& recv_remoteh_reqs,
06846                                               bool store_remote_handles,
06847                                               bool wait_all,
06848                                               bool migrate,
06849                                               int dim )
06850 {
06851     // Filter out entities already shared with destination
06852     // Exchange twice for entities and sets
06853     ErrorCode result;
06854     std::vector< unsigned int > exchange_procs_sets;
06855     std::vector< Range* > exchange_sets;
06856     int n_proc = exchange_procs.size();
06857     for( int i = 0; i < n_proc; i++ )
06858     {
06859         Range set_range   = exchange_ents[i]->subset_by_type( MBENTITYSET );
06860         *exchange_ents[i] = subtract( *exchange_ents[i], set_range );
06861         Range* tmp_range  = new Range( set_range );
06862         exchange_sets.push_back( tmp_range );
06863         exchange_procs_sets.push_back( exchange_procs[i] );
06864     }
06865 
06866     if( dim == 2 )
06867     {
06868         // Exchange entities first
06869         result = exchange_owned_mesh( exchange_procs, exchange_ents, recvReqs, recvRemotehReqs, true,
06870                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
06871 
06872         // Exchange sets
06873         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recvReqs, recvRemotehReqs, false,
06874                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh sets" );
06875     }
06876     else
06877     {
06878         // Exchange entities first
06879         result = exchange_owned_mesh( exchange_procs, exchange_ents, recv_ent_reqs, recv_remoteh_reqs, false,
06880                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
06881 
06882         // Exchange sets
06883         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recv_ent_reqs, recv_remoteh_reqs, false,
06884                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh sets" );
06885     }
06886 
06887     for( int i = 0; i < n_proc; i++ )
06888         delete exchange_sets[i];
06889 
06890     // Build up the list of shared entities
06891     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
06892     int procs[MAX_SHARING_PROCS];
06893     EntityHandle handles[MAX_SHARING_PROCS];
06894     int nprocs;
06895     unsigned char pstat;
06896     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
06897     {
06898         if( mbImpl->dimension_from_handle( *vit ) > 2 ) continue;
06899         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data in exchange_owned_meshs" );
06900         std::sort( procs, procs + nprocs );
06901         std::vector< int > tmp_procs( procs, procs + nprocs );
06902         assert( tmp_procs.size() != 2 );
06903         proc_nvecs[tmp_procs].push_back( *vit );
06904     }
06905 
06906     // Create interface sets from shared entities
06907     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
06908 
06909     return MB_SUCCESS;
06910 }
06911 
06912 ErrorCode ParallelComm::exchange_owned_mesh( std::vector< unsigned int >& exchange_procs,
06913                                              std::vector< Range* >& exchange_ents,
06914                                              std::vector< MPI_Request >& recv_ent_reqs,
06915                                              std::vector< MPI_Request >& recv_remoteh_reqs,
06916                                              const bool recv_posted,
06917                                              bool store_remote_handles,
06918                                              bool wait_all,
06919                                              bool migrate )
06920 {
06921 #ifdef MOAB_HAVE_MPE
06922     if( myDebug->get_verbosity() == 2 )
06923     {
06924         MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting owned ents exchange." );
06925     }
06926 #endif
06927 
06928     myDebug->tprintf( 1, "Entering exchange_owned_mesh\n" );
06929     if( myDebug->get_verbosity() == 4 )
06930     {
06931         msgs.clear();
06932         msgs.reserve( MAX_SHARING_PROCS );
06933     }
06934     unsigned int i;
06935     int ind, success;
06936     ErrorCode result = MB_SUCCESS;
06937     int incoming1 = 0, incoming2 = 0;
06938 
06939     // Set buffProcs with communicating procs
06940     unsigned int n_proc = exchange_procs.size();
06941     for( i = 0; i < n_proc; i++ )
06942     {
06943         ind    = get_buffers( exchange_procs[i] );
06944         result = add_verts( *exchange_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
06945 
06946         // Filter out entities already shared with destination
06947         Range tmp_range;
06948         result = filter_pstatus( *exchange_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
06949         if( !tmp_range.empty() )
06950         {
06951             *exchange_ents[i] = subtract( *exchange_ents[i], tmp_range );
06952         }
06953     }
06954 
06955     //===========================================
06956     // Post ghost irecv's for entities from all communicating procs
06957     //===========================================
06958 #ifdef MOAB_HAVE_MPE
06959     if( myDebug->get_verbosity() == 2 )
06960     {
06961         MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." );
06962     }
06963 #endif
06964 
06965     // Index reqs the same as buffer/sharing procs indices
06966     if( !recv_posted )
06967     {
06968         reset_all_buffers();
06969         recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06970         recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06971         sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06972 
06973         for( i = 0; i < n_proc; i++ )
06974         {
06975             ind = get_buffers( exchange_procs[i] );
06976             incoming1++;
06977             PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr,
06978                                INITIAL_BUFF_SIZE, MB_MESG_ENTS_SIZE, incoming1 );
06979             success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06980                                  MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
06981             if( success != MPI_SUCCESS )
06982             {
06983                 MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" );
06984             }
06985         }
06986     }
06987     else
06988         incoming1 += n_proc;
06989 
06990     //===========================================
06991     // Get entities to be sent to neighbors
06992     // Need to get procs each entity is sent to
06993     //===========================================
06994     Range allsent, tmp_range;
06995     int dum_ack_buff;
06996     int npairs = 0;
06997     TupleList entprocs;
06998     for( i = 0; i < n_proc; i++ )
06999     {
07000         int n_ents = exchange_ents[i]->size();
07001         if( n_ents > 0 )
07002         {
07003             npairs += n_ents;  // Get the total # of proc/handle pairs
07004             allsent.merge( *exchange_ents[i] );
07005         }
07006     }
07007 
07008     // Allocate a TupleList of that size
07009     entprocs.initialize( 1, 0, 1, 0, npairs );
07010     entprocs.enableWriteAccess();
07011 
07012     // Put the proc/handle pairs in the list
07013     for( i = 0; i < n_proc; i++ )
07014     {
07015         for( Range::iterator rit = exchange_ents[i]->begin(); rit != exchange_ents[i]->end(); ++rit )
07016         {
07017             entprocs.vi_wr[entprocs.get_n()]  = exchange_procs[i];
07018             entprocs.vul_wr[entprocs.get_n()] = *rit;
07019             entprocs.inc_n();
07020         }
07021     }
07022 
07023     // Sort by handle
07024     moab::TupleList::buffer sort_buffer;
07025     sort_buffer.buffer_init( npairs );
07026     entprocs.sort( 1, &sort_buffer );
07027     sort_buffer.reset();
07028 
07029     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
07030                       (unsigned long)allsent.size() );
07031 
07032     //===========================================
07033     // Pack and send ents from this proc to others
07034     //===========================================
07035     for( i = 0; i < n_proc; i++ )
07036     {
07037         ind = get_buffers( exchange_procs[i] );
07038         myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", exchange_ents[i]->compactness(),
07039                           (unsigned long)exchange_ents[i]->size() );
07040         // Reserve space on front for size and for initial buff size
07041         localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
07042         result = pack_buffer( *exchange_ents[i], false, true, store_remote_handles, buffProcs[ind],
07043                               localOwnedBuffs[ind], &entprocs, &allsent );MB_CHK_SET_ERR( result, "Failed to pack buffer in owned entity exchange" );
07044 
07045         if( myDebug->get_verbosity() == 4 )
07046         {
07047             msgs.resize( msgs.size() + 1 );
07048             msgs.back() = new Buffer( *localOwnedBuffs[ind] );
07049         }
07050 
07051         // Send the buffer (size stored in front in send_buffer)
07052         result = send_buffer( exchange_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
07053                               recv_ent_reqs[3 * ind + 2], &dum_ack_buff, incoming1, MB_MESG_REMOTEH_SIZE,
07054                               ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recv_remoteh_reqs[3 * ind],
07055                               &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost exchange" );
07056     }
07057 
07058     entprocs.reset();
07059 
07060     //===========================================
07061     // Receive/unpack new entities
07062     //===========================================
07063     // Number of incoming messages is the number of procs we communicate with
07064     MPI_Status status;
07065     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
07066     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
07067     std::vector< std::vector< int > > L1p( buffProcs.size() );
07068     std::vector< EntityHandle > L2hloc, L2hrem;
07069     std::vector< unsigned int > L2p;
07070     std::vector< EntityHandle > new_ents;
07071 
07072     while( incoming1 )
07073     {
07074         // Wait for all entity receives before proceeding to send remote handles,
07075         // because some procs may have sent a third proc entities that I own
07076         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
07077 
07078         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &ind, &status );
07079         if( MPI_SUCCESS != success )
07080         {
07081             MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" );
07082         }
07083 
07084         PRINT_DEBUG_RECD( status );
07085 
07086         // OK, received something; decrement incoming counter
07087         incoming1--;
07088         bool done = false;
07089 
07090         // In case ind is for ack, we need index of one before it
07091         unsigned int base_ind = 3 * ( ind / 3 );
07092         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 3], recv_ent_reqs[base_ind + 1],
07093                               recv_ent_reqs[base_ind + 2], incoming1, localOwnedBuffs[ind / 3], sendReqs[base_ind + 1],
07094                               sendReqs[base_ind + 2], done, ( store_remote_handles ? localOwnedBuffs[ind / 3] : NULL ),
07095                               MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind + 1], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
07096 
07097         if( done )
07098         {
07099             if( myDebug->get_verbosity() == 4 )
07100             {
07101                 msgs.resize( msgs.size() + 1 );
07102                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 3] );
07103             }
07104 
07105             // Message completely received - process buffer that was sent
07106             remoteOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
07107             result = unpack_buffer( remoteOwnedBuffs[ind / 3]->buff_ptr, store_remote_handles, buffProcs[ind / 3],
07108                                     ind / 3, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
07109             if( MB_SUCCESS != result )
07110             {
07111                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
07112                 print_buffer( remoteOwnedBuffs[ind / 3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 3], false );
07113                 return result;
07114             }
07115 
07116             if( recv_ent_reqs.size() != 3 * buffProcs.size() )
07117             {
07118                 // Post irecv's for remote handles from new proc
07119                 recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
07120                 for( i = recv_ent_reqs.size(); i < 3 * buffProcs.size(); i += 3 )
07121                 {
07122                     localOwnedBuffs[i / 3]->reset_buffer();
07123                     incoming2++;
07124                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 3], localOwnedBuffs[i / 3]->mem_ptr,
07125                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
07126                     success = MPI_Irecv( localOwnedBuffs[i / 3]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
07127                                          buffProcs[i / 3], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
07128                                          &recv_remoteh_reqs[i] );
07129                     if( success != MPI_SUCCESS )
07130                     {
07131                         MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" );
07132                     }
07133                 }
07134                 recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
07135                 sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
07136             }
07137         }
07138     }
07139 
07140     // Assign received entities to this processor's part; if migrating, remove the sent entities from it
07141     result = assign_entities_part( new_ents, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to assign entities to part" );
07142     if( migrate )
07143     {
07144         result = remove_entities_part( allsent, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to remove entities from part" );
07145     }
07146 
07147     // Add requests for any new addl procs
07148     if( recv_ent_reqs.size() != 3 * buffProcs.size() )
07149     {
07150         // Shouldn't get here...
07151         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in entity exchange" );
07152     }
07153 
07154 #ifdef MOAB_HAVE_MPE
07155     if( myDebug->get_verbosity() == 2 )
07156     {
07157         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange." );
07158     }
07159 #endif
07160 
07161     // We still need to wait on sendReqs if they are not yet fulfilled
07162     if( wait_all )
07163     {
07164         if( myDebug->get_verbosity() == 5 )
07165         {
07166             success = MPI_Barrier( procConfig.proc_comm() );
07167         }
07168         else
07169         {
07170             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
07171             success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
07172             if( MPI_SUCCESS != success )
07173             {
07174                 MB_SET_ERR( MB_FAILURE, "Failed in waitall in exchange owned mesh" );
07175             }
07176         }
07177     }
07178 
07179     //===========================================
07180     // Send local handles for new entity to owner
07181     //===========================================
07182     for( i = 0; i < n_proc; i++ )
07183     {
07184         ind = get_buffers( exchange_procs[i] );
07185         // Reserve space on front for size and for initial buff size
07186         remoteOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
07187 
07188         result = pack_remote_handles( L1hloc[ind], L1hrem[ind], L1p[ind], buffProcs[ind], remoteOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
07189         remoteOwnedBuffs[ind]->set_stored_size();
07190 
07191         if( myDebug->get_verbosity() == 4 )
07192         {
07193             msgs.resize( msgs.size() + 1 );
07194             msgs.back() = new Buffer( *remoteOwnedBuffs[ind] );
07195         }
07196         result = send_buffer( buffProcs[ind], remoteOwnedBuffs[ind], MB_MESG_REMOTEH_SIZE, sendReqs[3 * ind],
07197                               recv_remoteh_reqs[3 * ind + 2], &dum_ack_buff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
07198     }
07199 
07200     //===========================================
07201     // Process remote handles of my ghosteds
07202     //===========================================
07203     while( incoming2 )
07204     {
07205         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
07206         success = MPI_Waitany( 3 * buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status );
07207         if( MPI_SUCCESS != success )
07208         {
07209             MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" );
07210         }
07211 
07212         // OK, received something; decrement incoming counter
07213         incoming2--;
07214 
07215         PRINT_DEBUG_RECD( status );
07216 
07217         bool done             = false;
07218         unsigned int base_ind = 3 * ( ind / 3 );
07219         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 3], recv_remoteh_reqs[base_ind + 1],
07220                               recv_remoteh_reqs[base_ind + 2], incoming2, remoteOwnedBuffs[ind / 3],
07221                               sendReqs[base_ind + 1], sendReqs[base_ind + 2], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
07222 
07223         if( done )
07224         {
07225             // Incoming remote handles
07226             if( myDebug->get_verbosity() == 4 )
07227             {
07228                 msgs.resize( msgs.size() + 1 );
07229                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 3] );
07230             }
07231 
07232             localOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
07233             result =
07234                 unpack_remote_handles( buffProcs[ind / 3], localOwnedBuffs[ind / 3]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
07235         }
07236     }
07237 
07238 #ifdef MOAB_HAVE_MPE
07239     if( myDebug->get_verbosity() == 2 )
07240     {
07241         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
07242         MPE_Log_event( OWNED_END, procConfig.proc_rank(), "Ending ghost exchange (still doing checks)." );
07243     }
07244 #endif
07245 
07246     //===========================================
07247     // Wait if requested
07248     //===========================================
07249     if( wait_all )
07250     {
07251         if( myDebug->get_verbosity() == 5 )
07252         {
07253             success = MPI_Barrier( procConfig.proc_comm() );
07254         }
07255         else
07256         {
07257             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
07258             success = MPI_Waitall( 3 * buffProcs.size(), &recv_remoteh_reqs[0], mult_status );
07259             if( MPI_SUCCESS == success ) success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
07260         }
07261         if( MPI_SUCCESS != success )
07262         {
07263             MB_SET_ERR( MB_FAILURE, "Failed in waitall in owned entity exchange" );
07264         }
07265     }
07266 
07267 #ifndef NDEBUG
07268     result = check_sent_ents( allsent );MB_CHK_SET_ERR( result, "Failed check on shared entities" );
07269 #endif
07270     myDebug->tprintf( 1, "Exiting exchange_owned_mesh\n" );
07271 
07272     return MB_SUCCESS;
07273 }
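
// Editorial note (never called): in exchange_owned_mesh (and the ghost exchange
// above) the request vectors hold three slots per neighbor (roughly: initial
// chunk receive, second-chunk receive, ack receive), so an index returned by
// MPI_Waitany maps back to its neighbor like this:
static void sketch_request_slot_to_proc( int req_index, int& proc_index, int& base_index )
{
    proc_index = req_index / 3;          // position of the neighbor in buffProcs
    base_index = 3 * ( req_index / 3 );  // first request slot of that neighbor's block
}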
07274 
07275 ErrorCode ParallelComm::get_iface_entities( int other_proc, int dim, Range& iface_ents )
07276 {
07277     Range iface_sets;
07278     ErrorCode result = MB_SUCCESS;
07279 
07280     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
07281     {
07282         if( -1 != other_proc && !is_iface_proc( *rit, other_proc ) ) continue;
07283 
07284         if( -1 == dim )
07285         {
07286             result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in iface set" );
07287         }
07288         else
07289         {
07290             result = mbImpl->get_entities_by_dimension( *rit, dim, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in iface set" );
07291         }
07292     }
07293 
07294     return MB_SUCCESS;
07295 }
07296 
07297 ErrorCode ParallelComm::assign_entities_part( std::vector< EntityHandle >& entities, const int proc )
07298 {
07299     EntityHandle part_set;
07300     ErrorCode result = get_part_handle( proc, part_set );MB_CHK_SET_ERR( result, "Failed to get part handle" );
07301 
07302     if( part_set > 0 )
07303     {
07304         result = mbImpl->add_entities( part_set, &entities[0], entities.size() );MB_CHK_SET_ERR( result, "Failed to add entities to part set" );
07305     }
07306 
07307     return MB_SUCCESS;
07308 }
07309 
07310 ErrorCode ParallelComm::remove_entities_part( Range& entities, const int proc )
07311 {
07312     EntityHandle part_set;
07313     ErrorCode result = get_part_handle( proc, part_set );MB_CHK_SET_ERR( result, "Failed to get part handle" );
07314 
07315     if( part_set > 0 )
07316     {
07317         result = mbImpl->remove_entities( part_set, entities );MB_CHK_SET_ERR( result, "Failed to remove entities from part set" );
07318     }
07319 
07320     return MB_SUCCESS;
07321 }
07322 
07323 ErrorCode ParallelComm::check_sent_ents( Range& allsent )
07324 {
07325     // Check entities to make sure there are no zero-valued remote handles
07326     // where they shouldn't be
07327     std::vector< unsigned char > pstat( allsent.size() );
07328     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), allsent, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
07329     std::vector< EntityHandle > handles( allsent.size() );
07330     result = mbImpl->tag_get_data( sharedh_tag(), allsent, &handles[0] );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
07331     std::vector< int > procs( allsent.size() );
07332     result = mbImpl->tag_get_data( sharedp_tag(), allsent, &procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
07333 
07334     Range bad_entities;
07335 
07336     Range::iterator rit;
07337     unsigned int i;
07338     EntityHandle dum_hs[MAX_SHARING_PROCS];
07339     int dum_ps[MAX_SHARING_PROCS];
07340 
07341     for( rit = allsent.begin(), i = 0; rit != allsent.end(); ++rit, i++ )
07342     {
07343         if( -1 != procs[i] && 0 == handles[i] )
07344             bad_entities.insert( *rit );
07345         else
07346         {
07347             // Might be multi-shared...
07348             result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, dum_ps );
07349             if( MB_TAG_NOT_FOUND == result )
07350                 continue;
07351             else if( MB_SUCCESS != result )
07352                 MB_SET_ERR( result, "Failed to get sharedps tag data" );
07353             result = mbImpl->tag_get_data( sharedhs_tag(), &( *rit ), 1, dum_hs );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
07354 
07355             // Find the first unset (-1) entry in dum_ps
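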
07356             int* ns_proc  = std::find( dum_ps, dum_ps + MAX_SHARING_PROCS, -1 );
07357             int num_procs = ns_proc - dum_ps;
07358             assert( num_procs <= MAX_SHARING_PROCS );
07359             // Now look for zero handles in active part of dum_hs
07360             EntityHandle* ns_handle = std::find( dum_hs, dum_hs + num_procs, 0 );
07361             int num_handles         = ns_handle - dum_hs;
07362             assert( num_handles <= num_procs );
07363             if( num_handles != num_procs ) bad_entities.insert( *rit );
07364         }
07365     }
07366 
07367     return MB_SUCCESS;
07368 }
07369 
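      // Buffer layout produced below (and consumed by unpack_remote_handles): one int holding the
      // number of entries, followed by that many ints from L1p, then the L1hrem handles, then the
      // L1hloc handles.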
07370 ErrorCode ParallelComm::pack_remote_handles( std::vector< EntityHandle >& L1hloc,
07371                                              std::vector< EntityHandle >& L1hrem,
07372                                              std::vector< int >& L1p,
07373                                              unsigned int /*to_proc*/,
07374                                              Buffer* buff )
07375 {
07376     assert( std::find( L1hloc.begin(), L1hloc.end(), (EntityHandle)0 ) == L1hloc.end() );
07377 
07378     // 2 vectors of handles plus ints
07379     buff->check_space( ( ( L1p.size() + 1 ) * sizeof( int ) + ( L1hloc.size() + 1 ) * sizeof( EntityHandle ) +
07380                          ( L1hrem.size() + 1 ) * sizeof( EntityHandle ) ) );
07381 
07382     // Should be in pairs of handles
07383     PACK_INT( buff->buff_ptr, L1hloc.size() );
07384     PACK_INTS( buff->buff_ptr, &L1p[0], L1p.size() );
07385     // Pack handles in reverse order, (remote, local), so on destination they
07386     // are ordered (local, remote)
07387     PACK_EH( buff->buff_ptr, &L1hrem[0], L1hrem.size() );
07388     PACK_EH( buff->buff_ptr, &L1hloc[0], L1hloc.size() );
07389 
07390     buff->set_stored_size();
07391 
07392     return MB_SUCCESS;
07393 }
07394 
07395 ErrorCode ParallelComm::unpack_remote_handles( unsigned int from_proc,
07396                                                unsigned char*& buff_ptr,
07397                                                std::vector< EntityHandle >& L2hloc,
07398                                                std::vector< EntityHandle >& L2hrem,
07399                                                std::vector< unsigned int >& L2p )
07400 {
07401     // Incoming remote handles; use to set remote handles
07402     int num_eh;
07403     UNPACK_INT( buff_ptr, num_eh );
07404 
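          // The buffer holds num_eh ints from L1p, then num_eh handles from L1hrem, then num_eh
          // handles from L1hloc (see pack_remote_handles). buff_proc walks the int section,
          // buff_ptr (after the advance below) walks the first handle section, and buff_rem the second.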
07405     unsigned char* buff_proc = buff_ptr;
07406     buff_ptr += num_eh * sizeof( int );
07407     unsigned char* buff_rem = buff_ptr + num_eh * sizeof( EntityHandle );
07408     ErrorCode result;
07409     EntityHandle hpair[2], new_h;
07410     int proc;
07411     for( int i = 0; i < num_eh; i++ )
07412     {
07413         UNPACK_INT( buff_proc, proc );
07414         // Handles packed (local, remote), though here local is either on this
07415         // proc or owner proc, depending on value of proc (-1 = here, otherwise owner);
07416         // this is decoded in find_existing_entity
07417         UNPACK_EH( buff_ptr, hpair, 1 );
07418         UNPACK_EH( buff_rem, hpair + 1, 1 );
07419 
07420         if( -1 != proc )
07421         {
07422             result = find_existing_entity( false, proc, hpair[0], 3, NULL, 0, mbImpl->type_from_handle( hpair[1] ),
07423                                            L2hloc, L2hrem, L2p, new_h );MB_CHK_SET_ERR( result, "Didn't get existing entity" );
07424             if( new_h )
07425                 hpair[0] = new_h;
07426             else
07427                 hpair[0] = 0;
07428         }
07429         if( !( hpair[0] && hpair[1] ) ) return MB_FAILURE;
07430         int this_proc = from_proc;
07431         result        = update_remote_data( hpair[0], &this_proc, hpair + 1, 1, 0 );MB_CHK_SET_ERR( result, "Failed to set remote data range on sent entities in ghost exchange" );
07432     }
07433 
07434     return MB_SUCCESS;
07435 }
07436 
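      // Note on addl_ents (derived from the adjacency calls below): 0 requests no additional
      // entities, 1 requests adjacent edges, 2 requests adjacent faces, and 3 requests both,
      // in addition to the ghost entities of dimension ghost_dim.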
07437 ErrorCode ParallelComm::get_ghosted_entities( int bridge_dim,
07438                                               int ghost_dim,
07439                                               int to_proc,
07440                                               int num_layers,
07441                                               int addl_ents,
07442                                               Range& ghosted_ents )
07443 {
07444     // Get bridge ents on interface(s)
07445     Range from_ents;
07446     ErrorCode result = MB_SUCCESS;
07447     assert( 0 < num_layers );
07448     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
07449     {
07450         if( !is_iface_proc( *rit, to_proc ) ) continue;
07451 
07452         // Get starting "from" entities
07453         if( bridge_dim == -1 )
07454         {
07455             result = mbImpl->get_entities_by_handle( *rit, from_ents );MB_CHK_SET_ERR( result, "Failed to get bridge ents in the set" );
07456         }
07457         else
07458         {
07459             result = mbImpl->get_entities_by_dimension( *rit, bridge_dim, from_ents );MB_CHK_SET_ERR( result, "Failed to get bridge ents in the set" );
07460         }
07461 
07462         // Need to get layers of bridge-adj entities
07463         if( from_ents.empty() ) continue;
07464         result =
07465             MeshTopoUtil( mbImpl ).get_bridge_adjacencies( from_ents, bridge_dim, ghost_dim, ghosted_ents, num_layers );MB_CHK_SET_ERR( result, "Failed to get bridge adjacencies" );
07466     }
07467 
07468     result = add_verts( ghosted_ents );MB_CHK_SET_ERR( result, "Failed to add verts" );
07469 
07470     if( addl_ents )
07471     {
07472         // First get the ents of ghost_dim
07473         Range tmp_ents, tmp_owned, tmp_notowned;
07474         tmp_owned = ghosted_ents.subset_by_dimension( ghost_dim );
07475         if( tmp_owned.empty() ) return result;
07476 
07477         tmp_notowned = tmp_owned;
07478 
07479         // Next, filter by pstatus; can only create adj entities for entities I own
07480         result = filter_pstatus( tmp_owned, PSTATUS_NOT_OWNED, PSTATUS_NOT, -1, &tmp_owned );MB_CHK_SET_ERR( result, "Failed to filter owned entities" );
07481 
07482         tmp_notowned -= tmp_owned;
07483 
07484         // Get edges first
07485         if( 1 == addl_ents || 3 == addl_ents )
07486         {
07487             result = mbImpl->get_adjacencies( tmp_owned, 1, true, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get edge adjacencies for owned ghost entities" );
07488             result = mbImpl->get_adjacencies( tmp_notowned, 1, false, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get edge adjacencies for notowned ghost entities" );
07489         }
07490         if( 2 == addl_ents || 3 == addl_ents )
07491         {
07492             result = mbImpl->get_adjacencies( tmp_owned, 2, true, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get face adjacencies for owned ghost entities" );
07493             result = mbImpl->get_adjacencies( tmp_notowned, 2, false, tmp_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get face adjacencies for notowned ghost entities" );
07494         }
07495 
07496         ghosted_ents.merge( tmp_ents );
07497     }
07498 
07499     return result;
07500 }
07501 
07502 ErrorCode ParallelComm::add_verts( Range& sent_ents )
07503 {
07504     // Get the verts adj to these entities, since we'll have to send those too
07505 
07506     // First check sets
07507     std::pair< Range::const_iterator, Range::const_iterator > set_range = sent_ents.equal_range( MBENTITYSET );
07508     ErrorCode result                                                    = MB_SUCCESS, tmp_result;
07509     for( Range::const_iterator rit = set_range.first; rit != set_range.second; ++rit )
07510     {
07511         tmp_result = mbImpl->get_entities_by_type( *rit, MBVERTEX, sent_ents );MB_CHK_SET_ERR( tmp_result, "Failed to get contained verts" );
07512     }
07513 
07514     // Now non-sets
07515     Range tmp_ents;
07516     std::copy( sent_ents.begin(), set_range.first, range_inserter( tmp_ents ) );
07517     result = mbImpl->get_adjacencies( tmp_ents, 0, false, sent_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get vertices adj to ghosted ents" );
07518 
07519     // If there are polyhedra, their faces must be sent as well
07520     Range polyhedra = sent_ents.subset_by_type( MBPOLYHEDRON );
07521     // Get all faces adjacent to each polyhedron
07522     result = mbImpl->get_connectivity( polyhedra, sent_ents );MB_CHK_SET_ERR( result, "Failed to get polyhedra faces" );
07523     return result;
07524 }
07525 
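      // Illustrative usage sketch (hypothetical tag handle and names, not part of this file):
      // exchange the values of one tag on all shared entities; an empty input range means
      // "use all shared entities" (see below).
      //
      //   std::vector< moab::Tag > tags( 1, density_tag );
      //   moab::ErrorCode rval = pcomm->exchange_tags( tags, tags, moab::Range() );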
07526 ErrorCode ParallelComm::exchange_tags( const std::vector< Tag >& src_tags,
07527                                        const std::vector< Tag >& dst_tags,
07528                                        const Range& entities_in )
07529 {
07530     ErrorCode result;
07531     int success;
07532 
07533     myDebug->tprintf( 1, "Entering exchange_tags\n" );
07534 
07535     // Get all procs interfacing to this proc
07536     std::set< unsigned int > exch_procs;
07537     result = get_comm_procs( exch_procs );
07538 
07539     // Post ghost irecv's for all interface procs
07540     // Index requests the same as buffer/sharing procs indices
07541     std::vector< MPI_Request > recv_tag_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
07542     // sent_ack_reqs(buffProcs.size(), MPI_REQUEST_NULL);
07543     std::vector< unsigned int >::iterator sit;
07544     int ind;
07545 
07546     reset_all_buffers();
07547     int incoming = 0;
07548 
07549     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
07550     {
07551         incoming++;
07552         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
07553                            MB_MESG_TAGS_SIZE, incoming );
07554 
07555         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
07556                              MB_MESG_TAGS_SIZE, procConfig.proc_comm(), &recv_tag_reqs[3 * ind] );
07557         if( success != MPI_SUCCESS )
07558         {
07559             MB_SET_ERR( MB_FAILURE, "Failed to post irecv in tag exchange" );
07560         }
07561     }
07562 
07563     // Pack and send tags from this proc to others
07564     // Make sendReqs vector to simplify initialization
07565     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
07566 
07567     // Take all shared entities if incoming list is empty
07568     Range entities;
07569     if( entities_in.empty() )
07570         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( entities ) );
07571     else
07572         entities = entities_in;
07573 
07574     int dum_ack_buff;
07575 
07576     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
07577     {
07578         Range tag_ents = entities;
07579 
07580         // Get ents shared by proc *sit
07581         result = filter_pstatus( tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit );MB_CHK_SET_ERR( result, "Failed pstatus AND check" );
07582 
07583         // Remote nonowned entities
07584         if( !tag_ents.empty() )
07585         {
07586             result = filter_pstatus( tag_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );MB_CHK_SET_ERR( result, "Failed pstatus NOT check" );
07587         }
07588 
07589         // Pack-send; this also posts receives if store_remote_handles is true
07590         std::vector< Range > tag_ranges;
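              // For tags without a default value, only entities that actually have the tag set are
              // sent; tags with a default value are sent for all of tag_ents.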
07591         for( std::vector< Tag >::const_iterator vit = src_tags.begin(); vit != src_tags.end(); ++vit )
07592         {
07593             const void* ptr;
07594             int sz;
07595             if( mbImpl->tag_get_default_value( *vit, ptr, sz ) != MB_SUCCESS )
07596             {
07597                 Range tagged_ents;
07598                 mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &*vit, 0, 1, tagged_ents );
07599                 tag_ranges.push_back( intersect( tag_ents, tagged_ents ) );
07600             }
07601             else
07602             {
07603                 tag_ranges.push_back( tag_ents );
07604             }
07605         }
07606 
07607         // Pack the data
07608         // Reserve space on front for size and for initial buff size
07609         localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
07610 
07611         result = pack_tags( tag_ents, src_tags, dst_tags, tag_ranges, localOwnedBuffs[ind], true, *sit );MB_CHK_SET_ERR( result, "Failed to count buffer in pack_send_tag" );
07612 
07613         // Now send it
07614         result = send_buffer( *sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3 * ind],
07615                               recv_tag_reqs[3 * ind + 2], &dum_ack_buff, incoming );MB_CHK_SET_ERR( result, "Failed to send buffer" );
07616     }
07617 
07618     // Receive/unpack tags
07619     while( incoming )
07620     {
07621         MPI_Status status;
07622         int index_in_recv_requests;
07623         PRINT_DEBUG_WAITANY( recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
07624         success = MPI_Waitany( 3 * buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status );
07625         if( MPI_SUCCESS != success )
07626         {
07627             MB_SET_ERR( MB_FAILURE, "Failed in waitany in tag exchange" );
07628         }
07629         // Processor index in the list is divided by 3
07630         ind = index_in_recv_requests / 3;
07631 
07632         PRINT_DEBUG_RECD( status );
07633 
07634         // OK, received something; decrement incoming counter
07635         incoming--;
07636 
07637         bool done = false;
07638         std::vector< EntityHandle > dum_vec;
07639         result = recv_buffer( MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind],
07640                               recv_tag_reqs[3 * ind + 1],  // This is for receiving the second message
07641                               recv_tag_reqs[3 * ind + 2],  // This would be for ack, but it is not
07642                                                            // used; consider removing it
07643                               incoming, localOwnedBuffs[ind],
07644                               sendReqs[3 * ind + 1],  // Send request for sending the second message
07645                               sendReqs[3 * ind + 2],  // This is for sending the ack
07646                               done );MB_CHK_SET_ERR( result, "Failed to resize recv buffer" );
07647         if( done )
07648         {
07649             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
07650             result = unpack_tags( remoteOwnedBuffs[ind]->buff_ptr, dum_vec, true, buffProcs[ind] );MB_CHK_SET_ERR( result, "Failed to recv-unpack-tag message" );
07651         }
07652     }
07653 
07654     // OK, now wait
07655     if( myDebug->get_verbosity() == 5 )
07656     {
07657         success = MPI_Barrier( procConfig.proc_comm() );
07658     }
07659     else
07660     {
07661         MPI_Status status[3 * MAX_SHARING_PROCS];
07662         success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], status );
07663     }
07664     if( MPI_SUCCESS != success )
07665     {
07666         MB_SET_ERR( MB_FAILURE, "Failure in waitall in tag exchange" );
07667     }
07668 
07669     // If source tag is not equal to destination tag, then
07670     // do local copy for owned entities (communicate w/ self)
07671     assert( src_tags.size() == dst_tags.size() );
07672     if( src_tags != dst_tags )
07673     {
07674         std::vector< unsigned char > data;
07675         Range owned_ents;
07676         if( entities_in.empty() )
07677             std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( owned_ents ) );
07678         else
07679             owned_ents = entities_in;
07680         result = filter_pstatus( owned_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );MB_CHK_SET_ERR( result, "Failure to get subset of owned entities" );
07681 
07682         if( !owned_ents.empty() )
07683         {  // Check this here, otherwise we get
07684             // Unexpected results from get_entities_by_type_and_tag w/ Interface::INTERSECT
07685             for( size_t i = 0; i < src_tags.size(); i++ )
07686             {
07687                 if( src_tags[i] == dst_tags[i] ) continue;
07688 
07689                 Range tagged_ents( owned_ents );
07690                 result = mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &src_tags[i], 0, 1, tagged_ents,
07691                                                                Interface::INTERSECT );MB_CHK_SET_ERR( result, "get_entities_by_type_and_tag(type == MBMAXTYPE) failed" );
07692 
07693                 int sz, size2;
07694                 result = mbImpl->tag_get_bytes( src_tags[i], sz );MB_CHK_SET_ERR( result, "tag_get_size failed" );
07695                 result = mbImpl->tag_get_bytes( dst_tags[i], size2 );MB_CHK_SET_ERR( result, "tag_get_size failed" );
07696                 if( sz != size2 )
07697                 {
07698                     MB_SET_ERR( MB_FAILURE, "tag sizes don't match" );
07699                 }
07700 
07701                 data.resize( sz * tagged_ents.size() );
07702                 result = mbImpl->tag_get_data( src_tags[i], tagged_ents, &data[0] );MB_CHK_SET_ERR( result, "tag_get_data failed" );
07703                 result = mbImpl->tag_set_data( dst_tags[i], tagged_ents, &data[0] );MB_CHK_SET_ERR( result, "tag_set_data failed" );
07704             }
07705         }
07706     }
07707 
07708     myDebug->tprintf( 1, "Exiting exchange_tags\n" );
07709 
07710     return MB_SUCCESS;
07711 }
07712 
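      // Illustrative usage sketch (hypothetical tag handle and names, not part of this file): reduce
      // a tag over shared entities with MPI_SUM; the source tag must have integer, double, or bit
      // type and a default value (checked below), and src and dst may be the same tag.
      //
      //   std::vector< moab::Tag > tags( 1, weight_tag );
      //   moab::ErrorCode rval = pcomm->reduce_tags( tags, tags, MPI_SUM, moab::Range() );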
07713 ErrorCode ParallelComm::reduce_tags( const std::vector< Tag >& src_tags,
07714                                      const std::vector< Tag >& dst_tags,
07715                                      const MPI_Op mpi_op,
07716                                      const Range& entities_in )
07717 {
07718     ErrorCode result;
07719     int success;
07720 
07721     myDebug->tprintf( 1, "Entering reduce_tags\n" );
07722 
07723     // Check that restrictions are met: number of source/dst tags...
07724     if( src_tags.size() != dst_tags.size() )
07725     {
07726         MB_SET_ERR( MB_FAILURE, "Source and destination tag handles must be specified for reduce_tags" );
07727     }
07728 
07729     // ... tag data types
07730     std::vector< Tag >::const_iterator vits, vitd;
07731     int tags_size, tagd_size;
07732     DataType tags_type, tagd_type;
07733     std::vector< unsigned char > vals;
07734     std::vector< int > tags_sizes;
07735     for( vits = src_tags.begin(), vitd = dst_tags.begin(); vits != src_tags.end(); ++vits, ++vitd )
07736     {
07737         // Checks on tag characteristics
07738         result = mbImpl->tag_get_data_type( *vits, tags_type );MB_CHK_SET_ERR( result, "Failed to get src tag data type" );
07739         if( tags_type != MB_TYPE_INTEGER && tags_type != MB_TYPE_DOUBLE && tags_type != MB_TYPE_BIT )
07740         {
07741             MB_SET_ERR( MB_FAILURE, "Src/dst tags must have integer, double, or bit data type" );
07742         }
07743 
07744         result = mbImpl->tag_get_bytes( *vits, tags_size );MB_CHK_SET_ERR( result, "Failed to get src tag bytes" );
07745         vals.resize( tags_size );
07746         result = mbImpl->tag_get_default_value( *vits, &vals[0] );MB_CHK_SET_ERR( result, "Src tag must have default value" );
07747 
07748         tags_sizes.push_back( tags_size );
07749 
07750         // OK, those passed; now check whether dest tags, if specified, agree with src tags
07751         if( *vits == *vitd ) continue;
07752 
07753         result = mbImpl->tag_get_bytes( *vitd, tagd_size );MB_CHK_SET_ERR( result, "Couldn't get dst tag bytes" );
07754         if( tags_size != tagd_size )
07755         {
07756             MB_SET_ERR( MB_FAILURE, "Sizes between src and dst tags don't match" );
07757         }
07758         result = mbImpl->tag_get_data_type( *vitd, tagd_type );MB_CHK_SET_ERR( result, "Couldn't get dst tag data type" );
07759         if( tags_type != tagd_type )
07760         {
07761             MB_SET_ERR( MB_FAILURE, "Src and dst tags must be of same data type" );
07762         }
07763     }
07764 
07765     // Get all procs interfacing to this proc
07766     std::set< unsigned int > exch_procs;
07767     result = get_comm_procs( exch_procs );
07768 
07769     // Post ghost irecv's for all interface procs
07770     // Index requests the same as buffer/sharing procs indices
07771     std::vector< MPI_Request > recv_tag_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
07772 
07773     std::vector< unsigned int >::iterator sit;
07774     int ind;
07775 
07776     reset_all_buffers();
07777     int incoming = 0;
07778 
07779     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
07780     {
07781         incoming++;
07782         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
07783                            MB_MESG_TAGS_SIZE, incoming );
07784 
07785         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
07786                              MB_MESG_TAGS_SIZE, procConfig.proc_comm(), &recv_tag_reqs[3 * ind] );
07787         if( success != MPI_SUCCESS )
07788         {
07789             MB_SET_ERR( MB_FAILURE, "Failed to post irecv in tag exchange" );
07790         }
07791     }
07792 
07793     // Pack and send tags from this proc to others
07794     // Make sendReqs vector to simplify initialization
07795     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
07796 
07797     // Take all shared entities if incoming list is empty
07798     Range entities;
07799     if( entities_in.empty() )
07800         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( entities ) );
07801     else
07802         entities = entities_in;
07803 
07804     // If the tags are different, copy the source to the dest tag locally
07805     std::vector< Tag >::const_iterator vit = src_tags.begin(), vit2 = dst_tags.begin();
07806     std::vector< int >::const_iterator vsizes = tags_sizes.begin();
07807     for( ; vit != src_tags.end(); ++vit, ++vit2, ++vsizes )
07808     {
07809         if( *vit == *vit2 ) continue;
07810         vals.resize( entities.size() * ( *vsizes ) );
07811         result = mbImpl->tag_get_data( *vit, entities, &vals[0] );MB_CHK_SET_ERR( result, "Didn't get data properly" );
07812         result = mbImpl->tag_set_data( *vit2, entities, &vals[0] );MB_CHK_SET_ERR( result, "Didn't set data properly" );
07813     }
07814 
07815     int dum_ack_buff;
07816 
07817     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
07818     {
07819         Range tag_ents = entities;
07820 
07821         // Get ents shared by proc *sit
07822         result = filter_pstatus( tag_ents, PSTATUS_SHARED, PSTATUS_AND, *sit );MB_CHK_SET_ERR( result, "Failed pstatus AND check" );
07823 
07824         // Pack-send
07825         std::vector< Range > tag_ranges;
07826         for( vit = src_tags.begin(); vit != src_tags.end(); ++vit )
07827         {
07828             const void* ptr;
07829             int sz;
07830             if( mbImpl->tag_get_default_value( *vit, ptr, sz ) != MB_SUCCESS )
07831             {
07832                 Range tagged_ents;
07833                 mbImpl->get_entities_by_type_and_tag( 0, MBMAXTYPE, &*vit, 0, 1, tagged_ents );
07834                 tag_ranges.push_back( intersect( tag_ents, tagged_ents ) );
07835             }
07836             else
07837                 tag_ranges.push_back( tag_ents );
07838         }
07839 
07840         // Pack the data
07841         // Reserve space on front for size and for initial buff size
07842         localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
07843 
07844         result = pack_tags( tag_ents, src_tags, dst_tags, tag_ranges, localOwnedBuffs[ind], true, *sit );MB_CHK_SET_ERR( result, "Failed to count buffer in pack_send_tag" );
07845 
07846         // Now send it
07847         result = send_buffer( *sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3 * ind],
07848                               recv_tag_reqs[3 * ind + 2], &dum_ack_buff, incoming );MB_CHK_SET_ERR( result, "Failed to send buffer" );
07849     }
07850 
07851     // Receive/unpack tags
07852     while( incoming )
07853     {
07854         MPI_Status status;
07855         int index_in_recv_requests;
07856         PRINT_DEBUG_WAITANY( recv_tag_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
07857         success = MPI_Waitany( 3 * buffProcs.size(), &recv_tag_reqs[0], &index_in_recv_requests, &status );
07858         if( MPI_SUCCESS != success )
07859         {
07860             MB_SET_ERR( MB_FAILURE, "Failed in waitany in tag exchange" );
07861         }
07862         ind = index_in_recv_requests / 3;
07863 
07864         PRINT_DEBUG_RECD( status );
07865 
07866         // OK, received something; decrement incoming counter
07867         incoming--;
07868 
07869         bool done = false;
07870         std::vector< EntityHandle > dum_vec;
07871         result = recv_buffer( MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind],
07872                               recv_tag_reqs[3 * ind + 1],  // This is for receiving the second message
07873                               recv_tag_reqs[3 * ind + 2],  // This would be for ack, but it is not
07874                                                            // used; consider removing it
07875                               incoming, localOwnedBuffs[ind],
07876                               sendReqs[3 * ind + 1],  // Send request for sending the second message
07877                               sendReqs[3 * ind + 2],  // This is for sending the ack
07878                               done );MB_CHK_SET_ERR( result, "Failed to resize recv buffer" );
07879         if( done )
07880         {
07881             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
07882             result = unpack_tags( remoteOwnedBuffs[ind]->buff_ptr, dum_vec, true, buffProcs[ind], &mpi_op );MB_CHK_SET_ERR( result, "Failed to recv-unpack-tag message" );
07883         }
07884     }
07885 
07886     // OK, now wait
07887     if( myDebug->get_verbosity() == 5 )
07888     {
07889         success = MPI_Barrier( procConfig.proc_comm() );
07890     }
07891     else
07892     {
07893         MPI_Status status[3 * MAX_SHARING_PROCS];
07894         success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], status );
07895     }
07896     if( MPI_SUCCESS != success )
07897     {
07898         MB_SET_ERR( MB_FAILURE, "Failure in waitall in tag exchange" );
07899     }
07900 
07901     myDebug->tprintf( 1, "Exiting reduce_tags\n" );
07902 
07903     return MB_SUCCESS;
07904 }
07905 
07906 //! return sharedp tag
07907 Tag ParallelComm::sharedp_tag()
07908 {
07909     if( !sharedpTag )
07910     {
07911         int def_val      = -1;
07912         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_SHARED_PROC_TAG_NAME, 1, MB_TYPE_INTEGER, sharedpTag,
07913                                                    MB_TAG_DENSE | MB_TAG_CREAT, &def_val );
07914         if( MB_SUCCESS != result ) return 0;
07915     }
07916 
07917     return sharedpTag;
07918 }
07919 
07920 //! return sharedps tag
07921 Tag ParallelComm::sharedps_tag()
07922 {
07923     if( !sharedpsTag )
07924     {
07925         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_SHARED_PROCS_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_INTEGER,
07926                                                    sharedpsTag, MB_TAG_SPARSE | MB_TAG_CREAT );
07927         if( MB_SUCCESS != result ) return 0;
07928     }
07929 
07930     return sharedpsTag;
07931 }
07932 
07933 //! return sharedh tag
07934 Tag ParallelComm::sharedh_tag()
07935 {
07936     if( !sharedhTag )
07937     {
07938         EntityHandle def_val = 0;
07939         ErrorCode result     = mbImpl->tag_get_handle( PARALLEL_SHARED_HANDLE_TAG_NAME, 1, MB_TYPE_HANDLE, sharedhTag,
07940                                                        MB_TAG_DENSE | MB_TAG_CREAT, &def_val );
07941         if( MB_SUCCESS != result ) return 0;
07942     }
07943 
07944     return sharedhTag;
07945 }
07946 
07947 //! return sharedhs tag
07948 Tag ParallelComm::sharedhs_tag()
07949 {
07950     if( !sharedhsTag )
07951     {
07952         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_SHARED_HANDLES_TAG_NAME, MAX_SHARING_PROCS, MB_TYPE_HANDLE,
07953                                                    sharedhsTag, MB_TAG_SPARSE | MB_TAG_CREAT );
07954         if( MB_SUCCESS != result ) return 0;
07955     }
07956 
07957     return sharedhsTag;
07958 }
07959 
07960 //! return pstatus tag
07961 Tag ParallelComm::pstatus_tag()
07962 {
07963     if( !pstatusTag )
07964     {
07965         unsigned char tmp_pstatus = 0;
07966         ErrorCode result          = mbImpl->tag_get_handle( PARALLEL_STATUS_TAG_NAME, 1, MB_TYPE_OPAQUE, pstatusTag,
07967                                                             MB_TAG_DENSE | MB_TAG_CREAT, &tmp_pstatus );
07968         if( MB_SUCCESS != result ) return 0;
07969     }
07970 
07971     return pstatusTag;
07972 }
07973 
07974 //! return partition set tag
07975 Tag ParallelComm::partition_tag()
07976 {
07977     if( !partitionTag )
07978     {
07979         int dum_id       = -1;
07980         ErrorCode result = mbImpl->tag_get_handle( PARALLEL_PARTITION_TAG_NAME, 1, MB_TYPE_INTEGER, partitionTag,
07981                                                    MB_TAG_SPARSE | MB_TAG_CREAT, &dum_id );
07982         if( MB_SUCCESS != result ) return 0;
07983     }
07984 
07985     return partitionTag;
07986 }
07987 
07988 //! return pcomm tag; passes in impl because this is a static function
07989 Tag ParallelComm::pcomm_tag( Interface* impl, bool create_if_missing )
07990 {
07991     Tag this_tag = 0;
07992     ErrorCode result;
07993     if( create_if_missing )
07994     {
07995         result = impl->tag_get_handle( PARALLEL_COMM_TAG_NAME, MAX_SHARING_PROCS * sizeof( ParallelComm* ),
07996                                        MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE | MB_TAG_CREAT );
07997     }
07998     else
07999     {
08000         result = impl->tag_get_handle( PARALLEL_COMM_TAG_NAME, MAX_SHARING_PROCS * sizeof( ParallelComm* ),
08001                                        MB_TYPE_OPAQUE, this_tag, MB_TAG_SPARSE );
08002     }
08003 
08004     if( MB_SUCCESS != result ) return 0;
08005 
08006     return this_tag;
08007 }
08008 
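      // Illustrative usage sketch (hypothetical names, not part of this file):
      //
      //   moab::ParallelComm* pc = moab::ParallelComm::get_pcomm( &mb, 0 );
      //   if( !pc ) { /* no ParallelComm stored on this interface at that index */ }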
08009 //! get the indexed pcomm object from the interface
08010 ParallelComm* ParallelComm::get_pcomm( Interface* impl, const int index )
08011 {
08012     Tag pc_tag = pcomm_tag( impl, false );
08013     if( 0 == pc_tag ) return NULL;
08014 
08015     const EntityHandle root = 0;
08016     ParallelComm* pc_array[MAX_SHARING_PROCS];
08017     ErrorCode result = impl->tag_get_data( pc_tag, &root, 1, (void*)pc_array );
08018     if( MB_SUCCESS != result ) return NULL;
08019 
08020     return pc_array[index];
08021 }
08022 
08023 ErrorCode ParallelComm::get_all_pcomm( Interface* impl, std::vector< ParallelComm* >& list )
08024 {
08025     Tag pc_tag = pcomm_tag( impl, false );
08026     if( 0 == pc_tag ) return MB_TAG_NOT_FOUND;
08027 
08028     const EntityHandle root = 0;
08029     ParallelComm* pc_array[MAX_SHARING_PROCS];
08030     ErrorCode rval = impl->tag_get_data( pc_tag, &root, 1, pc_array );
08031     if( MB_SUCCESS != rval ) return rval;
08032 
08033     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
08034     {
08035         if( pc_array[i] ) list.push_back( pc_array[i] );
08036     }
08037 
08038     return MB_SUCCESS;
08039 }
08040 
08041 //! get the pcomm object associated with a partition set, creating one if necessary and a communicator is provided
08042 ParallelComm* ParallelComm::get_pcomm( Interface* impl, EntityHandle prtn, const MPI_Comm* comm )
08043 {
08044     ErrorCode rval;
08045     ParallelComm* result = 0;
08046 
08047     Tag prtn_tag;
08048     rval =
08049         impl->tag_get_handle( PARTITIONING_PCOMM_TAG_NAME, 1, MB_TYPE_INTEGER, prtn_tag, MB_TAG_SPARSE | MB_TAG_CREAT );
08050     if( MB_SUCCESS != rval ) return 0;
08051 
08052     int pcomm_id;
08053     rval = impl->tag_get_data( prtn_tag, &prtn, 1, &pcomm_id );
08054     if( MB_SUCCESS == rval )
08055     {
08056         result = get_pcomm( impl, pcomm_id );
08057     }
08058     else if( MB_TAG_NOT_FOUND == rval && comm )
08059     {
08060         result = new ParallelComm( impl, *comm, &pcomm_id );
08061         if( !result ) return 0;
08062         result->set_partitioning( prtn );
08063 
08064         rval = impl->tag_set_data( prtn_tag, &prtn, 1, &pcomm_id );
08065         if( MB_SUCCESS != rval )
08066         {
08067             delete result;
08068             result = 0;
08069         }
08070     }
08071 
08072     return result;
08073 }
08074 
08075 ErrorCode ParallelComm::set_partitioning( EntityHandle set )
08076 {
08077     ErrorCode rval;
08078     Tag prtn_tag;
08079     rval = mbImpl->tag_get_handle( PARTITIONING_PCOMM_TAG_NAME, 1, MB_TYPE_INTEGER, prtn_tag,
08080                                    MB_TAG_SPARSE | MB_TAG_CREAT );
08081     if( MB_SUCCESS != rval ) return rval;
08082 
08083     // Get my id
08084     ParallelComm* pcomm_arr[MAX_SHARING_PROCS];
08085     Tag pc_tag = pcomm_tag( mbImpl, false );
08086     if( 0 == pc_tag ) return MB_FAILURE;
08087     const EntityHandle root = 0;
08088     ErrorCode result        = mbImpl->tag_get_data( pc_tag, &root, 1, pcomm_arr );
08089     if( MB_SUCCESS != result ) return MB_FAILURE;
08090     int id = std::find( pcomm_arr, pcomm_arr + MAX_SHARING_PROCS, this ) - pcomm_arr;
08091     if( id == MAX_SHARING_PROCS ) return MB_FAILURE;
08092 
08093     EntityHandle old = partitioningSet;
08094     if( old )
08095     {
08096         rval = mbImpl->tag_delete_data( prtn_tag, &old, 1 );
08097         if( MB_SUCCESS != rval ) return rval;
08098         partitioningSet = 0;
08099     }
08100 
08101     if( !set ) return MB_SUCCESS;
08102 
08103     Range contents;
08104     if( old )
08105     {
08106         rval = mbImpl->get_entities_by_handle( old, contents );
08107         if( MB_SUCCESS != rval ) return rval;
08108     }
08109     else
08110     {
08111         contents = partition_sets();
08112     }
08113 
08114     rval = mbImpl->add_entities( set, contents );
08115     if( MB_SUCCESS != rval ) return rval;
08116 
08117     // Store pcomm id on new partition set
08118     rval = mbImpl->tag_set_data( prtn_tag, &set, 1, &id );
08119     if( MB_SUCCESS != rval ) return rval;
08120 
08121     partitioningSet = set;
08122     return MB_SUCCESS;
08123 }
08124 
08125 //! return all the entities in parts owned locally
08126 ErrorCode ParallelComm::get_part_entities( Range& ents, int dim )
08127 {
08128     ErrorCode result;
08129 
08130     for( Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit )
08131     {
08132         Range tmp_ents;
08133         if( -1 == dim )
08134             result = mbImpl->get_entities_by_handle( *rit, tmp_ents, true );
08135         else
08136             result = mbImpl->get_entities_by_dimension( *rit, dim, tmp_ents, true );
08137 
08138         if( MB_SUCCESS != result ) return result;
08139         ents.merge( tmp_ents );
08140     }
08141 
08142     return MB_SUCCESS;
08143 }
08144 
08145 /** \brief Return the rank of the entity owner and the entity's handle on the owning processor
08146  */
08147 ErrorCode ParallelComm::get_owner_handle( EntityHandle entity, int& owner, EntityHandle& handle )
08148 {
08149     unsigned char pstat;
08150     int sharing_procs[MAX_SHARING_PROCS];
08151     EntityHandle sharing_handles[MAX_SHARING_PROCS];
08152 
08153     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
08154     if( !( pstat & PSTATUS_NOT_OWNED ) )
08155     {
08156         owner  = proc_config().proc_rank();
08157         handle = entity;
08158     }
08159     else if( pstat & PSTATUS_MULTISHARED )
08160     {
08161         result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
08162         owner  = sharing_procs[0];
08163         result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, sharing_handles );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
08164         handle = sharing_handles[0];
08165     }
08166     else if( pstat & PSTATUS_SHARED )
08167     {
08168         result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
08169         owner  = sharing_procs[0];
08170         result = mbImpl->tag_get_data( sharedh_tag(), &entity, 1, sharing_handles );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
08171         handle = sharing_handles[0];
08172     }
08173     else
08174     {
08175         owner  = -1;
08176         handle = 0;
08177     }
08178 
08179     return MB_SUCCESS;
08180 }
08181 
08182 ErrorCode ParallelComm::get_global_part_count( int& count_out ) const
08183 {
08184     count_out = globalPartCount;
08185     return count_out < 0 ? MB_FAILURE : MB_SUCCESS;
08186 }
08187 
08188 ErrorCode ParallelComm::get_part_owner( int part_id, int& owner ) const
08189 {
08190     // FIXME: assumes only 1 local part
08191     owner = part_id;
08192     return MB_SUCCESS;
08193 }
08194 
08195 ErrorCode ParallelComm::get_part_id( EntityHandle /*part*/, int& id_out ) const
08196 {
08197     // FIXME: assumes only 1 local part
08198     id_out = proc_config().proc_rank();
08199     return MB_SUCCESS;
08200 }
08201 
08202 ErrorCode ParallelComm::get_part_handle( int id, EntityHandle& handle_out ) const
08203 {
08204     // FIXME: assumes only 1 local part
08205     if( (unsigned)id != proc_config().proc_rank() ) return MB_ENTITY_NOT_FOUND;
08206     handle_out = partition_sets().front();
08207     return MB_SUCCESS;
08208 }
08209 
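      // Illustrative usage sketch (hypothetical names, not part of this file): create a part set on
      // this rank, then update the global part count collectively.
      //
      //   moab::EntityHandle part;
      //   moab::ErrorCode rval = pcomm->create_part( part );
      //   rval = pcomm->collective_sync_partition();  // collective: every rank must call this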
08210 ErrorCode ParallelComm::create_part( EntityHandle& set_out )
08211 {
08212     // Mark as invalid so we know that it needs to be updated
08213     globalPartCount = -1;
08214 
08215     // Create set representing part
08216     ErrorCode rval = mbImpl->create_meshset( MESHSET_SET, set_out );
08217     if( MB_SUCCESS != rval ) return rval;
08218 
08219     // Set tag on set
08220     int val = proc_config().proc_rank();
08221     rval    = mbImpl->tag_set_data( part_tag(), &set_out, 1, &val );
08222 
08223     if( MB_SUCCESS != rval )
08224     {
08225         mbImpl->delete_entities( &set_out, 1 );
08226         return rval;
08227     }
08228 
08229     if( get_partitioning() )
08230     {
08231         rval = mbImpl->add_entities( get_partitioning(), &set_out, 1 );
08232         if( MB_SUCCESS != rval )
08233         {
08234             mbImpl->delete_entities( &set_out, 1 );
08235             return rval;
08236         }
08237     }
08238 
08239     moab::Range& pSets = this->partition_sets();
08240     if( pSets.index( set_out ) < 0 )
08241     {
08242         pSets.insert( set_out );
08243     }
08244 
08245     return MB_SUCCESS;
08246 }
08247 
08248 ErrorCode ParallelComm::destroy_part( EntityHandle part_id )
08249 {
08250     // Mark as invalid so we know that it needs to be updated
08251     globalPartCount = -1;
08252 
08253     ErrorCode rval;
08254     if( get_partitioning() )
08255     {
08256         rval = mbImpl->remove_entities( get_partitioning(), &part_id, 1 );
08257         if( MB_SUCCESS != rval ) return rval;
08258     }
08259 
08260     moab::Range& pSets = this->partition_sets();
08261     if( pSets.index( part_id ) >= 0 )
08262     {
08263         pSets.erase( part_id );
08264     }
08265     return mbImpl->delete_entities( &part_id, 1 );
08266 }
08267 
08268 ErrorCode ParallelComm::collective_sync_partition()
08269 {
08270     int count       = partition_sets().size();
08271     globalPartCount = 0;
08272     int err         = MPI_Allreduce( &count, &globalPartCount, 1, MPI_INT, MPI_SUM, proc_config().proc_comm() );
08273     return err ? MB_FAILURE : MB_SUCCESS;
08274 }
08275 
08276 ErrorCode ParallelComm::get_part_neighbor_ids( EntityHandle part,
08277                                                int neighbors_out[MAX_SHARING_PROCS],
08278                                                int& num_neighbors_out )
08279 {
08280     ErrorCode rval;
08281     Range iface;
08282     rval = get_interface_sets( part, iface );
08283     if( MB_SUCCESS != rval ) return rval;
08284 
08285     num_neighbors_out = 0;
08286     int n, j = 0;
08287     int tmp[MAX_SHARING_PROCS] = { 0 }, curr[MAX_SHARING_PROCS] = { 0 };
08288     int* parts[2] = { neighbors_out, tmp };
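          // Accumulate the union of sharing procs over all interface sets, alternating between
          // neighbors_out and tmp as the output buffer so std::set_union never writes into one of
          // its input ranges.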
08289     for( Range::iterator i = iface.begin(); i != iface.end(); ++i )
08290     {
08291         unsigned char pstat;
08292         rval = get_sharing_data( *i, curr, NULL, pstat, n );
08293         if( MB_SUCCESS != rval ) return rval;
08294         std::sort( curr, curr + n );
08295         assert( num_neighbors_out < MAX_SHARING_PROCS );
08296         int* k            = std::set_union( parts[j], parts[j] + num_neighbors_out, curr, curr + n, parts[1 - j] );
08297         j                 = 1 - j;
08298         num_neighbors_out = k - parts[j];
08299     }
08300     if( parts[j] != neighbors_out ) std::copy( parts[j], parts[j] + num_neighbors_out, neighbors_out );
08301 
08302     // Remove input part from list
08303     int id;
08304     rval = get_part_id( part, id );
08305     if( MB_SUCCESS == rval )
08306         num_neighbors_out = std::remove( neighbors_out, neighbors_out + num_neighbors_out, id ) - neighbors_out;
08307     return rval;
08308 }
08309 
08310 ErrorCode ParallelComm::get_interface_sets( EntityHandle, Range& iface_sets_out, int* adj_part_id )
08311 {
08312     // FIXME : assumes one part per processor.
08313     // Need to store part iface sets as children to implement
08314     // this correctly.
08315     iface_sets_out = interface_sets();
08316 
08317     if( adj_part_id )
08318     {
08319         int part_ids[MAX_SHARING_PROCS], num_parts;
08320         Range::iterator i = iface_sets_out.begin();
08321         while( i != iface_sets_out.end() )
08322         {
08323             unsigned char pstat;
08324             ErrorCode rval = get_sharing_data( *i, part_ids, NULL, pstat, num_parts );
08325             if( MB_SUCCESS != rval ) return rval;
08326 
08327             if( std::find( part_ids, part_ids + num_parts, *adj_part_id ) - part_ids != num_parts )
08328                 ++i;
08329             else
08330                 i = iface_sets_out.erase( i );
08331         }
08332     }
08333 
08334     return MB_SUCCESS;
08335 }
08336 
08337 ErrorCode ParallelComm::get_owning_part( EntityHandle handle, int& owning_part_id, EntityHandle* remote_handle )
08338 {
08339     // FIXME : assumes one part per proc, and therefore part_id == rank
08340 
08341     // If entity is not shared, then we're the owner.
08342     unsigned char pstat;
08343     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &handle, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
08344     if( !( pstat & PSTATUS_NOT_OWNED ) )
08345     {
08346         owning_part_id = proc_config().proc_rank();
08347         if( remote_handle ) *remote_handle = handle;
08348         return MB_SUCCESS;
08349     }
08350 
08351     // If entity is shared with one other proc, then
08352     // sharedp_tag will contain a positive value.
08353     result = mbImpl->tag_get_data( sharedp_tag(), &handle, 1, &owning_part_id );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
08354     if( owning_part_id != -1 )
08355     {
08356         // Done?
08357         if( !remote_handle ) return MB_SUCCESS;
08358 
08359         // Get handles on remote processors (and this one)
08360         return mbImpl->tag_get_data( sharedh_tag(), &handle, 1, remote_handle );
08361     }
08362 
08363     // If here, then the entity is shared with at least two other processors.
08364     // Get the list from the sharedps_tag
08365     const void* part_id_list = 0;
08366     result                   = mbImpl->tag_get_by_ptr( sharedps_tag(), &handle, 1, &part_id_list );
08367     if( MB_SUCCESS != result ) return result;
08368     owning_part_id = ( (const int*)part_id_list )[0];
08369 
08370     // Done?
08371     if( !remote_handle ) return MB_SUCCESS;
08372 
08373     // Get remote handles
08374     const void* handle_list = 0;
08375     result                  = mbImpl->tag_get_by_ptr( sharedhs_tag(), &handle, 1, &handle_list );
08376     if( MB_SUCCESS != result ) return result;
08377 
08378     *remote_handle = ( (const EntityHandle*)handle_list )[0];
08379     return MB_SUCCESS;
08380 }
08381 
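      // Illustrative usage sketch (hypothetical names, not part of this file):
      //
      //   int parts[MAX_SHARING_PROCS], num_parts;
      //   moab::ErrorCode rval = pcomm->get_sharing_parts( ent, parts, num_parts );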
08382 ErrorCode ParallelComm::get_sharing_parts( EntityHandle entity,
08383                                            int part_ids_out[MAX_SHARING_PROCS],
08384                                            int& num_part_ids_out,
08385                                            EntityHandle remote_handles[MAX_SHARING_PROCS] )
08386 {
08387     // FIXME : assumes one part per proc, and therefore part_id == rank
08388 
08389     // If entity is not shared, then we're the owner.
08390     unsigned char pstat;
08391     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
08392     if( !( pstat & PSTATUS_SHARED ) )
08393     {
08394         part_ids_out[0] = proc_config().proc_rank();
08395         if( remote_handles ) remote_handles[0] = entity;
08396         num_part_ids_out = 1;
08397         return MB_SUCCESS;
08398     }
08399 
08400     // If entity is shared with one other proc, then
08401     // sharedp_tag will contain a positive value.
08402     result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, part_ids_out );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
08403     if( part_ids_out[0] != -1 )
08404     {
08405         num_part_ids_out = 2;
08406         part_ids_out[1]  = proc_config().proc_rank();
08407 
08408         // Done?
08409         if( !remote_handles ) return MB_SUCCESS;
08410 
08411         // Get handles on remote processors (and this one)
08412         remote_handles[1] = entity;
08413         return mbImpl->tag_get_data( sharedh_tag(), &entity, 1, remote_handles );
08414     }
08415 
08416     // If here, then the entity is shared with at least two other processors.
08417     // Get the list from the sharedps_tag
08418     result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, part_ids_out );
08419     if( MB_SUCCESS != result ) return result;
08420     // Count number of valid (positive) entries in sharedps_tag
08421     for( num_part_ids_out = 0; num_part_ids_out < MAX_SHARING_PROCS && part_ids_out[num_part_ids_out] >= 0;
08422          num_part_ids_out++ )
08423         ;
08424         // part_ids_out[num_part_ids_out++] = proc_config().proc_rank();
08425 #ifndef NDEBUG
08426     int my_idx = std::find( part_ids_out, part_ids_out + num_part_ids_out, proc_config().proc_rank() ) - part_ids_out;
08427     assert( my_idx < num_part_ids_out );
08428 #endif
08429 
08430     // Done?
08431     if( !remote_handles ) return MB_SUCCESS;
08432 
08433     // Get remote handles
08434     result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, remote_handles );
08435     // remote_handles[num_part_ids_out - 1] = entity;
08436     assert( remote_handles[my_idx] == entity );
08437 
08438     return result;
08439 }
08440 
08441 ErrorCode ParallelComm::pack_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data )
08442 {
08443     // Build up send buffers
08444     ErrorCode rval = MB_SUCCESS;
08445     int ent_procs[MAX_SHARING_PROCS];
08446     EntityHandle handles[MAX_SHARING_PROCS];
08447     int num_sharing, tmp_int;
08448     SharedEntityData tmp;
08449     send_data.resize( buffProcs.size() );
08450     for( std::set< EntityHandle >::iterator i = sharedEnts.begin(); i != sharedEnts.end(); ++i )
08451     {
08452         tmp.remote = *i;  // Swap local/remote so they're correct on the remote proc.
08453         rval       = get_owner( *i, tmp_int );
08454         tmp.owner  = tmp_int;
08455         if( MB_SUCCESS != rval ) return rval;
08456 
08457         unsigned char pstat;
08458         rval = get_sharing_data( *i, ent_procs, handles, pstat, num_sharing );
08459         if( MB_SUCCESS != rval ) return rval;
08460         for( int j = 0; j < num_sharing; j++ )
08461         {
08462             if( ent_procs[j] == (int)proc_config().proc_rank() ) continue;
08463             tmp.local = handles[j];
08464             int ind   = get_buffers( ent_procs[j] );
08465             assert( -1 != ind );
08466             if( (int)send_data.size() < ind + 1 ) send_data.resize( ind + 1 );
08467             send_data[ind].push_back( tmp );
08468         }
08469     }
08470 
08471     return MB_SUCCESS;
08472 }
08473 
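      // Two-phase exchange: first the per-proc entry counts are swapped with nonblocking
      // sends/receives, then the SharedEntityData payloads themselves are sent as raw bytes and
      // the result vectors are resized to match the received counts.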
08474 ErrorCode ParallelComm::exchange_all_shared_handles( std::vector< std::vector< SharedEntityData > >& send_data,
08475                                                      std::vector< std::vector< SharedEntityData > >& result )
08476 {
08477     int ierr;
08478     const int tag      = 0;
08479     const MPI_Comm cm  = procConfig.proc_comm();
08480     const int num_proc = buffProcs.size();
08481     const std::vector< int > procs( buffProcs.begin(), buffProcs.end() );
08482     std::vector< MPI_Request > recv_req( buffProcs.size(), MPI_REQUEST_NULL );
08483     std::vector< MPI_Request > send_req( buffProcs.size(), MPI_REQUEST_NULL );
08484 
08485     // Set up to receive sizes
08486     std::vector< int > sizes_send( num_proc ), sizes_recv( num_proc );
08487     for( int i = 0; i < num_proc; i++ )
08488     {
08489         ierr = MPI_Irecv( &sizes_recv[i], 1, MPI_INT, procs[i], tag, cm, &recv_req[i] );
08490         if( ierr ) return MB_FILE_WRITE_ERROR;
08491     }
08492 
08493     // Send sizes
08494     assert( num_proc == (int)send_data.size() );
08495 
08496     result.resize( num_proc );
08497     for( int i = 0; i < num_proc; i++ )
08498     {
08499         sizes_send[i] = send_data[i].size();
08500         ierr          = MPI_Isend( &sizes_send[i], 1, MPI_INT, buffProcs[i], tag, cm, &send_req[i] );
08501         if( ierr ) return MB_FILE_WRITE_ERROR;
08502     }
08503 
08504     // Receive sizes
08505     std::vector< MPI_Status > stat( num_proc );
08506     ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
08507     if( ierr ) return MB_FILE_WRITE_ERROR;
08508 
08509     // Wait until all sizes are sent (clean up pending req's)
08510     ierr = MPI_Waitall( num_proc, &send_req[0], &stat[0] );
08511     if( ierr ) return MB_FILE_WRITE_ERROR;
08512 
08513     // Set up to receive data
08514     for( int i = 0; i < num_proc; i++ )
08515     {
08516         result[i].resize( sizes_recv[i] );
08517         ierr = MPI_Irecv( (void*)( &( result[i][0] ) ), sizeof( SharedEntityData ) * sizes_recv[i], MPI_UNSIGNED_CHAR,
08518                           buffProcs[i], tag, cm, &recv_req[i] );
08519         if( ierr ) return MB_FILE_WRITE_ERROR;
08520     }
08521 
08522     // Send data
08523     for( int i = 0; i < num_proc; i++ )
08524     {
08525         ierr = MPI_Isend( (void*)( &( send_data[i][0] ) ), sizeof( SharedEntityData ) * sizes_send[i],
08526                           MPI_UNSIGNED_CHAR, buffProcs[i], tag, cm, &send_req[i] );
08527         if( ierr ) return MB_FILE_WRITE_ERROR;
08528     }
08529 
08530     // Receive data
08531     ierr = MPI_Waitall( num_proc, &recv_req[0], &stat[0] );
08532     if( ierr ) return MB_FILE_WRITE_ERROR;
08533 
08534     // Wait until everything is sent to release send buffers
08535     ierr = MPI_Waitall( num_proc, &send_req[0], &stat[0] );
08536     if( ierr ) return MB_FILE_WRITE_ERROR;
08537 
08538     return MB_SUCCESS;
08539 }
08540 
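      // Illustrative usage sketch (not part of this file): collective consistency check of shared
      // entity data; when print_em is true and HDF5 is available, each rank writes mesh.<rank>.h5m
      // on failure.
      //
      //   moab::ErrorCode rval = pcomm->check_all_shared_handles( true );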
08541 ErrorCode ParallelComm::check_all_shared_handles( bool print_em )
08542 {
08543     // Get all shared ent data from other procs
08544     std::vector< std::vector< SharedEntityData > > shents( buffProcs.size() ), send_data( buffProcs.size() );
08545 
08546     ErrorCode result;
08547     bool done = false;
08548 
08549     while( !done )
08550     {
08551         result = check_local_shared();
08552         if( MB_SUCCESS != result )
08553         {
08554             done = true;
08555             continue;
08556         }
08557 
08558         result = pack_shared_handles( send_data );
08559         if( MB_SUCCESS != result )
08560         {
08561             done = true;
08562             continue;
08563         }
08564 
08565         result = exchange_all_shared_handles( send_data, shents );
08566         if( MB_SUCCESS != result )
08567         {
08568             done = true;
08569             continue;
08570         }
08571 
08572         if( !shents.empty() ) result = check_my_shared_handles( shents );
08573         done = true;
08574     }
08575 
08576     if( MB_SUCCESS != result && print_em )
08577     {
08578 #ifdef MOAB_HAVE_HDF5
08579         std::ostringstream ent_str;
08580         ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
08581         mbImpl->write_mesh( ent_str.str().c_str() );
08582 #endif
08583     }
08584 
08585     return result;
08586 }
08587 
08588 ErrorCode ParallelComm::check_local_shared()
08589 {
08590     // Do some checks on shared entities to make sure things look
08591     // consistent
08592 
08593     // Check that non-vertex shared entities are shared by same procs as all
08594     // their vertices
08595     // std::pair<Range::const_iterator,Range::const_iterator> vert_it =
08596     //    sharedEnts.equal_range(MBVERTEX);
08597     std::vector< EntityHandle > dum_connect;
08598     const EntityHandle* connect;
08599     int num_connect;
08600     int tmp_procs[MAX_SHARING_PROCS];
08601     EntityHandle tmp_hs[MAX_SHARING_PROCS];
08602     std::set< int > tmp_set, vset;
08603     int num_ps;
08604     ErrorCode result;
08605     unsigned char pstat;
08606     std::vector< EntityHandle > bad_ents;
08607     std::vector< std::string > errors;
08608 
08609     std::set< EntityHandle >::iterator vit;
08610     for( vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
08611     {
08612         // Get sharing procs for this ent
08613         result = get_sharing_data( *vit, tmp_procs, tmp_hs, pstat, num_ps );
08614         if( MB_SUCCESS != result )
08615         {
08616             bad_ents.push_back( *vit );
08617             errors.push_back( std::string( "Failure getting sharing data." ) );
08618             continue;
08619         }
08620 
08621         bool bad = false;
08622         // Entity must be shared
08623         if( !( pstat & PSTATUS_SHARED ) )
08624             errors.push_back( std::string( "Entity should be shared but isn't." ) ), bad = true;
08625 
08626         // If entity is not owned this must not be first proc
08627         if( pstat & PSTATUS_NOT_OWNED && tmp_procs[0] == (int)procConfig.proc_rank() )
08628             errors.push_back( std::string( "Entity not owned but is first proc." ) ), bad = true;
08629 
08630         // If entity is owned and multishared, this must be first proc
08631         if( !( pstat & PSTATUS_NOT_OWNED ) && pstat & PSTATUS_MULTISHARED &&
08632             ( tmp_procs[0] != (int)procConfig.proc_rank() || tmp_hs[0] != *vit ) )
08633             errors.push_back( std::string( "Entity owned and multishared but not first proc or not first handle." ) ),
08634                 bad = true;
08635 
08636         if( bad )
08637         {
08638             bad_ents.push_back( *vit );
08639             continue;
08640         }
08641 
08642         EntityType type = mbImpl->type_from_handle( *vit );
08643         if( type == MBVERTEX || type == MBENTITYSET ) continue;
08644 
08645         // Copy element's procs to vset and save size
08646         int orig_ps = num_ps;
08647         vset.clear();
08648         std::copy( tmp_procs, tmp_procs + num_ps, std::inserter( vset, vset.begin() ) );
08649 
08650         // Get vertices for this ent and intersection of sharing procs
08651         result = mbImpl->get_connectivity( *vit, connect, num_connect, false, &dum_connect );
08652         if( MB_SUCCESS != result )
08653         {
08654             bad_ents.push_back( *vit );
08655             errors.push_back( std::string( "Failed to get connectivity." ) );
08656             continue;
08657         }
08658 
08659         for( int i = 0; i < num_connect; i++ )
08660         {
08661             result = get_sharing_data( connect[i], tmp_procs, NULL, pstat, num_ps );
08662             if( MB_SUCCESS != result )
08663             {
08664                 bad_ents.push_back( *vit );
08665                 continue;
08666             }
08667             if( !num_ps )
08668             {
08669                 vset.clear();
08670                 break;
08671             }
08672             std::sort( tmp_procs, tmp_procs + num_ps );
08673             tmp_set.clear();
08674             std::set_intersection( tmp_procs, tmp_procs + num_ps, vset.begin(), vset.end(),
08675                                    std::inserter( tmp_set, tmp_set.end() ) );
08676             vset.swap( tmp_set );
08677             if( vset.empty() ) break;
08678         }
08679 
08680         // Intersect them; should be the same size as orig_ps
08681         tmp_set.clear();
08682         std::set_intersection( tmp_procs, tmp_procs + num_ps, vset.begin(), vset.end(),
08683                                std::inserter( tmp_set, tmp_set.end() ) );
08684         if( orig_ps != (int)tmp_set.size() )
08685         {
08686             errors.push_back( std::string( "Vertex proc set not same size as entity proc set." ) );
08687             bad_ents.push_back( *vit );
08688             for( int i = 0; i < num_connect; i++ )
08689             {
08690                 bad_ents.push_back( connect[i] );
08691                 errors.push_back( std::string( "vertex in connect" ) );
08692             }
08693         }
08694     }
08695 
08696     if( !bad_ents.empty() )
08697     {
08698         std::cout << "Found bad entities in check_local_shared, proc rank " << procConfig.proc_rank() << ","
08699                   << std::endl;
08700         std::vector< std::string >::iterator sit;
08701         std::vector< EntityHandle >::iterator rit;
08702         for( rit = bad_ents.begin(), sit = errors.begin(); rit != bad_ents.end(); ++rit, ++sit )
08703         {
08704             list_entities( &( *rit ), 1 );
08705             std::cout << "Reason: " << *sit << std::endl;
08706         }
08707         return MB_FAILURE;
08708     }
08709 
08710     // To do: check interface sets
08711 
08712     return MB_SUCCESS;
08713 }
08714 
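// Variant used when several ParallelComm instances live in the same process (e.g. in serial
// tests): instead of MPI communication, the packed shared-handle data is rerouted in memory
// from the sender-indexed layout send_data[p][idx_p] to the receiver-indexed layout
// shents[to_p][top_idx_p] before each instance checks its own view.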
08715 ErrorCode ParallelComm::check_all_shared_handles( ParallelComm** pcs, int num_pcs )
08716 {
08717     std::vector< std::vector< std::vector< SharedEntityData > > > shents, send_data;
08718     ErrorCode result = MB_SUCCESS, tmp_result;
08719 
08720     // Get all shared ent data from each proc to all other procs
08721     send_data.resize( num_pcs );
08722     for( int p = 0; p < num_pcs; p++ )
08723     {
08724         tmp_result = pcs[p]->pack_shared_handles( send_data[p] );
08725         if( MB_SUCCESS != tmp_result ) result = tmp_result;
08726     }
08727     if( MB_SUCCESS != result ) return result;
08728 
08729     // Move the data sorted by sending proc to data sorted by receiving proc
08730     shents.resize( num_pcs );
08731     for( int p = 0; p < num_pcs; p++ )
08732         shents[p].resize( pcs[p]->buffProcs.size() );
08733 
08734     for( int p = 0; p < num_pcs; p++ )
08735     {
08736         for( unsigned int idx_p = 0; idx_p < pcs[p]->buffProcs.size(); idx_p++ )
08737         {
08738             // Move send_data[p][to_p] to shents[to_p][idx_p]
08739             int to_p      = pcs[p]->buffProcs[idx_p];
08740             int top_idx_p = pcs[to_p]->get_buffers( p );
08741             assert( -1 != top_idx_p );
08742             shents[to_p][top_idx_p] = send_data[p][idx_p];
08743         }
08744     }
08745 
08746     for( int p = 0; p < num_pcs; p++ )
08747     {
08748         std::ostringstream ostr;
08749         ostr << "Processor " << p << " bad entities:";
08750         tmp_result = pcs[p]->check_my_shared_handles( shents[p], ostr.str().c_str() );
08751         if( MB_SUCCESS != tmp_result ) result = tmp_result;
08752     }
08753 
08754     return result;
08755 }
08756 
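// For every (local, remote, owner) triple reported by neighbor buffProcs[i], verify that the
// local handle maps back to the reported remote handle via get_remote_handles and that the
// ownership recorded in pstatus agrees with the reported owner. Any entity this proc believes
// it shares with that neighbor but which the neighbor did not report stays in local_shared
// and is flagged as bad as well.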
08757 ErrorCode ParallelComm::check_my_shared_handles( std::vector< std::vector< SharedEntityData > >& shents,
08758                                                  const char* prefix )
08759 {
08760     // Now check against what I think data should be
08761     // Get all shared entities
08762     ErrorCode result;
08763     Range all_shared;
08764     std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( all_shared ) );
08765     std::vector< EntityHandle > dum_vec;
08766     all_shared.erase( all_shared.upper_bound( MBPOLYHEDRON ), all_shared.end() );
08767 
08768     Range bad_ents, local_shared;
08769     std::vector< SharedEntityData >::iterator vit;
08770     unsigned char tmp_pstat;
08771     for( unsigned int i = 0; i < shents.size(); i++ )
08772     {
08773         int other_proc = buffProcs[i];
08774         result         = get_shared_entities( other_proc, local_shared );
08775         if( MB_SUCCESS != result ) return result;
08776         for( vit = shents[i].begin(); vit != shents[i].end(); ++vit )
08777         {
08778             EntityHandle localh = vit->local, remoteh = vit->remote, dumh;
08779             local_shared.erase( localh );
08780             result = get_remote_handles( true, &localh, &dumh, 1, other_proc, dum_vec );
08781             if( MB_SUCCESS != result || dumh != remoteh ) bad_ents.insert( localh );
08782             result = get_pstatus( localh, tmp_pstat );
08783             if( MB_SUCCESS != result || ( !( tmp_pstat & PSTATUS_NOT_OWNED ) && (unsigned)vit->owner != rank() ) ||
08784                 ( tmp_pstat & PSTATUS_NOT_OWNED && (unsigned)vit->owner == rank() ) )
08785                 bad_ents.insert( localh );
08786         }
08787 
08788         if( !local_shared.empty() ) bad_ents.merge( local_shared );
08789     }
08790 
08791     if( !bad_ents.empty() )
08792     {
08793         if( prefix ) std::cout << prefix << std::endl;
08794         list_entities( bad_ents );
08795         return MB_FAILURE;
08796     }
08797     else
08798         return MB_SUCCESS;
08799 }
08800 
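// Returns the entities shared with other_proc (or with any proc if other_proc == -1),
// optionally restricted by dimension and filtered to interface and/or owned entities.
//
// A small usage sketch (hypothetical, assuming pc is a resolved ParallelComm):
//   Range iface_verts;
//   ErrorCode rval = pc->get_shared_entities( 1, iface_verts, 0 /*dim*/,
//                                             true /*iface*/, true /*owned only*/ );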
08801 ErrorCode ParallelComm::get_shared_entities( int other_proc,
08802                                              Range& shared_ents,
08803                                              int dim,
08804                                              const bool iface,
08805                                              const bool owned_filter )
08806 {
08807     shared_ents.clear();
08808     ErrorCode result = MB_SUCCESS;
08809 
08810     // Dimension
08811     if( -1 != dim )
08812     {
08813         DimensionPair dp = CN::TypeDimensionMap[dim];
08814         Range dum_range;
08815         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( dum_range ) );
08816         shared_ents.merge( dum_range.lower_bound( dp.first ), dum_range.upper_bound( dp.second ) );
08817     }
08818     else
08819         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( shared_ents ) );
08820 
08821     // Filter by iface
08822     if( iface )
08823     {
08824         result = filter_pstatus( shared_ents, PSTATUS_INTERFACE, PSTATUS_AND );MB_CHK_SET_ERR( result, "Failed to filter by iface" );
08825     }
08826 
08827     // Filter by owned
08828     if( owned_filter )
08829     {
08830         result = filter_pstatus( shared_ents, PSTATUS_NOT_OWNED, PSTATUS_NOT );MB_CHK_SET_ERR( result, "Failed to filter by owned" );
08831     }
08832 
08833     // Filter by proc
08834     if( -1 != other_proc )
08835     {
08836         result = filter_pstatus( shared_ents, PSTATUS_SHARED, PSTATUS_AND, other_proc );MB_CHK_SET_ERR( result, "Failed to filter by proc" );
08837     }
08838 
08839     return result;
08840 }
08841 
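// For each entity in the given ranges, if its sharedp tag exists but stores -1 (i.e. the
// entity is no longer shared with a single proc), the sharedp/sharedh/pstatus tag data is
// deleted so the entity reverts to the tags' default values.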
08842 ErrorCode ParallelComm::clean_shared_tags( std::vector< Range* >& exchange_ents )
08843 {
08844     for( unsigned int i = 0; i < exchange_ents.size(); i++ )
08845     {
08846         Range* ents        = exchange_ents[i];
08847         int num_ents       = ents->size();
08848         Range::iterator it = ents->begin();
08849 
08850         for( int n = 0; n < num_ents; n++ )
08851         {
08852             int sharing_proc;
08853             ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), &( *it ), 1, &sharing_proc );
08854             if( result != MB_TAG_NOT_FOUND && sharing_proc == -1 )
08855             {
08856                 result = mbImpl->tag_delete_data( sharedp_tag(), &( *it ), 1 );MB_CHK_SET_ERR( result, "Failed to delete sharedp tag data" );
08857                 result = mbImpl->tag_delete_data( sharedh_tag(), &( *it ), 1 );MB_CHK_SET_ERR( result, "Failed to delete sharedh tag data" );
08858                 result = mbImpl->tag_delete_data( pstatus_tag(), &( *it ), 1 );MB_CHK_SET_ERR( result, "Failed to delete pstatus tag data" );
08859             }
08860             ++it;
08861         }
08862     }
08863 
08864     return MB_SUCCESS;
08865 }
08866 
08867 void ParallelComm::set_debug_verbosity( int verb )
08868 {
08869     myDebug->set_verbosity( verb );
08870 }
08871 
08872 int ParallelComm::get_debug_verbosity()
08873 {
08874     return myDebug->get_verbosity();
08875 }
08876 
08877 ErrorCode ParallelComm::get_entityset_procs( EntityHandle set, std::vector< unsigned >& ranks ) const
08878 {
08879     return sharedSetData->get_sharing_procs( set, ranks );
08880 }
08881 
08882 ErrorCode ParallelComm::get_entityset_owner( EntityHandle entity_set,
08883                                              unsigned& owner_rank,
08884                                              EntityHandle* remote_handle ) const
08885 {
08886     if( remote_handle )
08887         return sharedSetData->get_owner( entity_set, owner_rank, *remote_handle );
08888     else
08889         return sharedSetData->get_owner( entity_set, owner_rank );
08890 }
08891 
08892 ErrorCode ParallelComm::get_entityset_local_handle( unsigned owning_rank,
08893                                                     EntityHandle remote_handle,
08894                                                     EntityHandle& local_handle ) const
08895 {
08896     return sharedSetData->get_local_handle( owning_rank, remote_handle, local_handle );
08897 }
08898 
08899 ErrorCode ParallelComm::get_shared_sets( Range& result ) const
08900 {
08901     return sharedSetData->get_shared_sets( result );
08902 }
08903 
08904 ErrorCode ParallelComm::get_entityset_owners( std::vector< unsigned >& ranks ) const
08905 {
08906     return sharedSetData->get_owning_procs( ranks );
08907 }
08908 
08909 ErrorCode ParallelComm::get_owned_sets( unsigned owning_rank, Range& sets_out ) const
08910 {
08911     return sharedSetData->get_shared_sets( owning_rank, sets_out );
08912 }
08913 
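// Gathers tag values for gather_ents onto root_proc_rank. Each proc packs one buffer laid out
// as [num_ents (int)][num_ents ids from id_tag (int each)][num_ents tag values (bytes_per_tag
// each)]; the buffer sizes are collected with MPI_Gather and the buffers themselves with
// MPI_Gatherv. On the root, the values are scattered into the gather_set entities by id via
// tag_iterate (the ids are assumed 1-based, as with a typical GLOBAL_ID tag).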
08914 ErrorCode ParallelComm::gather_data( Range& gather_ents,
08915                                      Tag& tag_handle,
08916                                      Tag id_tag,
08917                                      EntityHandle gather_set,
08918                                      int root_proc_rank )
08919 {
08920     int dim           = mbImpl->dimension_from_handle( *gather_ents.begin() );
08921     int bytes_per_tag = 0;
08922     ErrorCode rval    = mbImpl->tag_get_bytes( tag_handle, bytes_per_tag );
08923     if( rval != MB_SUCCESS ) return rval;
08924 
08925     int sz_buffer         = sizeof( int ) + gather_ents.size() * ( sizeof( int ) + bytes_per_tag );
08926     void* senddata        = malloc( sz_buffer );
08927     ( (int*)senddata )[0] = (int)gather_ents.size();
08928     int* ptr_int          = (int*)senddata + 1;
08929     rval                  = mbImpl->tag_get_data( id_tag, gather_ents, (void*)ptr_int );
08930     if( rval != MB_SUCCESS ) { free( senddata ); return rval; }
08931     ptr_int = (int*)( senddata ) + 1 + gather_ents.size();
08932     rval    = mbImpl->tag_get_data( tag_handle, gather_ents, (void*)ptr_int );
08933     if( rval != MB_SUCCESS ) { free( senddata ); return rval; }
08934     std::vector< int > displs( proc_config().proc_size(), 0 );
08935     MPI_Gather( &sz_buffer, 1, MPI_INT, &displs[0], 1, MPI_INT, root_proc_rank, comm() );
08936     std::vector< int > recvcnts( proc_config().proc_size(), 0 );
08937     std::copy( displs.begin(), displs.end(), recvcnts.begin() );
08938     std::partial_sum( displs.begin(), displs.end(), displs.begin() );
08939     std::vector< int >::iterator lastM1 = displs.end() - 1;
08940     std::copy_backward( displs.begin(), lastM1, displs.end() );
08941     // std::copy_backward(displs.begin(), --displs.end(), displs.end());
08942     displs[0] = 0;
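    // At this point recvcnts holds the per-proc buffer sizes (in bytes) and displs their
    // exclusive prefix sum: the copy above saved the raw sizes, partial_sum produced an
    // inclusive scan in place, and copy_backward shifted it right by one slot before
    // displs[0] was reset to 0.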
08943 
08944     if( (int)rank() != root_proc_rank )
08945         MPI_Gatherv( senddata, sz_buffer, MPI_BYTE, NULL, NULL, NULL, MPI_BYTE, root_proc_rank, comm() );
08946     else
08947     {
08948         Range gents;
08949         mbImpl->get_entities_by_dimension( gather_set, dim, gents );
08950         int recvbuffsz = gents.size() * ( bytes_per_tag + sizeof( int ) ) + proc_config().proc_size() * sizeof( int );
08951         void* recvbuf  = malloc( recvbuffsz );
08952         MPI_Gatherv( senddata, sz_buffer, MPI_BYTE, recvbuf, &recvcnts[0], &displs[0], MPI_BYTE, root_proc_rank,
08953                      comm() );
08954 
08955         void* gvals = NULL;
08956 
08957         // Test whether gents has multiple sequences
08958         bool multiple_sequences = false;
08959         if( gents.psize() > 1 )
08960             multiple_sequences = true;
08961         else
08962         {
08963             int count;
08964             rval = mbImpl->tag_iterate( tag_handle, gents.begin(), gents.end(), count, gvals );
08965             assert( NULL != gvals );
08966             assert( count > 0 );
08967             if( (size_t)count != gents.size() )
08968             {
08969                 multiple_sequences = true;
08970                 gvals              = NULL;
08971             }
08972         }
08973 
08974         // If gents has multiple sequences, create a temp buffer for gathered values
08975         if( multiple_sequences )
08976         {
08977             gvals = malloc( gents.size() * bytes_per_tag );
08978             assert( NULL != gvals );
08979         }
08980 
08981         for( int i = 0; i != (int)size(); i++ )
08982         {
08983             int numents   = *(int*)( ( (char*)recvbuf ) + displs[i] );
08984             int* id_ptr   = (int*)( ( (char*)recvbuf ) + displs[i] + sizeof( int ) );
08985             char* val_ptr = (char*)( id_ptr + numents );
08986             for( int j = 0; j != numents; j++ )
08987             {
08988                 int idx = id_ptr[j];
08989                 memcpy( (char*)gvals + ( idx - 1 ) * bytes_per_tag, val_ptr + j * bytes_per_tag, bytes_per_tag );
08990             }
08991         }
08992 
08993         // Free the receive buffer
08994         free( recvbuf );
08995 
08996         // If gents has multiple sequences, copy tag data (stored in the temp buffer) to each
08997         // sequence separately
08998         if( multiple_sequences )
08999         {
09000             Range::iterator iter = gents.begin();
09001             size_t start_idx     = 0;
09002             while( iter != gents.end() )
09003             {
09004                 int count;
09005                 void* ptr;
09006                 rval = mbImpl->tag_iterate( tag_handle, iter, gents.end(), count, ptr );
09007                 assert( NULL != ptr );
09008                 assert( count > 0 );
09009                 memcpy( (char*)ptr, (char*)gvals + start_idx * bytes_per_tag, bytes_per_tag * count );
09010 
09011                 iter += count;
09012                 start_idx += count;
09013             }
09014             assert( start_idx == gents.size() );
09015 
09016             // Free the temp buffer
09017             free( gvals );
09018         }
09019     }
09020 
09021     // Free the send data
09022     free( senddata );
09023 
09024     return MB_SUCCESS;
09025 }
09026 
09027 /*
09028  * This call is collective, so we will use the message ids for tag communications;
09029  * they are similar, but simpler
09030  * Pack the number of edges, the remote edge handles, then for each edge, the number
09031  *    of intersection points, and then 3 doubles for each intersection point
09032  * On average, there is one intx point per edge, in some cases 2, in some cases 0
09033  *   so on average, the message size is num_edges * (sizeof(eh) + sizeof(int) + 1*3*sizeof(double))
09034  *          = num_edges * (8 + 4 + 24)
09035  */
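// Rough size estimate under the assumptions above (8-byte handles, 4-byte ints, one
// intersection point per edge): 10,000 owned shared edges give on the order of
// 10,000 * 36 bytes = 360 KB per neighbor, before the leading size/count header.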
09036 ErrorCode ParallelComm::settle_intersection_points( Range& edges,
09037                                                     Range& shared_edges_owned,
09038                                                     std::vector< std::vector< EntityHandle >* >& extraNodesVec,
09039                                                     double tolerance )
09040 {
09041     // The index of an edge in the edges Range will give the index for extraNodesVec
09042     // the strategy of this follows exchange tags strategy:
09043     ErrorCode result;
09044     int success;
09045 
09046     myDebug->tprintf( 1, "Entering settle_intersection_points\n" );
09047 
09048     // Get all procs interfacing to this proc
09049     std::set< unsigned int > exch_procs;
09050     result = get_comm_procs( exch_procs );
09051 
09052     // Post ghost irecv's for all interface procs
09053     // Index requests the same as buffer/sharing procs indices
09054     std::vector< MPI_Request > recv_intx_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
09055     std::vector< unsigned int >::iterator sit;
09056     int ind;
09057 
09058     reset_all_buffers();
09059     int incoming = 0;
09060 
09061     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
09062     {
09063         incoming++;
09064         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
09065                            MB_MESG_TAGS_SIZE, incoming );
09066 
09067         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
09068                              MB_MESG_TAGS_SIZE, procConfig.proc_comm(), &recv_intx_reqs[3 * ind] );
09069         if( success != MPI_SUCCESS )
09070         {
09071             MB_SET_ERR( MB_FAILURE, "Failed to post irecv in settle intersection point" );
09072         }
09073     }
09074 
09075     // Pack and send intersection points from this proc to others
09076     // Make sendReqs vector to simplify initialization
09077     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
09078 
09079     // Work on the owned shared edges passed in by the caller
09080     Range& entities = shared_edges_owned;
09081 
09082     int dum_ack_buff;
09083 
09084     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
09085     {
09086         Range edges_to_send = entities;
09087 
09088         // Get ents shared by proc *sit
09089         result = filter_pstatus( edges_to_send, PSTATUS_SHARED, PSTATUS_AND, *sit );MB_CHK_SET_ERR( result, "Failed pstatus AND check" );
09090 
09091         // Remove nonowned entities; not needed here, since the edges are already owned by this proc
09092 
09093         // Pack the data
09094         // Reserve space on front for size and for initial buff size
09095         Buffer* buff = localOwnedBuffs[ind];
09096         buff->reset_ptr( sizeof( int ) );
09097 
09098         /*result = pack_intx_points(edges_to_send, edges, extraNodesVec,
09099             localOwnedBuffs[ind], *sit);*/
09100 
09101         // Count the data first, to check whether there is enough room
09102         // Send the remote handles
09103         std::vector< EntityHandle > dum_remote_edges( edges_to_send.size() );
09104         /*
09105          *  get_remote_handles(const bool store_remote_handles,
09106                                    EntityHandle *from_vec,
09107                                    EntityHandle *to_vec_tmp,
09108                                    int num_ents, int to_proc,
09109                                    const std::vector<EntityHandle> &new_ents);
09110          */
09111         // We are sending the buffer size, the number of edges, the remote edge handles, and then, for each edge:
09112         //          -- nb intx points, 3*nbintPointsforEdge "doubles"
09113         std::vector< EntityHandle > dum_vec;
09114         result = get_remote_handles( true, edges_to_send, &dum_remote_edges[0], *sit, dum_vec );MB_CHK_SET_ERR( result, "Failed to get remote handles" );
09115         int count = 4;  // Size of data
09116         count += sizeof( int ) * (int)edges_to_send.size();
09117         count += sizeof( EntityHandle ) * (int)edges_to_send.size();  // We will send the remote handles
09118         for( Range::iterator eit = edges_to_send.begin(); eit != edges_to_send.end(); ++eit )
09119         {
09120             EntityHandle edge                       = *eit;
09121             unsigned int indx                       = edges.find( edge ) - edges.begin();
09122             std::vector< EntityHandle >& intx_nodes = *( extraNodesVec[indx] );
09123             count += (int)intx_nodes.size() * 3 * sizeof( double );  // 3 doubles (coordinates) per intersection point
09124         }
09125         //
09126         buff->check_space( count );
09127         PACK_INT( buff->buff_ptr, edges_to_send.size() );
09128         PACK_EH( buff->buff_ptr, &dum_remote_edges[0], dum_remote_edges.size() );
09129         for( Range::iterator eit = edges_to_send.begin(); eit != edges_to_send.end(); ++eit )
09130         {
09131             EntityHandle edge = *eit;
09132             // Pack the remote edge
09133             unsigned int indx                       = edges.find( edge ) - edges.begin();
09134             std::vector< EntityHandle >& intx_nodes = *( extraNodesVec[indx] );
09135             PACK_INT( buff->buff_ptr, intx_nodes.size() );
09136 
09137             result = mbImpl->get_coords( &intx_nodes[0], intx_nodes.size(), (double*)buff->buff_ptr );MB_CHK_SET_ERR( result, "Failed to get coords" );
09138             buff->buff_ptr += 3 * sizeof( double ) * intx_nodes.size();
09139         }
09140 
09141         // Done packing the intx points and remote edges
09142         buff->set_stored_size();
09143 
09144         // Now send it
09145         result = send_buffer( *sit, localOwnedBuffs[ind], MB_MESG_TAGS_SIZE, sendReqs[3 * ind],
09146                               recv_intx_reqs[3 * ind + 2], &dum_ack_buff, incoming );MB_CHK_SET_ERR( result, "Failed to send buffer" );
09147     }
09148 
09149     // Receive/unpack intx points
09150     while( incoming )
09151     {
09152         MPI_Status status;
09153         int index_in_recv_requests;
09154         PRINT_DEBUG_WAITANY( recv_intx_reqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
09155         success = MPI_Waitany( 3 * buffProcs.size(), &recv_intx_reqs[0], &index_in_recv_requests, &status );
09156         if( MPI_SUCCESS != success )
09157         {
09158             MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" );
09159         }
09160         // The processor index in buffProcs is the request index divided by 3
09161         ind = index_in_recv_requests / 3;
09162 
09163         PRINT_DEBUG_RECD( status );
09164 
09165         // OK, received something; decrement incoming counter
09166         incoming--;
09167 
09168         bool done = false;
09169         result    = recv_buffer( MB_MESG_TAGS_SIZE, status, remoteOwnedBuffs[ind],
09170                                  recv_intx_reqs[3 * ind + 1],  // This is for receiving the second message
09171                                  recv_intx_reqs[3 * ind + 2],  // This would be for ack, but it is not
09172                                                                // used; consider removing it
09173                                  incoming, localOwnedBuffs[ind],
09174                                  sendReqs[3 * ind + 1],  // Send request for sending the second message
09175                                  sendReqs[3 * ind + 2],  // This is for sending the ack
09176                                  done );MB_CHK_SET_ERR( result, "Failed to resize recv buffer" );
09177         if( done )
09178         {
09179             Buffer* buff = remoteOwnedBuffs[ind];
09180             buff->reset_ptr( sizeof( int ) );
09181             /*result = unpack_tags(remoteOwnedBuffs[ind/2]->buff_ptr, dum_vec, true,
09182                 buffProcs[ind/2]);*/
09183             // Unpack now the edges and vertex info; compare with the existing vertex positions
09184 
09185             int num_edges;
09186 
09187             UNPACK_INT( buff->buff_ptr, num_edges );
09188             std::vector< EntityHandle > rec_edges;
09189             rec_edges.resize( num_edges );
09190             UNPACK_EH( buff->buff_ptr, &rec_edges[0], num_edges );
09191             for( int i = 0; i < num_edges; i++ )
09192             {
09193                 EntityHandle edge                       = rec_edges[i];
09194                 unsigned int indx                       = edges.find( edge ) - edges.begin();
09195                 std::vector< EntityHandle >& intx_nodes = *( extraNodesVec[indx] );
09196                 // Now get the number of nodes on this (now local) edge
09197                 int nverts;
09198                 UNPACK_INT( buff->buff_ptr, nverts );
09199                 std::vector< double > pos_from_owner;
09200                 pos_from_owner.resize( 3 * nverts );
09201                 UNPACK_DBLS( buff->buff_ptr, &pos_from_owner[0], 3 * nverts );
09202                 std::vector< double > current_positions( 3 * intx_nodes.size() );
09203                 result = mbImpl->get_coords( &intx_nodes[0], intx_nodes.size(), &current_positions[0] );MB_CHK_SET_ERR( result, "Failed to get current positions" );
09204                 // Now, look at what we have in current pos, compare to pos from owner, and reset
09205                 for( int k = 0; k < (int)intx_nodes.size(); k++ )
09206                 {
09207                     double* pk = &current_positions[3 * k];
09208                     // Take the current pos k, and settle among the ones from owner:
09209                     bool found = false;
09210                     for( int j = 0; j < nverts && !found; j++ )
09211                     {
09212                         double* pj   = &pos_from_owner[3 * j];
09213                         double dist2 = ( pk[0] - pj[0] ) * ( pk[0] - pj[0] ) + ( pk[1] - pj[1] ) * ( pk[1] - pj[1] ) +
09214                                        ( pk[2] - pj[2] ) * ( pk[2] - pj[2] );
09215                         if( dist2 < tolerance )
09216                         {
09217                             pk[0] = pj[0];
09218                             pk[1] = pj[1];
09219                             pk[2] = pj[2];  // Correct it!
09220                             found = true;
09221                             break;
09222                         }
09223                     }
09224                     if( !found )
09225                     {
09226 #ifndef NDEBUG
09227                         std::cout << " pk:" << pk[0] << " " << pk[1] << " " << pk[2] << " not found \n";
09228 #endif
09229                         result = MB_FAILURE;
09230                     }
09231                 }
09232                 // After we are done resetting, we can set the new positions of nodes:
09233                 result = mbImpl->set_coords( &intx_nodes[0], (int)intx_nodes.size(), &current_positions[0] );MB_CHK_SET_ERR( result, "Failed to set new current positions" );
09234             }
09235         }
09236     }
09237 
09238     // OK, now wait
09239     if( myDebug->get_verbosity() == 5 )
09240     {
09241         success = MPI_Barrier( procConfig.proc_comm() );
09242     }
09243     else
09244     {
09245         MPI_Status status[3 * MAX_SHARING_PROCS];
09246         success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], status );
09247     }
09248     if( MPI_SUCCESS != success )
09249     {
09250         MB_SET_ERR( MB_FAILURE, "Failure in waitall in tag exchange" );
09251     }
09252 
09253     myDebug->tprintf( 1, "Exiting settle_intersection_points" );
09254 
09255     return MB_SUCCESS;
09256 }
09257 
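// Collective deletion of possibly shared entities. Each proc contributes, for every shared
// entity it wants to delete, one tuple (remote proc, remote handle) per sharing proc; a single
// crystal-router gs_transfer moves the tuples to their targets, the received remote handles
// are added to the local delete range, the entities are deleted, and sharedEnts is pruned of
// the handles that were just removed.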
09258 ErrorCode ParallelComm::delete_entities( Range& to_delete )
09259 {
09260     // Will not look at shared sets yet, but maybe we should
09261     // First, see if any of the entities to delete is shared; then inform the other processors
09262     // about their fate (to be deleted), using a crystal router transfer
09263     ErrorCode rval = MB_SUCCESS;
09264     unsigned char pstat;
09265     EntityHandle tmp_handles[MAX_SHARING_PROCS];
09266     int tmp_procs[MAX_SHARING_PROCS];
09267     unsigned int num_ps;
09268     TupleList ents_to_delete;
09269     ents_to_delete.initialize( 1, 0, 1, 0, to_delete.size() * ( MAX_SHARING_PROCS + 1 ) );  // A little bit of overkill
09270     ents_to_delete.enableWriteAccess();
09271     unsigned int i = 0;
09272     for( Range::iterator it = to_delete.begin(); it != to_delete.end(); ++it )
09273     {
09274         EntityHandle eh = *it;  // Entity to be deleted
09275 
09276         rval = get_sharing_data( eh, tmp_procs, tmp_handles, pstat, num_ps );
09277         if( rval != MB_SUCCESS || num_ps == 0 ) continue;
09278         // Add to the tuple list the information to be sent (to the remote procs)
09279         for( unsigned int p = 0; p < num_ps; p++ )
09280         {
09281             ents_to_delete.vi_wr[i]  = tmp_procs[p];
09282             ents_to_delete.vul_wr[i] = (unsigned long)tmp_handles[p];
09283             i++;
09284             ents_to_delete.inc_n();
09285         }
09286     }
09287 
09288     gs_data::crystal_data* cd = this->procConfig.crystal_router();
09289     // All communication happens here; no other mpi calls
09290     // Also, this is a collective call
09291     rval = cd->gs_transfer( 1, ents_to_delete, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
09292 
09293     // Add to the range of ents to delete the new ones that were sent from other procs
09294     unsigned int received = ents_to_delete.get_n();
09295     for( i = 0; i < received; i++ )
09296     {
09297         // int from = ents_to_delete.vi_rd[i];
09298         unsigned long valrec = ents_to_delete.vul_rd[i];
09299         to_delete.insert( (EntityHandle)valrec );
09300     }
09301     rval = mbImpl->delete_entities( to_delete );MB_CHK_SET_ERR( rval, "Error in deleting actual entities" );
09302 
09303     std::set< EntityHandle > good_ents;
09304     for( std::set< EntityHandle >::iterator sst = sharedEnts.begin(); sst != sharedEnts.end(); ++sst )
09305     {
09306         EntityHandle eh = *sst;
09307         int index       = to_delete.index( eh );
09308         if( -1 == index ) good_ents.insert( eh );
09309     }
09310     sharedEnts = good_ents;
09311 
09312     // What about shared sets? Who is updating them?
09313     return MB_SUCCESS;
09314 }
09315 
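// Formats the bits set in pstat as a comma-separated list; for example, a value with
// PSTATUS_SHARED and PSTATUS_INTERFACE set yields the string "SHARED, INTERFACE".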
09316 void ParallelComm::print_pstatus( unsigned char pstat, std::string& ostr )
09317 {
09318     std::ostringstream str;
09319     int num = 0;
09320 #define ppstat( a, b )             \
09321     {                              \
09322         if( pstat & ( a ) )        \
09323         {                          \
09324             if( num ) str << ", "; \
09325             str << ( b );          \
09326             num++;                 \
09327         }                          \
09328     }
09329 
09330     ppstat( PSTATUS_NOT_OWNED, "NOT_OWNED" );
09331     ppstat( PSTATUS_SHARED, "SHARED" );
09332     ppstat( PSTATUS_MULTISHARED, "MULTISHARED" );
09333     ppstat( PSTATUS_INTERFACE, "INTERFACE" );
09334     ppstat( PSTATUS_GHOST, "GHOST" );
09335 
09336     ostr = str.str();
09337 }
09338 
09339 void ParallelComm::print_pstatus( unsigned char pstat )
09340 {
09341     std::string str;
09342     print_pstatus( pstat, str );
09343     std::cout << str.c_str() << std::endl;
09344 }
09345 
09346 ErrorCode ParallelComm::correct_thin_ghost_layers()
09347 {
09348 
09349     // Get all shared ent data from other procs
09350     std::vector< std::vector< SharedEntityData > > shents( buffProcs.size() ), send_data( buffProcs.size() );
09351 
09352     // This will work only on the multi-shared tags sharedps_tag() and sharedhs_tag()
09353 
09354     /*
09355      *   domain0 | domain1 | domain2 | domain3
09356      *   vertices from domain 1 and 2 are visible from both 0 and 3, but
09357      *   domain 0 might not have info about multi-sharing from domain 3
09358      *   so we will force that domain 0 vertices owned by 1 and 2 have information
09359      *   about the domain 3 sharing
09360      *
09361      *   SharedEntityData will have :
09362      *    struct SharedEntityData {
09363             EntityHandle local;  // this is same meaning, for the proc we sent to, it is local
09364             EntityHandle remote; // this will be the far away handle that will need to be added
09365             EntityID owner;      // this will be the remote proc
09366           };
09367           // so we need to add data like this:
09368            a multishared entity owned by proc x will have data like
09369              multishared procs:  proc x, a, b, c
09370              multishared handles:     h1, h2, h3, h4
09371              we will need to send data from proc x like this:
09372                to proc a we will send
09373                  (h2, h3, b), (h2, h4, c)
09374                to proc b we will send
09375                   (h3, h2, a), (h3, h4, c)
09376                to proc c we will send
09377                   (h4, h2, a), (h4, h3, b)
09378      *
09379      */
09380 
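    // In short: (1) every locally owned, multishared (>2 procs) entity contributes, for each
    // pair of other sharing procs (a, b), a SharedEntityData triple telling a about b's handle;
    // (2) the triples are exchanged with exchange_all_shared_handles; (3) on receipt, any owner
    // missing from the local sharedps/sharedhs lists is appended, and an entity that was only
    // simply shared is promoted to PSTATUS_MULTISHARED.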
09381     ErrorCode result = MB_SUCCESS;
09382     int ent_procs[MAX_SHARING_PROCS + 1];
09383     EntityHandle handles[MAX_SHARING_PROCS + 1];
09384     int num_sharing;
09385     SharedEntityData tmp;
09386 
09387     for( std::set< EntityHandle >::iterator i = sharedEnts.begin(); i != sharedEnts.end(); ++i )
09388     {
09389 
09390         unsigned char pstat;
09391         result = get_sharing_data( *i, ent_procs, handles, pstat, num_sharing );MB_CHK_SET_ERR( result, "can't get sharing data" );
09392         if( !( pstat & PSTATUS_MULTISHARED ) ||
09393             num_sharing <= 2 )  // if not multishared, skip, it should have no problems
09394             continue;
09395         // we should skip the ones that are not owned locally
09396         // the owned ones will have the most multi-shared info, because the info comes from other
09397         // remote processors
09398         if( pstat & PSTATUS_NOT_OWNED ) continue;
09399         for( int j = 1; j < num_sharing; j++ )
09400         {
09401             // we will send to proc ent_procs[j]
09402             int send_to_proc = ent_procs[j];
09403             tmp.local        = handles[j];
09404             int ind          = get_buffers( send_to_proc );
09405             assert( -1 != ind );  // THIS SHOULD NEVER HAPPEN
09406             for( int k = 1; k < num_sharing; k++ )
09407             {
09408                 // do not send to self proc
09409                 if( j == k ) continue;
09410                 tmp.remote = handles[k];  // this will be the handle of entity on proc
09411                 tmp.owner  = ent_procs[k];
09412                 send_data[ind].push_back( tmp );
09413             }
09414         }
09415     }
09416 
09417     result = exchange_all_shared_handles( send_data, shents );MB_CHK_ERR( result );
09418 
09419     // loop over all received shared entity data and add any sharing info that is missing locally
09420     for( size_t i = 0; i < shents.size(); i++ )
09421     {
09422         std::vector< SharedEntityData >& shEnts = shents[i];
09423         for( size_t j = 0; j < shEnts.size(); j++ )
09424         {
09425             tmp = shEnts[j];
09426             // basically, check the shared data for tmp.local entity
09427             // it should have inside the tmp.owner and tmp.remote
09428             EntityHandle eh = tmp.local;
09429             unsigned char pstat;
09430             result = get_sharing_data( eh, ent_procs, handles, pstat, num_sharing );MB_CHK_SET_ERR( result, "can't get sharing data" );
09431             // see if proc tmp.owner is in the list of ent_procs; if not, we have to extend
09432             // the handles and ent_procs lists and reset the sharing tags
09433 
09434             int proc_remote = tmp.owner;
09435             if( std::find( ent_procs, ent_procs + num_sharing, proc_remote ) == ent_procs + num_sharing )
09436             {
09437                 // so we did not find proc_remote in the sharing list
09438 #ifndef NDEBUG
09439                 std::cout << "THIN GHOST: we did not find on proc " << rank() << " for shared ent " << eh
09440                           << " the proc " << proc_remote << "\n";
09441 #endif
09442                 // increase num_sharing, and set the multi-shared tags
09443                 if( num_sharing >= MAX_SHARING_PROCS ) return MB_FAILURE;
09444                 handles[num_sharing]       = tmp.remote;
09445                 handles[num_sharing + 1]   = 0;  // end of list
09446                 ent_procs[num_sharing]     = tmp.owner;
09447                 ent_procs[num_sharing + 1] = -1;  // this should be already set
09448                 result                     = mbImpl->tag_set_data( sharedps_tag(), &eh, 1, ent_procs );MB_CHK_SET_ERR( result, "Failed to set sharedps tag data" );
09449                 result = mbImpl->tag_set_data( sharedhs_tag(), &eh, 1, handles );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag data" );
09450                 if( 2 == num_sharing )  // it means the sharedp and sharedh tags were set with a
09451                                         // non-default value
09452                 {
09453                     // so entity eh was simply shared before; we need to set those dense tags
09454                     // back to their default
09455                     // values
09456                     EntityHandle zero = 0;
09457                     int no_proc       = -1;
09458                     result            = mbImpl->tag_set_data( sharedp_tag(), &eh, 1, &no_proc );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
09459                     result = mbImpl->tag_set_data( sharedh_tag(), &eh, 1, &zero );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
09460                     // also, add multishared pstatus tag
09461                     // also add multishared status to pstatus
09462                     pstat  = pstat | PSTATUS_MULTISHARED;
09463                     result = mbImpl->tag_set_data( pstatus_tag(), &eh, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
09464                 }
09465             }
09466         }
09467     }
09468     return MB_SUCCESS;
09469 }
09470 }  // namespace moab