MOAB: Mesh Oriented datABase  (version 5.3.0)
ParallelComm.cpp
00001 #include "moab/Interface.hpp"
00002 #include "moab/ParallelComm.hpp"
00003 #include "moab/WriteUtilIface.hpp"
00004 #include "moab/ReadUtilIface.hpp"
00005 #include "SequenceManager.hpp"
00006 #include "moab/Error.hpp"
00007 #include "EntitySequence.hpp"
00008 #include "MBTagConventions.hpp"
00009 #include "moab/Skinner.hpp"
00010 #include "MBParallelConventions.h"
00011 #include "moab/Core.hpp"
00012 #include "ElementSequence.hpp"
00013 #include "moab/CN.hpp"
00014 #include "moab/RangeMap.hpp"
00015 #include "moab/MeshTopoUtil.hpp"
00016 #include "TagInfo.hpp"
00017 #include "DebugOutput.hpp"
00018 #include "SharedSetData.hpp"
00019 #include "moab/ScdInterface.hpp"
00020 #include "moab/TupleList.hpp"
00021 #include "moab/gs.hpp"
00022 
00023 #include <iostream>
00024 #include <sstream>
00025 #include <algorithm>
00026 #include <functional>
00027 #include <numeric>
00028 
00029 #include <cmath>
00030 #include <cstdlib>
00031 #include <cassert>
00032 
00033 #ifdef MOAB_HAVE_MPI
00034 #include "moab_mpi.h"
00035 #endif
00036 #ifdef MOAB_HAVE_MPE
00037 #include "mpe.h"
00038 int IFACE_START, IFACE_END;
00039 int GHOST_START, GHOST_END;
00040 int SHAREDV_START, SHAREDV_END;
00041 int RESOLVE_START, RESOLVE_END;
00042 int ENTITIES_START, ENTITIES_END;
00043 int RHANDLES_START, RHANDLES_END;
00044 int OWNED_START, OWNED_END;
00045 #endif
00046 
00047 namespace moab
00048 {
00049 
00050 const unsigned int ParallelComm::INITIAL_BUFF_SIZE = 1024;
00051 
00052 const int MAX_BCAST_SIZE = ( 1 << 28 );
00053 
00054 std::vector< ParallelComm::Buffer* > msgs;
00055 unsigned int __PACK_num = 0, __UNPACK_num = 0, __PACK_count = 0, __UNPACK_count = 0;
00056 std::string __PACK_string, __UNPACK_string;
00057 
00058 #ifdef DEBUG_PACKING_TIMES
00059 #define PC( n, m )                                                            \
00060     {                                                                         \
00061         if( __PACK_num == (unsigned int)n && __PACK_string == m )             \
00062             __PACK_count++;                                                   \
00063         else                                                                  \
00064         {                                                                     \
00065             if( __PACK_count > 1 ) std::cerr << " (" << __PACK_count << "x)"; \
00066             __PACK_count  = 1;                                                \
00067             __PACK_string = m;                                                \
00068             __PACK_num    = n;                                                \
00069             std::cerr << std::endl << "PACK: " << n << m;                     \
00070         }                                                                     \
00071     }
00072 #define UPC( n, m )                                                              \
00073     {                                                                            \
00074         if( __UNPACK_num == (unsigned int)n && __UNPACK_string == m )            \
00075             __UNPACK_count++;                                                    \
00076         else                                                                     \
00077         {                                                                        \
00078             if( __UNPACK_count > 1 ) std::cerr << "(" << __UNPACK_count << "x)"; \
00079             __UNPACK_count  = 1;                                                 \
00080             __UNPACK_string = m;                                                 \
00081             __UNPACK_num    = n;                                                 \
00082             std::cerr << std::endl << "UNPACK: " << n << m;                      \
00083         }                                                                        \
00084     }
00085 #else
00086 #define PC( n, m )
00087 #define UPC( n, m )
00088 #endif
00089 
00090 template < typename T >
00091 static inline void UNPACK( unsigned char*& buff, T* val, size_t count )
00092 {
00093     memcpy( val, buff, count * sizeof( T ) );
00094     buff += count * sizeof( T );
00095 }
00096 
00097 template < typename T >
00098 static inline void PACK( unsigned char*& buff, const T* val, size_t count )
00099 {
00100     memcpy( buff, val, count * sizeof( T ) );
00101     buff += count * sizeof( T );
00102 }
00103 
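/* The two templates above are the primitive serializers: each memcpy's
 * count*sizeof(T) bytes and advances the cursor, so a pack followed by an
 * unpack over the same bytes is a round trip. A minimal sketch (the scratch
 * buffer and values below are hypothetical, not part of ParallelComm):
 *
 *   unsigned char scratch[64];
 *   unsigned char* cursor = scratch;
 *   int nverts   = 8;
 *   double coord = 3.14;
 *   PACK( cursor, &nverts, 1 );      // cursor advances by sizeof(int)
 *   PACK( cursor, &coord, 1 );       // cursor advances by sizeof(double)
 *
 *   cursor = scratch;                // rewind and read the same bytes back
 *   int nverts_out;
 *   double coord_out;
 *   UNPACK( cursor, &nverts_out, 1 );
 *   UNPACK( cursor, &coord_out, 1 );
 */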
00104 static inline void PACK_INTS( unsigned char*& buff, const int* int_val, size_t num )
00105 {
00106     PACK( buff, int_val, num );
00107     PC( num, " ints" );
00108 }
00109 
00110 static inline void PACK_INT( unsigned char*& buff, int int_val )
00111 {
00112     PACK_INTS( buff, &int_val, 1 );
00113 }
00114 
00115 static inline void PACK_DBLS( unsigned char*& buff, const double* dbl_val, size_t num )
00116 {
00117     PACK( buff, dbl_val, num );
00118     PC( num, " doubles" );
00119 }
00120 
00121 // static inline
00122 // void PACK_DBL(unsigned char*& buff, const double dbl_val)
00123 //{ PACK_DBLS(buff, &dbl_val, 1); }
00124 
00125 static inline void PACK_EH( unsigned char*& buff, const EntityHandle* eh_val, size_t num )
00126 {
00127     PACK( buff, eh_val, num );
00128     PC( num, " handles" );
00129 }
00130 
00131 // static inline
00132 // void PACK_CHAR_64(unsigned char*& buff, const char* str)
00133 //{
00134 //  memcpy(buff, str, 64);
00135 //  buff += 64;
00136 //  PC(64, " chars");
00137 //}
00138 
00139 static inline void PACK_VOID( unsigned char*& buff, const void* val, size_t num )
00140 {
00141     PACK( buff, reinterpret_cast< const unsigned char* >( val ), num );
00142     PC( num, " void" );
00143 }
00144 
00145 static inline void PACK_BYTES( unsigned char*& buff, const void* val, int num )
00146 {
00147     PACK_INT( buff, num );
00148     PACK_VOID( buff, val, num );
00149 }
00150 
00151 static inline void PACK_RANGE( unsigned char*& buff, const Range& rng )
00152 {
00153     PACK_INT( buff, rng.psize() );
00154     Range::const_pair_iterator cit;
00155     for( cit = rng.const_pair_begin(); cit != rng.const_pair_end(); ++cit )
00156     {
00157         EntityHandle eh[2] = { cit->first, cit->second };
00158         PACK_EH( buff, eh, 2 );
00159     }
00160     PC( rng.psize(), "-subranged range" );
00161 }
00162 
00163 static inline void UNPACK_INTS( unsigned char*& buff, int* int_val, size_t num )
00164 {
00165     UNPACK( buff, int_val, num );
00166     UPC( num, " ints" );
00167 }
00168 
00169 static inline void UNPACK_INT( unsigned char*& buff, int& int_val )
00170 {
00171     UNPACK_INTS( buff, &int_val, 1 );
00172 }
00173 
00174 static inline void UNPACK_DBLS( unsigned char*& buff, double* dbl_val, size_t num )
00175 {
00176     UNPACK( buff, dbl_val, num );
00177     UPC( num, " doubles" );
00178 }
00179 
00180 static inline void UNPACK_DBL( unsigned char*& buff, double& dbl_val )
00181 {
00182     UNPACK_DBLS( buff, &dbl_val, 1 );
00183 }
00184 
00185 static inline void UNPACK_EH( unsigned char*& buff, EntityHandle* eh_val, size_t num )
00186 {
00187     UNPACK( buff, eh_val, num );
00188     UPC( num, " handles" );
00189 }
00190 
00191 // static inline
00192 // void UNPACK_CHAR_64(unsigned char*& buff, char* char_val)
00193 //{
00194 //  memcpy(char_val, buff, 64);
00195 //  buff += 64;
00196 //  UPC(64, " chars");
00197 //}
00198 
00199 static inline void UNPACK_VOID( unsigned char*& buff, void* val, size_t num )
00200 {
00201     UNPACK( buff, reinterpret_cast< unsigned char* >( val ), num );
00202     UPC( num, " void" );
00203 }
00204 
00205 static inline void UNPACK_TYPE( unsigned char*& buff, EntityType& type )
00206 {
00207     int int_type = MBMAXTYPE;
00208     UNPACK_INT( buff, int_type );
00209     type = static_cast< EntityType >( int_type );
00210     assert( type >= MBVERTEX && type <= MBMAXTYPE );
00211 }
00212 
00213 static inline void UNPACK_RANGE( unsigned char*& buff, Range& rng )
00214 {
00215     int num_subs;
00216     EntityHandle eh[2];
00217     UNPACK_INT( buff, num_subs );
00218     for( int i = 0; i < num_subs; i++ )
00219     {
00220         UPC( num_subs, "-subranged range" );
00221         UNPACK_EH( buff, eh, 2 );
00222         rng.insert( eh[0], eh[1] );
00223     }
00224 }
00225 
00226 enum MBMessageTag
00227 {
00228     MB_MESG_ANY = MPI_ANY_TAG,
00229     MB_MESG_ENTS_ACK,
00230     MB_MESG_ENTS_SIZE,
00231     MB_MESG_ENTS_LARGE,
00232     MB_MESG_REMOTEH_ACK,
00233     MB_MESG_REMOTEH_SIZE,
00234     MB_MESG_REMOTEH_LARGE,
00235     MB_MESG_TAGS_ACK,
00236     MB_MESG_TAGS_SIZE,
00237     MB_MESG_TAGS_LARGE
00238 };
00239 
00240 static inline size_t RANGE_SIZE( const Range& rng )
00241 {
00242     return 2 * sizeof( EntityHandle ) * rng.psize() + sizeof( int );
00243 }
00244 
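/* A Range is serialized as its pair count followed by one (first, last)
 * handle pair per subrange, which is exactly what RANGE_SIZE() accounts for;
 * it can therefore be used to size a buffer before PACK_RANGE(). A minimal
 * sketch (the scratch vector and range contents are hypothetical):
 *
 *   Range verts;
 *   verts.insert( 1, 10 );                        // one contiguous subrange
 *   std::vector< unsigned char > scratch( RANGE_SIZE( verts ) );
 *   unsigned char* cursor = &scratch[0];
 *   PACK_RANGE( cursor, verts );                  // writes 1 int + 2 handles
 *
 *   cursor = &scratch[0];                         // rewind and unpack
 *   Range copy;
 *   UNPACK_RANGE( cursor, copy );                 // copy now equals verts
 */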
00245 #define PRINT_DEBUG_ISEND( A, B, C, D, E )    print_debug_isend( ( A ), ( B ), ( C ), ( D ), ( E ) )
00246 #define PRINT_DEBUG_IRECV( A, B, C, D, E, F ) print_debug_irecv( ( A ), ( B ), ( C ), ( D ), ( E ), ( F ) )
00247 #define PRINT_DEBUG_RECD( A )                 print_debug_recd( ( A ) )
00248 #define PRINT_DEBUG_WAITANY( A, B, C )        print_debug_waitany( ( A ), ( B ), ( C ) )
00249 
00250 void ParallelComm::print_debug_isend( int from, int to, unsigned char* buff, int tag, int sz )
00251 {
00252     myDebug->tprintf( 3, "Isend, %d->%d, buffer ptr = %p, tag=%d, size=%d\n", from, to, (void*)buff, tag, sz );
00253 }
00254 
00255 void ParallelComm::print_debug_irecv( int to, int from, unsigned char* buff, int sz, int tag, int incoming )
00256 {
00257     myDebug->tprintf( 3, "Irecv, %d<-%d, buffer ptr = %p, tag=%d, size=%d", to, from, (void*)buff, tag, sz );
00258     if( tag < MB_MESG_REMOTEH_ACK )
00259         myDebug->printf( 3, ", incoming1=%d\n", incoming );
00260     else if( tag < MB_MESG_TAGS_ACK )
00261         myDebug->printf( 3, ", incoming2=%d\n", incoming );
00262     else
00263         myDebug->printf( 3, ", incoming=%d\n", incoming );
00264 }
00265 
00266 void ParallelComm::print_debug_recd( MPI_Status status )
00267 {
00268     if( myDebug->get_verbosity() == 3 )
00269     {
00270         int this_count;
00271         int success = MPI_Get_count( &status, MPI_UNSIGNED_CHAR, &this_count );
00272         if( MPI_SUCCESS != success ) this_count = -1;
00273         myDebug->tprintf( 3, "Received from %d, count = %d, tag = %d\n", status.MPI_SOURCE, this_count,
00274                           status.MPI_TAG );
00275     }
00276 }
00277 
00278 void ParallelComm::print_debug_waitany( std::vector< MPI_Request >& reqs, int tag, int proc )
00279 {
00280     if( myDebug->get_verbosity() == 3 )
00281     {
00282         myDebug->tprintf( 3, "Waitany, p=%d, ", proc );
00283         if( tag < MB_MESG_REMOTEH_ACK )
00284             myDebug->print( 3, ", recv_ent_reqs=" );
00285         else if( tag < MB_MESG_TAGS_ACK )
00286             myDebug->print( 3, ", recv_remoteh_reqs=" );
00287         else
00288             myDebug->print( 3, ", recv_tag_reqs=" );
00289         for( unsigned int i = 0; i < reqs.size(); i++ )
00290             myDebug->printf( 3, " %p", (void*)(intptr_t)reqs[i] );
00291         myDebug->print( 3, "\n" );
00292     }
00293 }
00294 
00295 /** Name of tag used to store ParallelComm index on mesh partitioning sets */
00296 const char* PARTITIONING_PCOMM_TAG_NAME = "__PRTN_PCOMM";
00297 
00298 /** \brief Tag storing parallel communication objects
00299  *
00300  * This tag stores pointers to ParallelComm communication
00301  * objects; one of these is allocated for each different
00302  * communicator used to read mesh. ParallelComm stores
00303  * partition and interface sets corresponding to its parallel mesh.
00304  * By default, a parallel read uses the first ParallelComm object
00305  * on the interface instance; if instantiated with one, ReadParallel
00306  * adds this object to the interface instance too.
00307  *
00308  * Tag type: opaque
00309  * Tag size: MAX_SHARING_PROCS*sizeof(ParallelComm*)
00310  */
00311 #define PARALLEL_COMM_TAG_NAME "__PARALLEL_COMM"
00312 
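/* Typical application-side usage, shown here as a sketch only (it assumes the
 * get_pcomm()/get_id() accessors declared in ParallelComm.hpp):
 *
 *   moab::Core mb;
 *   // Constructing a ParallelComm registers it on the instance under the
 *   // tag defined above
 *   moab::ParallelComm* pcomm = new moab::ParallelComm( &mb, MPI_COMM_WORLD );
 *
 *   // Code that only has the Interface* can later look it up by its index
 *   moab::ParallelComm* same = moab::ParallelComm::get_pcomm( &mb, pcomm->get_id() );
 */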
00313 ParallelComm::ParallelComm( Interface* impl, MPI_Comm cm, int* id )
00314     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
00315       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
00316       myDebug( NULL )
00317 {
00318     initialize();
00319     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
00320     if( id ) *id = pcommID;
00321 }
00322 
00323 ParallelComm::ParallelComm( Interface* impl, std::vector< unsigned char >& /*tmp_buff*/, MPI_Comm cm, int* id )
00324     : mbImpl( impl ), procConfig( cm ), sharedpTag( 0 ), sharedpsTag( 0 ), sharedhTag( 0 ), sharedhsTag( 0 ),
00325       pstatusTag( 0 ), ifaceSetsTag( 0 ), partitionTag( 0 ), globalPartCount( -1 ), partitioningSet( 0 ),
00326       myDebug( NULL )
00327 {
00328     initialize();
00329     sharedSetData = new SharedSetData( *impl, pcommID, procConfig.proc_rank() );
00330     if( id ) *id = pcommID;
00331 }
00332 
00333 ParallelComm::~ParallelComm()
00334 {
00335     remove_pcomm( this );
00336     delete_all_buffers();
00337     delete myDebug;
00338     delete sharedSetData;
00339 }
00340 
00341 void ParallelComm::initialize()
00342 {
00343     Core* core      = dynamic_cast< Core* >( mbImpl );
00344     sequenceManager = core->sequence_manager();
00345     mbImpl->query_interface( errorHandler );
00346 
00347     // Initialize MPI, if necessary
00348     int flag   = 1;
00349     int retval = MPI_Initialized( &flag );
00350     if( MPI_SUCCESS != retval || !flag )
00351     {
00352         int argc    = 0;
00353         char** argv = NULL;
00354 
00355         // mpi not initialized yet - initialize here
00356         retval = MPI_Init( &argc, &argv );
00357         assert( MPI_SUCCESS == retval );
00358     }
00359 
00360     // Reserve space for vectors
00361     buffProcs.reserve( MAX_SHARING_PROCS );
00362     localOwnedBuffs.reserve( MAX_SHARING_PROCS );
00363     remoteOwnedBuffs.reserve( MAX_SHARING_PROCS );
00364 
00365     pcommID = add_pcomm( this );
00366 
00367     if( !myDebug )
00368     {
00369         myDebug = new DebugOutput( "ParallelComm", std::cerr );
00370         myDebug->set_rank( procConfig.proc_rank() );
00371     }
00372 }
00373 
00374 int ParallelComm::add_pcomm( ParallelComm* pc )
00375 {
00376     // Add this pcomm to instance tag
00377     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS, (ParallelComm*)NULL );
00378     Tag pc_tag = pcomm_tag( mbImpl, true );
00379     assert( 0 != pc_tag );
00380 
00381     const EntityHandle root = 0;
00382     ErrorCode result        = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00383     if( MB_SUCCESS != result && MB_TAG_NOT_FOUND != result ) return -1;
00384     int index = 0;
00385     while( index < MAX_SHARING_PROCS && pc_array[index] )
00386         index++;
00387     if( index == MAX_SHARING_PROCS )
00388     {
00389         index = -1;
00390         assert( false );
00391     }
00392     else
00393     {
00394         pc_array[index] = pc;
00395         mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00396     }
00397     return index;
00398 }
00399 
00400 void ParallelComm::remove_pcomm( ParallelComm* pc )
00401 {
00402     // Remove this pcomm from instance tag
00403     std::vector< ParallelComm* > pc_array( MAX_SHARING_PROCS );
00404     Tag pc_tag = pcomm_tag( mbImpl, true );
00405 
00406     const EntityHandle root                      = 0;
00407     ErrorCode result                             = mbImpl->tag_get_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00408     std::vector< ParallelComm* >::iterator pc_it = std::find( pc_array.begin(), pc_array.end(), pc );
00409     assert( MB_SUCCESS == result && pc_it != pc_array.end() );
00410     // Empty if test to get around compiler warning about unused var
00411     if( MB_SUCCESS == result ) {}
00412 
00413     *pc_it = NULL;
00414     mbImpl->tag_set_data( pc_tag, &root, 1, (void*)&pc_array[0] );
00415 }
00416 
00417 //! Assign a global id space, for largest-dimension or all entities (and
00418 //! in either case for vertices too)
00419 ErrorCode ParallelComm::assign_global_ids( EntityHandle this_set, const int dimension, const int start_id,
00420                                            const bool largest_dim_only, const bool parallel, const bool owned_only )
00421 {
00422     Range entities[4];
00423     ErrorCode result;
00424     std::vector< unsigned char > pstatus;
00425     for( int dim = 0; dim <= dimension; dim++ )
00426     {
00427         if( dim == 0 || !largest_dim_only || dim == dimension )
00428         {
00429             result = mbImpl->get_entities_by_dimension( this_set, dim, entities[dim] );MB_CHK_SET_ERR( result, "Failed to get vertices in assign_global_ids" );
00430         }
00431 
00432         // Need to filter out non-locally-owned entities!!!
00433         pstatus.resize( entities[dim].size() );
00434         result = mbImpl->tag_get_data( pstatus_tag(), entities[dim], &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus in assign_global_ids" );
00435 
00436         Range dum_range;
00437         Range::iterator rit;
00438         unsigned int i;
00439         for( rit = entities[dim].begin(), i = 0; rit != entities[dim].end(); ++rit, i++ )
00440             if( pstatus[i] & PSTATUS_NOT_OWNED ) dum_range.insert( *rit );
00441         entities[dim] = subtract( entities[dim], dum_range );
00442     }
00443 
00444     return assign_global_ids( entities, dimension, start_id, parallel, owned_only );
00445 }
00446 
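/* Only locally owned entities are given ids by the overload above; ownership
 * is read from the PSTATUS_NOT_OWNED bit of the per-entity pstatus tag. The
 * filtering step in isolation (a sketch over hypothetical inputs 'ents' and a
 * parallel 'pstatus' byte vector):
 *
 *   Range drop;
 *   unsigned int i = 0;
 *   for( Range::iterator rit = ents.begin(); rit != ents.end(); ++rit, i++ )
 *       if( pstatus[i] & PSTATUS_NOT_OWNED ) drop.insert( *rit );
 *   ents = subtract( ents, drop );   // keep only the owned entities
 */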
00447 //! Assign a global id space, for largest-dimension or all entities (and
00448 //! in either case for vertices too)
00449 ErrorCode ParallelComm::assign_global_ids( Range entities[], const int dimension, const int start_id,
00450                                            const bool parallel, const bool owned_only )
00451 {
00452     int local_num_elements[4];
00453     ErrorCode result;
00454     for( int dim = 0; dim <= dimension; dim++ )
00455     {
00456         local_num_elements[dim] = entities[dim].size();
00457     }
00458 
00459     // Communicate numbers
00460     std::vector< int > num_elements( procConfig.proc_size() * 4 );
00461 #ifdef MOAB_HAVE_MPI
00462     if( procConfig.proc_size() > 1 && parallel )
00463     {
00464         int retval =
00465             MPI_Allgather( local_num_elements, 4, MPI_INT, &num_elements[0], 4, MPI_INT, procConfig.proc_comm() );
00466         if( 0 != retval ) return MB_FAILURE;
00467     }
00468     else
00469 #endif
00470         for( int dim = 0; dim < 4; dim++ )
00471             num_elements[dim] = local_num_elements[dim];
00472 
00473     // My entities start at one greater than total_elems[d]
00474     int total_elems[4] = { start_id, start_id, start_id, start_id };
00475 
00476     for( unsigned int proc = 0; proc < procConfig.proc_rank(); proc++ )
00477     {
00478         for( int dim = 0; dim < 4; dim++ )
00479             total_elems[dim] += num_elements[4 * proc + dim];
00480     }
00481 
00482     // Assign global ids now
00483     Tag gid_tag = mbImpl->globalId_tag();
00484 
00485     for( int dim = 0; dim < 4; dim++ )
00486     {
00487         if( entities[dim].empty() ) continue;
00488         num_elements.resize( entities[dim].size() );
00489         int i = 0;
00490         for( Range::iterator rit = entities[dim].begin(); rit != entities[dim].end(); ++rit )
00491             num_elements[i++] = total_elems[dim]++;
00492 
00493         result = mbImpl->tag_set_data( gid_tag, entities[dim], &num_elements[0] );MB_CHK_SET_ERR( result, "Failed to set global id tag in assign_global_ids" );
00494     }
00495 
00496     if( owned_only ) return MB_SUCCESS;
00497 
00498     // Exchange tags
00499     for( int dim = 1; dim < 4; dim++ )
00500         entities[0].merge( entities[dim] );
00501 
00502     return exchange_tags( gid_tag, entities[0] );
00503 }
00504 
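/* The starting id each rank uses above is an exclusive prefix sum of the
 * per-rank entity counts gathered with MPI_Allgather. The same arithmetic as
 * a standalone sketch (function and argument names are hypothetical):
 *
 *   // counts[4 * p + d] = number of dimension-d entities owned by rank p
 *   int first_id_for_rank( const std::vector< int >& counts, int my_rank, int dim, int start_id )
 *   {
 *       int id = start_id;
 *       for( int p = 0; p < my_rank; p++ )
 *           id += counts[4 * p + dim];
 *       return id;   // first global id this rank assigns for dimension dim
 *   }
 */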
00505 int ParallelComm::get_buffers( int to_proc, bool* is_new )
00506 {
00507     int ind                                   = -1;
00508     std::vector< unsigned int >::iterator vit = std::find( buffProcs.begin(), buffProcs.end(), to_proc );
00509     if( vit == buffProcs.end() )
00510     {
00511         assert( "shouldn't need buffer to myself" && to_proc != (int)procConfig.proc_rank() );
00512         ind = buffProcs.size();
00513         buffProcs.push_back( (unsigned int)to_proc );
00514         localOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
00515         remoteOwnedBuffs.push_back( new Buffer( INITIAL_BUFF_SIZE ) );
00516         if( is_new ) *is_new = true;
00517     }
00518     else
00519     {
00520         ind = vit - buffProcs.begin();
00521         if( is_new ) *is_new = false;
00522     }
00523     assert( ind < MAX_SHARING_PROCS );
00524     return ind;
00525 }
00526 
00527 ErrorCode ParallelComm::broadcast_entities( const int from_proc, Range& entities, const bool adjacencies,
00528                                             const bool tags )
00529 {
00530 #ifndef MOAB_HAVE_MPI
00531     return MB_FAILURE;
00532 #else
00533 
00534     ErrorCode result = MB_SUCCESS;
00535     int success;
00536     int buff_size;
00537 
00538     Buffer buff( INITIAL_BUFF_SIZE );
00539     buff.reset_ptr( sizeof( int ) );
00540     if( (int)procConfig.proc_rank() == from_proc )
00541     {
00542         result = add_verts( entities );MB_CHK_SET_ERR( result, "Failed to add adj vertices" );
00543 
00544         buff.reset_ptr( sizeof( int ) );
00545         result = pack_buffer( entities, adjacencies, tags, false, -1, &buff );MB_CHK_SET_ERR( result, "Failed to compute buffer size in broadcast_entities" );
00546         buff.set_stored_size();
00547         buff_size = buff.buff_ptr - buff.mem_ptr;
00548     }
00549 
00550     success = MPI_Bcast( &buff_size, 1, MPI_INT, from_proc, procConfig.proc_comm() );
00551     if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" ); }
00552 
00553     if( !buff_size )  // No data
00554         return MB_SUCCESS;
00555 
00556     if( (int)procConfig.proc_rank() != from_proc ) buff.reserve( buff_size );
00557 
00558     size_t offset = 0;
00559     while( buff_size )
00560     {
00561         int sz  = std::min( buff_size, MAX_BCAST_SIZE );
00562         success = MPI_Bcast( buff.mem_ptr + offset, sz, MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
00563         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer failed" ); }
00564 
00565         offset += sz;
00566         buff_size -= sz;
00567     }
00568 
00569     if( (int)procConfig.proc_rank() != from_proc )
00570     {
00571         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
00572         std::vector< std::vector< int > > dum1p;
00573         std::vector< EntityHandle > dum2, dum4;
00574         std::vector< unsigned int > dum3;
00575         buff.reset_ptr( sizeof( int ) );
00576         result = unpack_buffer( buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );MB_CHK_SET_ERR( result, "Failed to unpack buffer in broadcast_entities" );
00577         std::copy( dum4.begin(), dum4.end(), range_inserter( entities ) );
00578     }
00579 
00580     return MB_SUCCESS;
00581 #endif
00582 }
00583 
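/* broadcast_entities() pushes the packed buffer out in chunks of at most
 * MAX_BCAST_SIZE bytes so every MPI_Bcast count stays well inside int range.
 * The chunking pattern on its own (a sketch; 'bytes' and 'nbytes' are
 * hypothetical):
 *
 *   void chunked_bcast( unsigned char* bytes, int nbytes, int root, MPI_Comm comm )
 *   {
 *       size_t offset = 0;
 *       while( nbytes )
 *       {
 *           int sz = std::min( nbytes, MAX_BCAST_SIZE );
 *           MPI_Bcast( bytes + offset, sz, MPI_UNSIGNED_CHAR, root, comm );
 *           offset += sz;
 *           nbytes -= sz;
 *       }
 *   }
 */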
00584 ErrorCode ParallelComm::scatter_entities( const int from_proc, std::vector< Range >& entities, const bool adjacencies,
00585                                           const bool tags )
00586 {
00587 #ifndef MOAB_HAVE_MPI
00588     return MB_FAILURE;
00589 #else
00590     ErrorCode result = MB_SUCCESS;
00591     int i, success, buff_size, prev_size;
00592     int nProcs         = (int)procConfig.proc_size();
00593     int* sendCounts    = new int[nProcs];
00594     int* displacements = new int[nProcs];
00595     sendCounts[0]      = sizeof( int );
00596     displacements[0]   = 0;
00597     Buffer buff( INITIAL_BUFF_SIZE );
00598     buff.reset_ptr( sizeof( int ) );
00599     buff.set_stored_size();
00600     unsigned int my_proc = procConfig.proc_rank();
00601 
00602     // Get buffer size array for each remote processor
00603     if( my_proc == (unsigned int)from_proc )
00604     {
00605         for( i = 1; i < nProcs; i++ )
00606         {
00607             prev_size = buff.buff_ptr - buff.mem_ptr;
00608             buff.reset_ptr( prev_size + sizeof( int ) );
00609             result = add_verts( entities[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
00610 
00611             result = pack_buffer( entities[i], adjacencies, tags, false, -1, &buff );
00612             if( MB_SUCCESS != result )
00613             {
00614                 delete[] sendCounts;
00615                 delete[] displacements;
00616                 MB_SET_ERR( result, "Failed to pack buffer in scatter_entities" );
00617             }
00618 
00619             buff_size                               = buff.buff_ptr - buff.mem_ptr - prev_size;
00620             *( (int*)( buff.mem_ptr + prev_size ) ) = buff_size;
00621             sendCounts[i]                           = buff_size;
00622         }
00623     }
00624 
00625     // Broadcast buffer size array
00626     success = MPI_Bcast( sendCounts, nProcs, MPI_INT, from_proc, procConfig.proc_comm() );
00627     if( MPI_SUCCESS != success )
00628     {
00629         delete[] sendCounts;
00630         delete[] displacements;
00631         MB_SET_ERR( MB_FAILURE, "MPI_Bcast of buffer size failed" );
00632     }
00633 
00634     for( i = 1; i < nProcs; i++ )
00635     {
00636         displacements[i] = displacements[i - 1] + sendCounts[i - 1];
00637     }
00638 
00639     Buffer rec_buff;
00640     rec_buff.reserve( sendCounts[my_proc] );
00641 
00642     // Scatter actual geometry
00643     success = MPI_Scatterv( buff.mem_ptr, sendCounts, displacements, MPI_UNSIGNED_CHAR, rec_buff.mem_ptr,
00644                             sendCounts[my_proc], MPI_UNSIGNED_CHAR, from_proc, procConfig.proc_comm() );
00645 
00646     if( MPI_SUCCESS != success )
00647     {
00648         delete[] sendCounts;
00649         delete[] displacements;
00650         MB_SET_ERR( MB_FAILURE, "MPI_Scatterv of buffer failed" );
00651     }
00652 
00653     // Unpack in remote processors
00654     if( my_proc != (unsigned int)from_proc )
00655     {
00656         std::vector< std::vector< EntityHandle > > dum1a, dum1b;
00657         std::vector< std::vector< int > > dum1p;
00658         std::vector< EntityHandle > dum2, dum4;
00659         std::vector< unsigned int > dum3;
00660         rec_buff.reset_ptr( sizeof( int ) );
00661         result = unpack_buffer( rec_buff.buff_ptr, false, from_proc, -1, dum1a, dum1b, dum1p, dum2, dum2, dum3, dum4 );
00662         if( MB_SUCCESS != result )
00663         {
00664             delete[] sendCounts;
00665             delete[] displacements;
00666             MB_SET_ERR( result, "Failed to unpack buffer in scatter_entities" );
00667         }
00668 
00669         std::copy( dum4.begin(), dum4.end(), range_inserter( entities[my_proc] ) );
00670     }
00671 
00672     delete[] sendCounts;
00673     delete[] displacements;
00674 
00675     return MB_SUCCESS;
00676 #endif
00677 }
00678 
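/* For MPI_Scatterv the displacement of each rank's chunk is just the running
 * sum of the preceding per-rank byte counts, as computed above. In isolation
 * (a sketch):
 *
 *   std::vector< int > make_displacements( const std::vector< int >& send_counts )
 *   {
 *       std::vector< int > displ( send_counts.size(), 0 );
 *       for( size_t i = 1; i < send_counts.size(); i++ )
 *           displ[i] = displ[i - 1] + send_counts[i - 1];
 *       return displ;
 *   }
 */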
00679 ErrorCode ParallelComm::send_entities( const int to_proc, Range& orig_ents, const bool adjs, const bool tags,
00680                                        const bool store_remote_handles, const bool is_iface, Range& /*final_ents*/,
00681                                        int& incoming1, int& incoming2, TupleList& entprocs,
00682                                        std::vector< MPI_Request >& recv_remoteh_reqs, bool /*wait_all*/ )
00683 {
00684 #ifndef MOAB_HAVE_MPI
00685     return MB_FAILURE;
00686 #else
00687     // Pack entities to local buffer
00688     int ind = get_buffers( to_proc );
00689     localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
00690 
00691     // Add vertices
00692     ErrorCode result = add_verts( orig_ents );MB_CHK_SET_ERR( result, "Failed to add verts in send_entities" );
00693 
00694     // Filter out entities already shared with destination
00695     Range tmp_range;
00696     result = filter_pstatus( orig_ents, PSTATUS_SHARED, PSTATUS_AND, to_proc, &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
00697     if( !tmp_range.empty() ) { orig_ents = subtract( orig_ents, tmp_range ); }
00698 
00699     result = pack_buffer( orig_ents, adjs, tags, store_remote_handles, to_proc, localOwnedBuffs[ind], &entprocs );MB_CHK_SET_ERR( result, "Failed to pack buffer in send_entities" );
00700 
00701     // Send buffer
00702     result = send_buffer( to_proc, localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind], recvReqs[2 * ind + 1],
00703                           (int*)( remoteOwnedBuffs[ind]->mem_ptr ),
00704                           //&ackbuff,
00705                           incoming1, MB_MESG_REMOTEH_SIZE,
00706                           ( !is_iface && store_remote_handles ? localOwnedBuffs[ind] : NULL ),
00707                           &recv_remoteh_reqs[2 * ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to send buffer" );
00708 
00709     return MB_SUCCESS;
00710 #endif
00711 }
00712 
00713 ErrorCode ParallelComm::send_entities( std::vector< unsigned int >& send_procs, std::vector< Range* >& send_ents,
00714                                        int& incoming1, int& incoming2, const bool store_remote_handles )
00715 {
00716 #ifdef MOAB_HAVE_MPE
00717     if( myDebug->get_verbosity() == 2 )
00718     {
00719         MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_entities." );
00720     }
00721 #endif
00722     myDebug->tprintf( 1, "Entering send_entities\n" );
00723     if( myDebug->get_verbosity() == 4 )
00724     {
00725         msgs.clear();
00726         msgs.reserve( MAX_SHARING_PROCS );
00727     }
00728 
00729     unsigned int i;
00730     int ind;
00731     ErrorCode result = MB_SUCCESS;
00732 
00733     // Set buffProcs with communicating procs
00734     unsigned int n_proc = send_procs.size();
00735     for( i = 0; i < n_proc; i++ )
00736     {
00737         ind    = get_buffers( send_procs[i] );
00738         result = add_verts( *send_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
00739 
00740         // Filter out entities already shared with destination
00741         Range tmp_range;
00742         result = filter_pstatus( *send_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
00743         if( !tmp_range.empty() ) { *send_ents[i] = subtract( *send_ents[i], tmp_range ); }
00744     }
00745 
00746     //===========================================
00747     // Get entities to be sent to neighbors
00748     // Need to get procs each entity is sent to
00749     //===========================================
00750     Range allsent, tmp_range;
00751     int npairs = 0;
00752     TupleList entprocs;
00753     for( i = 0; i < n_proc; i++ )
00754     {
00755         int n_ents = send_ents[i]->size();
00756         if( n_ents > 0 )
00757         {
00758             npairs += n_ents;  // Get the total # of proc/handle pairs
00759             allsent.merge( *send_ents[i] );
00760         }
00761     }
00762 
00763     // Allocate a TupleList of that size
00764     entprocs.initialize( 1, 0, 1, 0, npairs );
00765     entprocs.enableWriteAccess();
00766 
00767     // Put the proc/handle pairs in the list
00768     for( i = 0; i < n_proc; i++ )
00769     {
00770         for( Range::iterator rit = send_ents[i]->begin(); rit != send_ents[i]->end(); ++rit )
00771         {
00772             entprocs.vi_wr[entprocs.get_n()]  = send_procs[i];
00773             entprocs.vul_wr[entprocs.get_n()] = *rit;
00774             entprocs.inc_n();
00775         }
00776     }
00777 
00778     // Sort by handle
00779     moab::TupleList::buffer sort_buffer;
00780     sort_buffer.buffer_init( npairs );
00781     entprocs.sort( 1, &sort_buffer );
00782     entprocs.disableWriteAccess();
00783     sort_buffer.reset();
00784 
00785     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
00786                       (unsigned long)allsent.size() );
00787 
00788     //===========================================
00789     // Pack and send ents from this proc to others
00790     //===========================================
00791     for( i = 0; i < n_proc; i++ )
00792     {
00793         if( send_ents[i]->size() > 0 )
00794         {
00795             ind = get_buffers( send_procs[i] );
00796             myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", send_ents[i]->compactness(),
00797                               (unsigned long)send_ents[i]->size() );
00798             // Reserve space on front for size and for initial buff size
00799             localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
00800             result = pack_buffer( *send_ents[i], false, true, store_remote_handles, buffProcs[ind],
00801                                   localOwnedBuffs[ind], &entprocs, &allsent );
00802 
00803             if( myDebug->get_verbosity() == 4 )
00804             {
00805                 msgs.resize( msgs.size() + 1 );
00806                 msgs.back() = new Buffer( *localOwnedBuffs[ind] );
00807             }
00808 
00809             // Send the buffer (size stored in front in send_buffer)
00810             result = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[2 * ind],
00811                                   recvReqs[2 * ind + 1], &ackbuff, incoming1, MB_MESG_REMOTEH_SIZE,
00812                                   ( store_remote_handles ? localOwnedBuffs[ind] : NULL ), &recvRemotehReqs[2 * ind],
00813                                   &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost send" );
00814         }
00815     }
00816     entprocs.reset();
00817 
00818 #ifdef MOAB_HAVE_MPE
00819     if( myDebug->get_verbosity() == 2 )
00820     {
00821         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_entities." );
00822     }
00823 #endif
00824 
00825     return MB_SUCCESS;
00826 }
00827 
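/* The overload above collects one (destination proc, entity handle) pair per
 * send in a TupleList with a single int slot and a single handle slot, then
 * sorts on the handle column so packing can find every destination of an
 * entity. The pattern condensed into a sketch ('npairs', 'dest_proc' and
 * 'handle' are hypothetical):
 *
 *   TupleList entprocs;
 *   entprocs.initialize( 1, 0, 1, 0, npairs );   // 1 int, 1 handle per tuple
 *   entprocs.enableWriteAccess();
 *   entprocs.vi_wr[entprocs.get_n()]  = dest_proc;
 *   entprocs.vul_wr[entprocs.get_n()] = handle;
 *   entprocs.inc_n();
 *
 *   TupleList::buffer sort_buffer;
 *   sort_buffer.buffer_init( npairs );
 *   entprocs.sort( 1, &sort_buffer );            // sort on the handle column
 *   entprocs.disableWriteAccess();
 *   sort_buffer.reset();
 */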
00828 /////////////////////////////////////////////////////////////////////////////////
00829 // Send and Receive routines for a sequence of entities: use case UMR
00830 /////////////////////////////////////////////////////////////////////////////////
00831 void print_buff( unsigned char* ch, int size )
00832 {
00833     for( int i = 0; i < size; i++ )
00834         std::cout << ch[i];
00835     std::cout << "\n";
00836 }
00837 ErrorCode ParallelComm::send_recv_entities( std::vector< int >& send_procs, std::vector< std::vector< int > >& msgsizes,
00838                                             std::vector< std::vector< EntityHandle > >& senddata,
00839                                             std::vector< std::vector< EntityHandle > >& recvdata )
00840 {
00841 #ifdef USE_MPE
00842     if( myDebug->get_verbosity() == 2 )
00843     {
00844         MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting send_recv_entities." );
00845     }
00846 #endif
00847     myDebug->tprintf( 1, "Entering send_recv_entities\n" );
00848     if( myDebug->get_verbosity() == 4 )
00849     {
00850         msgs.clear();
00851         msgs.reserve( MAX_SHARING_PROCS );
00852     }
00853 
00854     // unsigned int i;
00855     int i, ind, success;
00856     ErrorCode error = MB_SUCCESS;
00857 
00858     //===========================================
00859     // Pack and send ents from this proc to others
00860     //===========================================
00861 
00862     // std::cout<<"resetting all buffers"<<std::endl;
00863 
00864     reset_all_buffers();
00865     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
00866     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
00867     int ack_buff;
00868     int incoming = 0;
00869 
00870     std::vector< unsigned int >::iterator sit;
00871 
00872     for( ind = 0, sit = buffProcs.begin(); sit != buffProcs.end(); ++sit, ind++ )
00873     {
00874         incoming++;
00875         PRINT_DEBUG_IRECV( *sit, procConfig.proc_rank(), remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
00876                            MB_MESG_ENTS_SIZE, incoming );
00877 
00878         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, *sit,
00879                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
00880         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in send_recv_entities" ); }
00881     }
00882 
00883     //  std::set<unsigned int>::iterator it;
00884     for( i = 0; i < (int)send_procs.size(); i++ )
00885     {
00886         // Get index of the shared processor in the local buffer
00887         ind = get_buffers( send_procs[i] );
00888         localOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
00889 
00890         int buff_size = msgsizes[i].size() * sizeof( int ) + senddata[i].size() * sizeof( EntityHandle );
00891         localOwnedBuffs[ind]->check_space( buff_size );
00892 
00893         // Pack entities
00894         std::vector< int > msg;
00895         msg.insert( msg.end(), msgsizes[i].begin(), msgsizes[i].end() );
00896         PACK_INTS( localOwnedBuffs[ind]->buff_ptr, &msg[0], msg.size() );
00897 
00898         std::vector< EntityHandle > entities;
00899         entities.insert( entities.end(), senddata[i].begin(), senddata[i].end() );
00900         PACK_EH( localOwnedBuffs[ind]->buff_ptr, &entities[0], entities.size() );
00901         localOwnedBuffs[ind]->set_stored_size();
00902 
00903         if( myDebug->get_verbosity() == 4 )
00904         {
00905             msgs.resize( msgs.size() + 1 );
00906             msgs.back() = new Buffer( *localOwnedBuffs[ind] );
00907         }
00908 
00909         // Send the buffer (size stored in front in send_buffer)
00910         error = send_buffer( send_procs[i], localOwnedBuffs[ind], MB_MESG_ENTS_SIZE, sendReqs[3 * ind],
00911                              recv_ent_reqs[3 * ind + 2], &ack_buff, incoming );MB_CHK_SET_ERR( error, "Failed to Isend in send_recv_entities" );
00912     }
00913 
00914     //===========================================
00915     // Receive and unpack ents from received data
00916     //===========================================
00917 
00918     while( incoming )
00919     {
00920 
00921         MPI_Status status;
00922         int index_in_recv_requests;
00923 
00924         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
00925         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &index_in_recv_requests, &status );
00926         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in send_recv_entities" ); }
00927 
00928         // Processor index in the list is divided by 3
00929         ind = index_in_recv_requests / 3;
00930 
00931         PRINT_DEBUG_RECD( status );
00932 
00933         // OK, received something; decrement incoming counter
00934         incoming--;
00935 
00936         bool done = false;
00937 
00938         error = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind],
00939                              recv_ent_reqs[3 * ind + 1],  // This is for receiving the second message
00940                              recv_ent_reqs[3 * ind + 2],  // This would be for ack, but it is not
00941                                                           // used; consider removing it
00942                              incoming, localOwnedBuffs[ind],
00943                              sendReqs[3 * ind + 1],  // Send request for sending the second message
00944                              sendReqs[3 * ind + 2],  // This is for sending the ack
00945                              done );MB_CHK_SET_ERR( error, "Failed to resize recv buffer" );
00946 
00947         if( done )
00948         {
00949             remoteOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
00950 
00951             int from_proc = status.MPI_SOURCE;
00952             int idx       = std::find( send_procs.begin(), send_procs.end(), from_proc ) - send_procs.begin();
00953 
00954             int msg = msgsizes[idx].size();
00955             std::vector< int > recvmsg( msg );
00956             int ndata = senddata[idx].size();
00957             std::vector< EntityHandle > dum_vec( ndata );
00958 
00959             UNPACK_INTS( remoteOwnedBuffs[ind]->buff_ptr, &recvmsg[0], msg );
00960             UNPACK_EH( remoteOwnedBuffs[ind]->buff_ptr, &dum_vec[0], ndata );
00961 
00962             recvdata[idx].insert( recvdata[idx].end(), dum_vec.begin(), dum_vec.end() );
00963         }
00964     }
00965 
00966 #ifdef USE_MPE
00967     if( myDebug->get_verbosity() == 2 )
00968     {
00969         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending send_recv_entities." );
00970     }
00971 #endif
00972 
00973     return MB_SUCCESS;
00974 }
00975 
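/* send_recv_entities() keeps three MPI_Request slots per neighbor: 3*ind for
 * the initial size message, 3*ind+1 for the large follow-up and 3*ind+2 for
 * the ack. The neighbor index is therefore recovered from MPI_Waitany by
 * integer division (a sketch; 'requests' and 'nprocs' are hypothetical):
 *
 *   int fired;
 *   MPI_Status status;
 *   MPI_Waitany( 3 * nprocs, &requests[0], &fired, &status );
 *   int neighbor = fired / 3;   // index into buffProcs
 *   int slot     = fired % 3;   // 0 = size msg, 1 = large msg, 2 = ack
 */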
00976 ErrorCode ParallelComm::update_remote_data( EntityHandle entity, std::vector< int >& procs,
00977                                             std::vector< EntityHandle >& handles )
00978 {
00979     ErrorCode error;
00980     unsigned char pstatus = PSTATUS_INTERFACE;
00981 
00982     int procmin = *std::min_element( procs.begin(), procs.end() );
00983 
00984     if( (int)rank() > procmin )
00985         pstatus |= PSTATUS_NOT_OWNED;
00986     else
00987         procmin = rank();
00988 
00989     // DBG
00990     // std::cout<<"entity = "<<entity<<std::endl;
00991     // for (int j=0; j<procs.size(); j++)
00992     // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
00993     // DBG
00994 
00995     if( (int)procs.size() > 1 )
00996     {
00997         procs.push_back( rank() );
00998         handles.push_back( entity );
00999 
01000         int idx = std::find( procs.begin(), procs.end(), procmin ) - procs.begin();
01001 
01002         std::iter_swap( procs.begin(), procs.begin() + idx );
01003         std::iter_swap( handles.begin(), handles.begin() + idx );
01004 
01005         // DBG
01006         //  std::cout<<"entity = "<<entity<<std::endl;
01007         // for (int j=0; j<procs.size(); j++)
01008         // std::cout<<"procs["<<j<<"] = "<<procs[j]<<", handles["<<j<<"] = "<<handles[j]<<std::endl;
01009         // DBG
01010     }
01011 
01012     // if ((entity == 10388) && (rank()==1))
01013     //    std::cout<<"Here"<<std::endl;
01014 
01015     error = update_remote_data( entity, &procs[0], &handles[0], procs.size(), pstatus );MB_CHK_ERR( error );
01016 
01017     return MB_SUCCESS;
01018 }
01019 
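/* Before calling the pointer-based update_remote_data(), the overload above
 * moves the owning rank (the smallest proc id) and its handle to position 0
 * of the two parallel vectors. That reordering step alone (a sketch):
 *
 *   void owner_to_front( std::vector< int >& procs, std::vector< EntityHandle >& handles, int owner )
 *   {
 *       size_t idx = std::find( procs.begin(), procs.end(), owner ) - procs.begin();
 *       std::iter_swap( procs.begin(), procs.begin() + idx );
 *       std::iter_swap( handles.begin(), handles.begin() + idx );
 *   }
 */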
01020 ErrorCode ParallelComm::get_remote_handles( EntityHandle* local_vec, EntityHandle* rem_vec, int num_ents, int to_proc )
01021 {
01022     ErrorCode error;
01023     std::vector< EntityHandle > newents;
01024     error = get_remote_handles( true, local_vec, rem_vec, num_ents, to_proc, newents );MB_CHK_ERR( error );
01025 
01026     return MB_SUCCESS;
01027 }
01028 
01029 //////////////////////////////////////////////////////////////////
01030 
01031 ErrorCode ParallelComm::recv_entities( const int from_proc, const bool store_remote_handles, const bool is_iface,
01032                                        Range& final_ents, int& incoming1, int& incoming2,
01033                                        std::vector< std::vector< EntityHandle > >& L1hloc,
01034                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01035                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01036                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01037                                        std::vector< MPI_Request >& recv_remoteh_reqs, bool /*wait_all*/ )
01038 {
01039 #ifndef MOAB_HAVE_MPI
01040     return MB_FAILURE;
01041 #else
01042     // Non-blocking receive for the first message (having size info)
01043     int ind1 = get_buffers( from_proc );
01044     incoming1++;
01045     PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE,
01046                        MB_MESG_ENTS_SIZE, incoming1 );
01047     int success = MPI_Irecv( remoteOwnedBuffs[ind1]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc,
01048                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind1] );
01049     if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" ); }
01050 
01051     // Receive messages in while loop
01052     return recv_messages( from_proc, store_remote_handles, is_iface, final_ents, incoming1, incoming2, L1hloc, L1hrem,
01053                           L1p, L2hloc, L2hrem, L2p, recv_remoteh_reqs );
01054 #endif
01055 }
01056 
01057 ErrorCode ParallelComm::recv_entities( std::set< unsigned int >& recv_procs, int incoming1, int incoming2,
01058                                        const bool store_remote_handles, const bool migrate )
01059 {
01060     //===========================================
01061     // Receive/unpack new entities
01062     //===========================================
01063     // Number of incoming messages is the number of procs we communicate with
01064     int success, ind, i;
01065     ErrorCode result;
01066     MPI_Status status;
01067     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
01068     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
01069     std::vector< std::vector< int > > L1p( buffProcs.size() );
01070     std::vector< EntityHandle > L2hloc, L2hrem;
01071     std::vector< unsigned int > L2p;
01072     std::vector< EntityHandle > new_ents;
01073 
01074     while( incoming1 )
01075     {
01076         // Wait for all recvs of ents before proceeding to sending remote handles,
01077         // b/c some procs may have sent to a 3rd proc ents owned by me;
01078         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
01079 
01080         success = MPI_Waitany( 2 * buffProcs.size(), &recvReqs[0], &ind, &status );
01081         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
01082 
01083         PRINT_DEBUG_RECD( status );
01084 
01085         // OK, received something; decrement incoming counter
01086         incoming1--;
01087         bool done = false;
01088 
01089         // In case ind is for ack, we need index of one before it
01090         unsigned int base_ind = 2 * ( ind / 2 );
01091         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 2], recvReqs[ind], recvReqs[ind + 1],
01092                               incoming1, localOwnedBuffs[ind / 2], sendReqs[base_ind], sendReqs[base_ind + 1], done,
01093                               ( store_remote_handles ? localOwnedBuffs[ind / 2] : NULL ), MB_MESG_REMOTEH_SIZE,
01094                               &recvRemotehReqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
01095 
01096         if( done )
01097         {
01098             if( myDebug->get_verbosity() == 4 )
01099             {
01100                 msgs.resize( msgs.size() + 1 );
01101                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 2] );
01102             }
01103 
01104             // Message completely received - process buffer that was sent
01105             remoteOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
01106             result = unpack_buffer( remoteOwnedBuffs[ind / 2]->buff_ptr, store_remote_handles, buffProcs[ind / 2],
01107                                     ind / 2, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents, true );
01108             if( MB_SUCCESS != result )
01109             {
01110                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
01111                 print_buffer( remoteOwnedBuffs[ind / 2]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 2], false );
01112                 return result;
01113             }
01114 
01115             if( recvReqs.size() != 2 * buffProcs.size() )
01116             {
01117                 // Post irecv's for remote handles from new proc
01118                 recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01119                 for( i = recvReqs.size(); i < (int)( 2 * buffProcs.size() ); i += 2 )
01120                 {
01121                     localOwnedBuffs[i / 2]->reset_buffer();
01122                     incoming2++;
01123                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 2], localOwnedBuffs[i / 2]->mem_ptr,
01124                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
01125                     success = MPI_Irecv( localOwnedBuffs[i / 2]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
01126                                          buffProcs[i / 2], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
01127                                          &recvRemotehReqs[i] );
01128                     if( success != MPI_SUCCESS )
01129                     {
01130                         MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" );
01131                     }
01132                 }
01133                 recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01134                 sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
01135             }
01136         }
01137     }
01138 
01139     // Assign and remove newly created elements from/to receive processor
01140     result = assign_entities_part( new_ents, procConfig.proc_rank() );MB_CHK_SET_ERR( result, "Failed to assign entities to part" );
01141     if( migrate )
01142     {
01143         // result = remove_entities_part(allsent, procConfig.proc_rank());MB_CHK_SET_ERR(result,
01144         // "Failed to remove entities from part");
01145     }
01146 
01147     // Add requests for any new addl procs
01148     if( recvReqs.size() != 2 * buffProcs.size() )
01149     {
01150         // Shouldn't get here...
01151         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in entity exchange" );
01152     }
01153 
01154 #ifdef MOAB_HAVE_MPE
01155     if( myDebug->get_verbosity() == 2 )
01156     {
01157         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending recv entities." );
01158     }
01159 #endif
01160 
01161     //===========================================
01162     // Send local handles for new entity to owner
01163     //===========================================
01164     std::set< unsigned int >::iterator it  = recv_procs.begin();
01165     std::set< unsigned int >::iterator eit = recv_procs.end();
01166     for( ; it != eit; ++it )
01167     {
01168         ind = get_buffers( *it );
01169         // Reserve space on front for size and for initial buff size
01170         remoteOwnedBuffs[ind]->reset_buffer( sizeof( int ) );
01171 
01172         result = pack_remote_handles( L1hloc[ind], L1hrem[ind], L1p[ind], buffProcs[ind], remoteOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
01173         remoteOwnedBuffs[ind]->set_stored_size();
01174 
01175         if( myDebug->get_verbosity() == 4 )
01176         {
01177             msgs.resize( msgs.size() + 1 );
01178             msgs.back() = new Buffer( *remoteOwnedBuffs[ind] );
01179         }
01180         result = send_buffer( buffProcs[ind], remoteOwnedBuffs[ind], MB_MESG_REMOTEH_SIZE, sendReqs[2 * ind],
01181                               recvRemotehReqs[2 * ind + 1], &ackbuff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
01182     }
01183 
01184     //===========================================
01185     // Process remote handles of my ghosteds
01186     //===========================================
01187     while( incoming2 )
01188     {
01189         PRINT_DEBUG_WAITANY( recvRemotehReqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
01190         success = MPI_Waitany( 2 * buffProcs.size(), &recvRemotehReqs[0], &ind, &status );
01191         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in owned entity exchange" ); }
01192 
01193         // OK, received something; decrement incoming counter
01194         incoming2--;
01195 
01196         PRINT_DEBUG_RECD( status );
01197         bool done             = false;
01198         unsigned int base_ind = 2 * ( ind / 2 );
01199         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 2], recvRemotehReqs[ind],
01200                               recvRemotehReqs[ind + 1], incoming2, remoteOwnedBuffs[ind / 2], sendReqs[base_ind],
01201                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
01202         if( done )
01203         {
01204             // Incoming remote handles
01205             if( myDebug->get_verbosity() == 4 )
01206             {
01207                 msgs.resize( msgs.size() + 1 );
01208                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 2] );
01209             }
01210 
01211             localOwnedBuffs[ind / 2]->reset_ptr( sizeof( int ) );
01212             result =
01213                 unpack_remote_handles( buffProcs[ind / 2], localOwnedBuffs[ind / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
01214         }
01215     }
01216 
01217 #ifdef MOAB_HAVE_MPE
01218     if( myDebug->get_verbosity() == 2 )
01219     {
01220         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
01221         MPE_Log_event( OWNED_END, procConfig.proc_rank(), "Ending recv entities (still doing checks)." );
01222     }
01223 #endif
01224     myDebug->tprintf( 1, "Exiting recv_entities.\n" );
01225 
01226     return MB_SUCCESS;
01227 }
01228 
01229 ErrorCode ParallelComm::recv_messages( const int from_proc, const bool store_remote_handles, const bool is_iface,
01230                                        Range& final_ents, int& incoming1, int& incoming2,
01231                                        std::vector< std::vector< EntityHandle > >& L1hloc,
01232                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01233                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01234                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01235                                        std::vector< MPI_Request >& recv_remoteh_reqs )
01236 {
01237 #ifndef MOAB_HAVE_MPI
01238     return MB_FAILURE;
01239 #else
01240     MPI_Status status;
01241     ErrorCode result;
01242     int ind1 = get_buffers( from_proc );
01243     int success, ind2;
01244     std::vector< EntityHandle > new_ents;
01245 
01246     // Wait and receive messages
01247     while( incoming1 )
01248     {
01249         PRINT_DEBUG_WAITANY( recvReqs, MB_MESG_TAGS_SIZE, procConfig.proc_rank() );
01250         success = MPI_Waitany( 2, &recvReqs[2 * ind1], &ind2, &status );
01251         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_messages" ); }
01252 
01253         PRINT_DEBUG_RECD( status );
01254 
01255         // OK, received something; decrement incoming counter
01256         incoming1--;
01257         bool done = false;
01258 
01259         // In case ind is for ack, we need index of one before it
01260         ind2 += 2 * ind1;
01261         unsigned int base_ind = 2 * ( ind2 / 2 );
01262 
01263         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind2 / 2],
01264                               // recvbuff,
01265                               recvReqs[ind2], recvReqs[ind2 + 1], incoming1, localOwnedBuffs[ind2 / 2],
01266                               sendReqs[base_ind], sendReqs[base_ind + 1], done,
01267                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind2 / 2] : NULL ),
01268                               MB_MESG_REMOTEH_SIZE, &recv_remoteh_reqs[base_ind], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
01269 
01270         if( done )
01271         {
01272             // If it is done, unpack buffer
01273             remoteOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
01274             result = unpack_buffer( remoteOwnedBuffs[ind2 / 2]->buff_ptr, store_remote_handles, from_proc, ind2 / 2,
01275                                     L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );MB_CHK_SET_ERR( result, "Failed to unpack buffer in recv_messages" );
01276 
01277             std::copy( new_ents.begin(), new_ents.end(), range_inserter( final_ents ) );
01278 
01279             // Send local handles for new elements to owner
01280             // Reserve space on front for size and for initial buff size
01281             remoteOwnedBuffs[ind2 / 2]->reset_buffer( sizeof( int ) );
01282 
01283             result = pack_remote_handles( L1hloc[ind2 / 2], L1hrem[ind2 / 2], L1p[ind2 / 2], from_proc,
01284                                           remoteOwnedBuffs[ind2 / 2] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
01285             remoteOwnedBuffs[ind2 / 2]->set_stored_size();
01286 
01287             result = send_buffer( buffProcs[ind2 / 2], remoteOwnedBuffs[ind2 / 2], MB_MESG_REMOTEH_SIZE, sendReqs[ind2],
01288                                   recv_remoteh_reqs[ind2 + 1], (int*)( localOwnedBuffs[ind2 / 2]->mem_ptr ),
01289                                   //&ackbuff,
01290                                   incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
01291         }
01292     }
01293 
01294     return MB_SUCCESS;
01295 #endif
01296 }
01297 
01298 ErrorCode ParallelComm::recv_remote_handle_messages( const int from_proc, int& incoming2,
01299                                                      std::vector< EntityHandle >& L2hloc,
01300                                                      std::vector< EntityHandle >& L2hrem,
01301                                                      std::vector< unsigned int >& L2p,
01302                                                      std::vector< MPI_Request >& recv_remoteh_reqs )
01303 {
01304 #ifndef MOAB_HAVE_MPI
01305     return MB_FAILURE;
01306 #else
01307     MPI_Status status;
01308     ErrorCode result;
01309     int ind1 = get_buffers( from_proc );
01310     int success, ind2;
01311 
01312     while( incoming2 )
01313     {
01314         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
01315         success = MPI_Waitany( 2, &recv_remoteh_reqs[2 * ind1], &ind2, &status );
01316         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in recv_remote_handle_messages" ); }
01317 
01318         // OK, received something; decrement incoming counter
01319         incoming2--;
01320 
01321         PRINT_DEBUG_RECD( status );
01322 
01323         bool done = false;
01324         ind2 += 2 * ind1;
01325         unsigned int base_ind = 2 * ( ind2 / 2 );
01326         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind2 / 2], recv_remoteh_reqs[ind2],
01327                               recv_remoteh_reqs[ind2 + 1], incoming2, remoteOwnedBuffs[ind2 / 2], sendReqs[base_ind],
01328                               sendReqs[base_ind + 1], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
01329         if( done )
01330         {
01331             // Incoming remote handles
01332             localOwnedBuffs[ind2 / 2]->reset_ptr( sizeof( int ) );
01333             result =
01334                 unpack_remote_handles( buffProcs[ind2 / 2], localOwnedBuffs[ind2 / 2]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
01335         }
01336     }
01337 
01338     return MB_SUCCESS;
01339 #endif
01340 }
01341 
01342 ErrorCode ParallelComm::pack_buffer( Range& orig_ents, const bool /*adjacencies*/, const bool tags,
01343                                      const bool store_remote_handles, const int to_proc, Buffer* buff,
01344                                      TupleList* entprocs, Range* allsent )
01345 {
01346     // Pack the buffer with the entity ranges, adjacencies, and tags sections
01347     //
01348     // Note: new entities used in subsequent connectivity lists, sets, or tags,
01349     // are referred to as (MBMAXTYPE + index), where index is into vector
01350     // of new entities, 0-based
01351     ErrorCode result;
01352 
01353     Range set_range;
01354     std::vector< Tag > all_tags;
01355     std::vector< Range > tag_ranges;
01356 
01357     Range::const_iterator rit;
01358 
01359     // Entities
01360     result = pack_entities( orig_ents, buff, store_remote_handles, to_proc, false, entprocs, allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
01361 
01362     // Sets
01363     result = pack_sets( orig_ents, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing sets (count) failed" );
01364 
01365     // Tags
01366     Range final_ents;
01367     if( tags )
01368     {
01369         result = get_tag_send_list( orig_ents, all_tags, tag_ranges );MB_CHK_SET_ERR( result, "Failed to get tagged entities" );
01370         result = pack_tags( orig_ents, all_tags, all_tags, tag_ranges, buff, store_remote_handles, to_proc );MB_CHK_SET_ERR( result, "Packing tags (count) failed" );
01371     }
01372     else
01373     {  // Set tag size to 0
01374         buff->check_space( sizeof( int ) );
01375         PACK_INT( buff->buff_ptr, 0 );
01376         buff->set_stored_size();
01377     }
01378 
01379     return result;
01380 }
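// A sketch of the buffer layout this produces (illustrative only; the exact byte layout is
// whatever the PACK_* macros and the pack_entities/pack_sets/pack_tags routines write):
//   [int: stored message size (the leading int reserved via reset_ptr(sizeof(int)) elsewhere)]
//   [entities section from pack_entities]
//   [sets section from pack_sets]
//   [tags section from pack_tags, or a single 0 int when tags == false]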
01381 
01382 ErrorCode ParallelComm::unpack_buffer( unsigned char* buff_ptr, const bool store_remote_handles, const int from_proc,
01383                                        const int ind, std::vector< std::vector< EntityHandle > >& L1hloc,
01384                                        std::vector< std::vector< EntityHandle > >& L1hrem,
01385                                        std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01386                                        std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01387                                        std::vector< EntityHandle >& new_ents, const bool created_iface )
01388 {
01389     unsigned char* tmp_buff = buff_ptr;
01390     ErrorCode result;
01391     result = unpack_entities( buff_ptr, store_remote_handles, ind, false, L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p,
01392                               new_ents, created_iface );MB_CHK_SET_ERR( result, "Unpacking entities failed" );
01393     if( myDebug->get_verbosity() == 3 )
01394     {
01395         myDebug->tprintf( 4, "unpack_entities buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01396         tmp_buff = buff_ptr;
01397     }
01398     result = unpack_sets( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking sets failed" );
01399     if( myDebug->get_verbosity() == 3 )
01400     {
01401         myDebug->tprintf( 4, "unpack_sets buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01402         tmp_buff = buff_ptr;
01403     }
01404     result = unpack_tags( buff_ptr, new_ents, store_remote_handles, from_proc );MB_CHK_SET_ERR( result, "Unpacking tags failed" );
01405     if( myDebug->get_verbosity() == 3 )
01406     {
01407         myDebug->tprintf( 4, "unpack_tags buffer space: %ld bytes.\n", (long int)( buff_ptr - tmp_buff ) );
01408         // tmp_buff = buff_ptr;
01409     }
01410 
01411     if( myDebug->get_verbosity() == 3 ) myDebug->print( 4, "\n" );
01412 
01413     return MB_SUCCESS;
01414 }
01415 
01416 int ParallelComm::estimate_ents_buffer_size( Range& entities, const bool store_remote_handles )
01417 {
01418     int buff_size = 0;
01419     std::vector< EntityHandle > dum_connect_vec;
01420     const EntityHandle* connect;
01421     int num_connect;
01422 
01423     int num_verts = entities.num_of_type( MBVERTEX );
01424     // # verts + coords + handles
01425     buff_size += 2 * sizeof( int ) + 3 * sizeof( double ) * num_verts;
01426     if( store_remote_handles ) buff_size += sizeof( EntityHandle ) * num_verts;
01427 
01428     // Do a rough count by looking at first entity of each type
01429     for( EntityType t = MBEDGE; t < MBENTITYSET; t++ )
01430     {
01431         const Range::iterator rit = entities.lower_bound( t );
01432         if( rit == entities.end() || TYPE_FROM_HANDLE( *rit ) != t ) continue;
01433 
01434         ErrorCode result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect_vec );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get connectivity to estimate buffer size", -1 );
01435 
01436         // Number, type, nodes per entity
01437         buff_size += 3 * sizeof( int );
01438         int num_ents = entities.num_of_type( t );
01439         // Connectivity, handle for each ent
01440         buff_size += ( num_connect + 1 ) * sizeof( EntityHandle ) * num_ents;
01441     }
01442 
01443     // Extra entity type at end, passed as int
01444     buff_size += sizeof( int );
01445 
01446     return buff_size;
01447 }
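// Worked example of the estimate above (illustrative only; assumes a 4-byte int, 8-byte double,
// 8-byte EntityHandle and store_remote_handles == true):
//   100 vertices            : 2*4 + 3*8*100 + 8*100 = 3208 bytes
//   50 hexes (8 nodes each) : 3*4 + (8 + 1)*8*50    = 3612 bytes
//   trailing type marker    : 4 bytes
// giving an estimate of 6824 bytes before set and tag data are accounted for.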
01448 
01449 int ParallelComm::estimate_sets_buffer_size( Range& entities, const bool /*store_remote_handles*/ )
01450 {
01451     // Number of sets
01452     int buff_size = sizeof( int );
01453 
01454     // Loop over the sets in the range and add a rough size estimate for each
01455     Range::iterator rit = entities.lower_bound( MBENTITYSET );
01456     ErrorCode result;
01457 
01458     for( ; rit != entities.end(); ++rit )
01459     {
01460         unsigned int options;
01461         result = mbImpl->get_meshset_options( *rit, options );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get meshset options", -1 );
01462 
01463         buff_size += sizeof( int );
01464 
01465         Range set_range;
01466         if( options & MESHSET_SET )
01467         {
01468             // Range-based set; count the subranges
01469             result = mbImpl->get_entities_by_handle( *rit, set_range );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get set entities", -1 );
01470 
01471             // Set range
01472             buff_size += RANGE_SIZE( set_range );
01473         }
01474         else if( options & MESHSET_ORDERED )
01475         {
01476             // Just get the number of entities in the set
01477             int num_ents;
01478             result = mbImpl->get_number_entities_by_handle( *rit, num_ents );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get number entities in ordered set", -1 );
01479 
01480             // Set vec
01481             buff_size += sizeof( EntityHandle ) * num_ents + sizeof( int );
01482         }
01483 
01484         // Get numbers of parents/children
01485         int num_par, num_ch;
01486         result = mbImpl->num_child_meshsets( *rit, &num_ch );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num children", -1 );
01487         result = mbImpl->num_parent_meshsets( *rit, &num_par );MB_CHK_SET_ERR_RET_VAL( result, "Failed to get num parents", -1 );
01488 
01489         buff_size += ( num_ch + num_par ) * sizeof( EntityHandle ) + 2 * sizeof( int );
01490     }
01491 
01492     return buff_size;
01493 }
01494 
01495 ErrorCode ParallelComm::pack_entities( Range& entities, Buffer* buff, const bool store_remote_handles,
01496                                        const int to_proc, const bool /*is_iface*/, TupleList* entprocs,
01497                                        Range* /*allsent*/ )
01498 {
01499     // Packed information:
01500     // 1. # entities = E
01501     // 2. for e in E
01502     //   a. # procs sharing e, incl. sender and receiver = P
01503     //   b. for p in P (procs sharing e)
01504     //   c. for p in P (handle for e on p) (Note1)
01505     // 3. vertex/entity info
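    // For example, with store_remote_handles on, the section described by 1.-2. might look like
    // this for two entities (hypothetical ranks/handles, written with PACK_INT/PACK_INTS/PACK_EH
    // below):
    //   [E = 2]
    //   [P = 2][procs: 0 3]  [handles: ha 0]       <- handle on proc 3 not yet known, packed as 0
    //   [P = 3][procs: 1 0 3][handles: hb hc 0]    <- multi-shared, owner (rank 1) listed first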
01506 
01507     // Get an estimate of the buffer size & pre-allocate buffer size
01508     int buff_size = estimate_ents_buffer_size( entities, store_remote_handles );
01509     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate ents buffer size" );
01510     buff->check_space( buff_size );
01511     myDebug->tprintf( 3, "estimate buffer size for %d entities: %d \n", (int)entities.size(), buff_size );
01512 
01513     unsigned int num_ents;
01514     ErrorCode result;
01515 
01516     std::vector< EntityHandle > entities_vec( entities.size() );
01517     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
01518 
01519     // First pack procs/handles sharing this ent, not including this dest but including
01520     // others (with zero handles)
01521     if( store_remote_handles )
01522     {
01523         // Buff space is at least proc + handle for each entity; use avg of 4 other procs
01524         // to estimate buff size, but check later
01525         buff->check_space( sizeof( int ) + ( 5 * sizeof( int ) + sizeof( EntityHandle ) ) * entities.size() );
01526 
01527         // 1. # entities = E
01528         PACK_INT( buff->buff_ptr, entities.size() );
01529 
01530         Range::iterator rit;
01531 
01532         // Pre-fetch sharedp and pstatus
01533         std::vector< int > sharedp_vals( entities.size() );
01534         result = mbImpl->tag_get_data( sharedp_tag(), entities, &sharedp_vals[0] );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
01535         std::vector< char > pstatus_vals( entities.size() );
01536         result = mbImpl->tag_get_data( pstatus_tag(), entities, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
01537 
01538         unsigned int i;
01539         int tmp_procs[MAX_SHARING_PROCS];
01540         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01541         std::set< unsigned int > dumprocs;
01542 
01543         // 2. for e in E
01544         for( rit = entities.begin(), i = 0; rit != entities.end(); ++rit, i++ )
01545         {
01546             unsigned int ind =
01547                 std::lower_bound( entprocs->vul_rd, entprocs->vul_rd + entprocs->get_n(), *rit ) - entprocs->vul_rd;
01548             assert( ind < entprocs->get_n() );
01549 
01550             while( ind < entprocs->get_n() && entprocs->vul_rd[ind] == *rit )
01551                 dumprocs.insert( entprocs->vi_rd[ind++] );
01552 
01553             result = build_sharedhps_list( *rit, pstatus_vals[i], sharedp_vals[i], dumprocs, num_ents, tmp_procs,
01554                                            tmp_handles );MB_CHK_SET_ERR( result, "Failed to build sharedhps" );
01555 
01556             dumprocs.clear();
01557 
01558             // Now pack them
01559             buff->check_space( ( num_ents + 1 ) * sizeof( int ) + num_ents * sizeof( EntityHandle ) );
01560             PACK_INT( buff->buff_ptr, num_ents );
01561             PACK_INTS( buff->buff_ptr, tmp_procs, num_ents );
01562             PACK_EH( buff->buff_ptr, tmp_handles, num_ents );
01563 
01564 #ifndef NDEBUG
01565             // Check for duplicates in proc list
01566             unsigned int dp = 0;
01567             for( ; dp < MAX_SHARING_PROCS && -1 != tmp_procs[dp]; dp++ )
01568                 dumprocs.insert( tmp_procs[dp] );
01569             assert( dumprocs.size() == dp );
01570             dumprocs.clear();
01571 #endif
01572         }
01573     }
01574 
01575     // Pack vertices
01576     Range these_ents = entities.subset_by_type( MBVERTEX );
01577     num_ents         = these_ents.size();
01578 
01579     if( num_ents )
01580     {
01581         buff_size = 2 * sizeof( int ) + 3 * num_ents * sizeof( double );
01582         buff->check_space( buff_size );
01583 
01584         // Type, # ents
01585         PACK_INT( buff->buff_ptr, ( (int)MBVERTEX ) );
01586         PACK_INT( buff->buff_ptr, ( (int)num_ents ) );
01587 
01588         std::vector< double > tmp_coords( 3 * num_ents );
01589         result = mbImpl->get_coords( these_ents, &tmp_coords[0] );MB_CHK_SET_ERR( result, "Failed to get vertex coordinates" );
01590         PACK_DBLS( buff->buff_ptr, &tmp_coords[0], 3 * num_ents );
01591 
01592         myDebug->tprintf( 4, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01593                           CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01594     }
01595 
01596     // Now entities; go through range, packing by type and equal # verts per element
01597     Range::iterator start_rit = entities.find( *these_ents.rbegin() );
01598     ++start_rit;
01599     int last_nodes       = -1;
01600     EntityType last_type = MBMAXTYPE;
01601     these_ents.clear();
01602     Range::iterator end_rit = start_rit;
01603     EntitySequence* seq;
01604     ElementSequence* eseq;
01605 
01606     while( start_rit != entities.end() || !these_ents.empty() )
01607     {
01608         // Cases:
01609         // A: !end, last_type == MBMAXTYPE, seq: save contig sequence in these_ents
01610         // B: !end, last type & nodes same, seq: save contig sequence in these_ents
01611         // C: !end, last type & nodes different: pack these_ents, then save contig sequence in these_ents
01612         // D: end: pack these_ents
01613 
01614         // Find the sequence holding current start entity, if we're not at end
01615         eseq = NULL;
01616         if( start_rit != entities.end() )
01617         {
01618             result = sequenceManager->find( *start_rit, seq );MB_CHK_SET_ERR( result, "Failed to find entity sequence" );
01619             if( NULL == seq ) return MB_FAILURE;
01620             eseq = dynamic_cast< ElementSequence* >( seq );
01621         }
01622 
01623         // Pack the last batch if at end or next one is different
01624         if( !these_ents.empty() &&
01625             ( !eseq || eseq->type() != last_type || last_nodes != (int)eseq->nodes_per_element() ) )
01626         {
01627             result = pack_entity_seq( last_nodes, store_remote_handles, to_proc, these_ents, entities_vec, buff );MB_CHK_SET_ERR( result, "Failed to pack entities from a sequence" );
01628             these_ents.clear();
01629         }
01630 
01631         if( eseq )
01632         {
01633             // Continuation of current range, just save these entities
01634             // Get position in entities list one past end of this sequence
01635             end_rit = entities.lower_bound( start_rit, entities.end(), eseq->end_handle() + 1 );
01636 
01637             // Put these entities in the range
01638             std::copy( start_rit, end_rit, range_inserter( these_ents ) );
01639 
01640             last_type  = eseq->type();
01641             last_nodes = eseq->nodes_per_element();
01642         }
01643         else if( start_rit != entities.end() && TYPE_FROM_HANDLE( *start_rit ) == MBENTITYSET )
01644             break;
01645 
01646         start_rit = end_rit;
01647     }
01648 
01649     // Pack MBMAXTYPE to indicate end of ranges
01650     buff->check_space( sizeof( int ) );
01651     PACK_INT( buff->buff_ptr, ( (int)MBMAXTYPE ) );
01652 
01653     buff->set_stored_size();
01654     return MB_SUCCESS;
01655 }
01656 
01657 ErrorCode ParallelComm::build_sharedhps_list( const EntityHandle entity, const unsigned char pstatus,
01658                                               const int
01659 #ifndef NDEBUG
01660                                                   sharedp
01661 #endif
01662                                               ,
01663                                               const std::set< unsigned int >& procs, unsigned int& num_ents,
01664                                               int* tmp_procs, EntityHandle* tmp_handles )
01665 {
01666     num_ents = 0;
01667     unsigned char pstat;
01668     ErrorCode result = get_sharing_data( entity, tmp_procs, tmp_handles, pstat, num_ents );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
01669     assert( pstat == pstatus );
01670 
01671     // Build shared proc/handle lists
01672     // Start with multi-shared, since if it is the owner will be first
01673     if( pstatus & PSTATUS_MULTISHARED ) {}
01674     else if( pstatus & PSTATUS_NOT_OWNED )
01675     {
01676         // If not multishared and not owned, other sharing proc is owner, put that
01677         // one first
01678         assert( "If not owned, I should be shared too" && pstatus & PSTATUS_SHARED && 1 == num_ents );
01679         tmp_procs[1]   = procConfig.proc_rank();
01680         tmp_handles[1] = entity;
01681         num_ents       = 2;
01682     }
01683     else if( pstatus & PSTATUS_SHARED )
01684     {
01685         // If not multishared and owned, I'm owner
01686         assert( "shared and owned, should be only 1 sharing proc" && 1 == num_ents );
01687         tmp_procs[1]   = tmp_procs[0];
01688         tmp_procs[0]   = procConfig.proc_rank();
01689         tmp_handles[1] = tmp_handles[0];
01690         tmp_handles[0] = entity;
01691         num_ents       = 2;
01692     }
01693     else
01694     {
01695         // Not shared yet, just add owner (me)
01696         tmp_procs[0]   = procConfig.proc_rank();
01697         tmp_handles[0] = entity;
01698         num_ents       = 1;
01699     }
01700 
01701 #ifndef NDEBUG
01702     int tmp_ps = num_ents;
01703 #endif
01704 
01705     // Now add others, with zero handle for now
01706     for( std::set< unsigned int >::iterator sit = procs.begin(); sit != procs.end(); ++sit )
01707     {
01708 #ifndef NDEBUG
01709         if( tmp_ps && std::find( tmp_procs, tmp_procs + tmp_ps, *sit ) != tmp_procs + tmp_ps )
01710         {
01711             std::cerr << "Trouble with something already in shared list on proc " << procConfig.proc_rank()
01712                       << ". Entity:" << std::endl;
01713             list_entities( &entity, 1 );
01714             std::cerr << "pstatus = " << (int)pstatus << ", sharedp = " << sharedp << std::endl;
01715             std::cerr << "tmp_ps = ";
01716             for( int i = 0; i < tmp_ps; i++ )
01717                 std::cerr << tmp_procs[i] << " ";
01718             std::cerr << std::endl;
01719             std::cerr << "procs = ";
01720             for( std::set< unsigned int >::iterator sit2 = procs.begin(); sit2 != procs.end(); ++sit2 )
01721                 std::cerr << *sit2 << " ";
01722             assert( false );
01723         }
01724 #endif
01725         tmp_procs[num_ents]   = *sit;
01726         tmp_handles[num_ents] = 0;
01727         num_ents++;
01728     }
01729 
01730     // Put -1 after procs and 0 after handles
01731     if( MAX_SHARING_PROCS > num_ents )
01732     {
01733         tmp_procs[num_ents]   = -1;
01734         tmp_handles[num_ents] = 0;
01735     }
01736 
01737     return MB_SUCCESS;
01738 }
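// Illustrative outcomes of the branches above for a caller on rank 2 (hypothetical values):
//   multi-shared         : procs/handles left as returned by get_sharing_data (owner already first)
//   shared, not owned    : procs = {owner, 2},  handles = {owner_handle, entity}
//   shared, owned        : procs = {2, other},  handles = {entity, other_handle}
//   not shared yet       : procs = {2},         handles = {entity}
// Any procs passed in `procs` that are not already listed are then appended with a zero handle,
// and the arrays are terminated with -1 / 0 when space remains.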
01739 
01740 ErrorCode ParallelComm::pack_entity_seq( const int nodes_per_entity, const bool store_remote_handles, const int to_proc,
01741                                          Range& these_ents, std::vector< EntityHandle >& entities_vec, Buffer* buff )
01742 {
01743     int tmp_space = 3 * sizeof( int ) + nodes_per_entity * these_ents.size() * sizeof( EntityHandle );
01744     buff->check_space( tmp_space );
01745 
01746     // Pack the entity type
01747     PACK_INT( buff->buff_ptr, ( (int)TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01748 
01749     // Pack # ents
01750     PACK_INT( buff->buff_ptr, these_ents.size() );
01751 
01752     // Pack the nodes per entity
01753     PACK_INT( buff->buff_ptr, nodes_per_entity );
01754     myDebug->tprintf( 3, "buffer size after packing type/count/nodes per entity: %d\n", buff->get_current_size() );
01755 
01756     // Pack the connectivity
01757     std::vector< EntityHandle > connect;
01758     ErrorCode result = MB_SUCCESS;
01759     for( Range::const_iterator rit = these_ents.begin(); rit != these_ents.end(); ++rit )
01760     {
01761         connect.clear();
01762         result = mbImpl->get_connectivity( &( *rit ), 1, connect, false );MB_CHK_SET_ERR( result, "Failed to get connectivity" );
01763         assert( (int)connect.size() == nodes_per_entity );
01764         result =
01765             get_remote_handles( store_remote_handles, &connect[0], &connect[0], connect.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
01766         PACK_EH( buff->buff_ptr, &connect[0], connect.size() );
01767     }
01768 
01769     myDebug->tprintf( 3, "Packed %lu ents of type %s\n", (unsigned long)these_ents.size(),
01770                       CN::EntityTypeName( TYPE_FROM_HANDLE( *these_ents.begin() ) ) );
01771 
01772     return result;
01773 }
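// Each call above therefore appends one fixed-arity block to the buffer, roughly (illustrative;
// the exact encoding is whatever PACK_INT/PACK_EH write):
//   [int: entity type][int: # entities][int: nodes per entity]
//   [nodes_per_entity x EntityHandle]   <- repeated once per entity, connectivity already
//                                          translated by get_remote_handles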
01774 
01775 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, EntityHandle* from_vec,
01776                                             EntityHandle* to_vec_tmp, int num_ents, int to_proc,
01777                                             const std::vector< EntityHandle >& new_ents )
01778 {
01779     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE RANGE-BASED VERSION, NO REUSE
01780     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01781     // OTHER VERSION TOO!!!
01782     if( 0 == num_ents ) return MB_SUCCESS;
01783 
01784     // Use a local destination ptr in case we're doing an in-place copy
01785     std::vector< EntityHandle > tmp_vector;
01786     EntityHandle* to_vec = to_vec_tmp;
01787     if( to_vec == from_vec )
01788     {
01789         tmp_vector.resize( num_ents );
01790         to_vec = &tmp_vector[0];
01791     }
01792 
01793     if( !store_remote_handles )
01794     {
01795         int err;
01796         // In this case, substitute position in new_ents list
01797         for( int i = 0; i < num_ents; i++ )
01798         {
01799             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
01800             assert( new_ents[ind] == from_vec[i] );
01801             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
01802             assert( to_vec[i] != 0 && !err && -1 != ind );
01803         }
01804     }
01805     else
01806     {
01807         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
01808         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
01809 
01810         // Get single-proc destination handles and shared procs
01811         std::vector< int > sharing_procs( num_ents );
01812         result = mbImpl->tag_get_data( shh_tag, from_vec, num_ents, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
01813         result = mbImpl->tag_get_data( shp_tag, from_vec, num_ents, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
01814         for( int j = 0; j < num_ents; j++ )
01815         {
01816             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
01817         }
01818 
01819         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01820         int tmp_procs[MAX_SHARING_PROCS];
01821         int i;
01822         // Go through results, and for 0-valued ones, look for multiple shared proc
01823         for( i = 0; i < num_ents; i++ )
01824         {
01825             if( !to_vec[i] )
01826             {
01827                 result = mbImpl->tag_get_data( shps_tag, from_vec + i, 1, tmp_procs );
01828                 if( MB_SUCCESS == result )
01829                 {
01830                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
01831                     {
01832                         if( -1 == tmp_procs[j] )
01833                             break;
01834                         else if( tmp_procs[j] == to_proc )
01835                         {
01836                             result = mbImpl->tag_get_data( shhs_tag, from_vec + i, 1, tmp_handles );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
01837                             to_vec[i] = tmp_handles[j];
01838                             assert( to_vec[i] );
01839                             break;
01840                         }
01841                     }
01842                 }
01843                 if( !to_vec[i] )
01844                 {
01845                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), from_vec[i] ) - new_ents.begin();
01846                     if( (int)new_ents.size() == j )
01847                     {
01848                         std::cout << "Failed to find new entity in send list, proc " << procConfig.proc_rank()
01849                                   << std::endl;
01850                         for( int k = 0; k < num_ents; k++ )
01851                             std::cout << k << ": " << from_vec[k] << " " << to_vec[k] << std::endl;
01852                         MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
01853                     }
01854                     int err;
01855                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
01856                     if( err ) { MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" ); }
01857                 }
01858             }
01859         }
01860     }
01861 
01862     // memcpy over results if from_vec and to_vec are the same
01863     if( to_vec_tmp == from_vec ) memcpy( from_vec, to_vec, num_ents * sizeof( EntityHandle ) );
01864 
01865     return MB_SUCCESS;
01866 }
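// When store_remote_handles is false, this routine (and the Range-based overloads below) falls
// back to encoding each handle as its 0-based position in the new_ents list, using a handle of
// type MBMAXTYPE, i.e. (sketch of the convention used above):
//   int err;
//   EntityHandle encoded = CREATE_HANDLE( MBMAXTYPE, ind, err );  // ind = index into new_ents
// The receiving side is then expected to translate such handles back through its own list of
// new entities (see get_local_handles, called from unpack_entities below), matching the
// "(MBMAXTYPE + index)" note in pack_buffer.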
01867 
01868 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, const Range& from_range,
01869                                             EntityHandle* to_vec, int to_proc,
01870                                             const std::vector< EntityHandle >& new_ents )
01871 {
01872     // NOTE: THIS IMPLEMENTATION IS JUST LIKE THE VECTOR-BASED VERSION, NO REUSE
01873     // AT THIS TIME, SO IF YOU FIX A BUG IN THIS VERSION, IT MAY BE IN THE
01874     // OTHER VERSION TOO!!!
01875     if( from_range.empty() ) return MB_SUCCESS;
01876 
01877     if( !store_remote_handles )
01878     {
01879         int err;
01880         // In this case, substitute position in new_ents list
01881         Range::iterator rit;
01882         unsigned int i;
01883         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
01884         {
01885             int ind = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
01886             assert( new_ents[ind] == *rit );
01887             to_vec[i] = CREATE_HANDLE( MBMAXTYPE, ind, err );
01888             assert( to_vec[i] != 0 && !err && -1 != ind );
01889         }
01890     }
01891     else
01892     {
01893         Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
01894         ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
01895 
01896         // Get single-proc destination handles and shared procs
01897         std::vector< int > sharing_procs( from_range.size() );
01898         result = mbImpl->tag_get_data( shh_tag, from_range, to_vec );MB_CHK_SET_ERR( result, "Failed to get shared handle tag for remote_handles" );
01899         result = mbImpl->tag_get_data( shp_tag, from_range, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to get sharing proc tag in remote_handles" );
01900         for( unsigned int j = 0; j < from_range.size(); j++ )
01901         {
01902             if( to_vec[j] && sharing_procs[j] != to_proc ) to_vec[j] = 0;
01903         }
01904 
01905         EntityHandle tmp_handles[MAX_SHARING_PROCS];
01906         int tmp_procs[MAX_SHARING_PROCS];
01907         // Go through results, and for 0-valued ones, look for multiple shared proc
01908         Range::iterator rit;
01909         unsigned int i;
01910         for( rit = from_range.begin(), i = 0; rit != from_range.end(); ++rit, i++ )
01911         {
01912             if( !to_vec[i] )
01913             {
01914                 result = mbImpl->tag_get_data( shhs_tag, &( *rit ), 1, tmp_handles );
01915                 if( MB_SUCCESS == result )
01916                 {
01917                     result = mbImpl->tag_get_data( shps_tag, &( *rit ), 1, tmp_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
01918                     for( int j = 0; j < MAX_SHARING_PROCS; j++ )
01919                         if( tmp_procs[j] == to_proc )
01920                         {
01921                             to_vec[i] = tmp_handles[j];
01922                             break;
01923                         }
01924                 }
01925 
01926                 if( !to_vec[i] )
01927                 {
01928                     int j = std::lower_bound( new_ents.begin(), new_ents.end(), *rit ) - new_ents.begin();
01929                     if( (int)new_ents.size() == j )
01930                     {
01931                         MB_SET_ERR( MB_FAILURE, "Failed to find new entity in send list" );
01932                     }
01933                     int err;
01934                     to_vec[i] = CREATE_HANDLE( MBMAXTYPE, j, err );
01935                     if( err ) { MB_SET_ERR( MB_FAILURE, "Failed to create handle in remote_handles" ); }
01936                 }
01937             }
01938         }
01939     }
01940 
01941     return MB_SUCCESS;
01942 }
01943 
01944 ErrorCode ParallelComm::get_remote_handles( const bool store_remote_handles, const Range& from_range, Range& to_range,
01945                                             int to_proc, const std::vector< EntityHandle >& new_ents )
01946 {
01947     std::vector< EntityHandle > to_vector( from_range.size() );
01948 
01949     ErrorCode result = get_remote_handles( store_remote_handles, from_range, &to_vector[0], to_proc, new_ents );MB_CHK_SET_ERR( result, "Failed to get remote handles" );
01950     std::copy( to_vector.begin(), to_vector.end(), range_inserter( to_range ) );
01951     return result;
01952 }
01953 
01954 ErrorCode ParallelComm::unpack_entities( unsigned char*& buff_ptr, const bool store_remote_handles,
01955                                          const int /*from_ind*/, const bool is_iface,
01956                                          std::vector< std::vector< EntityHandle > >& L1hloc,
01957                                          std::vector< std::vector< EntityHandle > >& L1hrem,
01958                                          std::vector< std::vector< int > >& L1p, std::vector< EntityHandle >& L2hloc,
01959                                          std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
01960                                          std::vector< EntityHandle >& new_ents, const bool created_iface )
01961 {
01962     // General algorithm:
01963     // - unpack # entities
01964     // - save start of remote handle info, then scan forward to entity definition data
01965     // - for all vertices or entities w/ same # verts:
01966     //   . get entity type, num ents, and (if !vert) # verts
01967     //   . for each ent:
01968     //      o get # procs/handles in remote handle info
01969     //      o if # procs/handles > 2, check for already-created entity:
01970     //        x get index of owner proc (1st in proc list), resize L1 list if nec
01971     //        x look for already-arrived entity in L2 by owner handle
01972     //      o if no existing entity:
01973     //        x if iface, look for existing entity with same connect & type
01974     //        x if none found, create vertex or element
01975     //        x if !iface & multi-shared, save on L2
01976     //        x if !iface, put new entity on new_ents list
01977     //      o update proc/handle, pstatus tags, adjusting to put owner first if iface
01978     //      o if !iface, save new handle on L1 for all sharing procs
01979 
01980     // Lists of handles/procs to return to sending/other procs
01981     // L1hloc[p], L1hrem[p]: handle pairs [h, h'], where h is the local proc handle
01982     //         and h' is either the remote proc handle (if that is known) or
01983     //         the owner proc handle (otherwise);
01984     // L1p[p]: indicates whether h is remote handle (= -1) or owner (rank of owner)
01985     // L2hloc, L2hrem: local/remote handles for entities shared by > 2 procs;
01986     //         remote handles are on owning proc
01987     // L2p: owning procs for handles in L2hrem
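    // For example (hypothetical handles/ranks): if this proc creates local handle h_new for an
    // entity owned by proc 0 (owner handle h0) that is also shared with proc 3, whose handle is
    // not yet known, the loops below record
    //   L1hloc[idx0].push_back( h_new ); L1hrem[idx0].push_back( h0 ); L1p[idx0].push_back( -1 );
    //   L1hloc[idx3].push_back( h_new ); L1hrem[idx3].push_back( h0 ); L1p[idx3].push_back( 0 );
    // and, since more than 2 procs share it, h_new/h0/0 also go onto L2hloc/L2hrem/L2p so that
    // later arrivals can be matched by owner handle.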
01988 
01989     ErrorCode result;
01990     bool done         = false;
01991     ReadUtilIface* ru = NULL;
01992 
01993     result = mbImpl->query_interface( ru );MB_CHK_SET_ERR( result, "Failed to get ReadUtilIface" );
01994 
01995     // 1. # entities = E
01996     int num_ents             = 0;
01997     unsigned char* buff_save = buff_ptr;
01998     int i, j;
01999 
02000     if( store_remote_handles )
02001     {
02002         UNPACK_INT( buff_ptr, num_ents );
02003 
02004         buff_save = buff_ptr;
02005 
02006         // Save place where remote handle info starts, then scan forward to ents
02007         for( i = 0; i < num_ents; i++ )
02008         {
02009             UNPACK_INT( buff_ptr, j );
02010             if( j < 0 )
02011             {
02012                 std::cout << "Should be non-negative # proc/handles." << std::endl;
02013                 return MB_FAILURE;
02014             }
02015 
02016             buff_ptr += j * ( sizeof( int ) + sizeof( EntityHandle ) );
02017         }
02018     }
02019 
02020     std::vector< EntityHandle > msg_ents;
02021 
02022     while( !done )
02023     {
02024         EntityType this_type = MBMAXTYPE;
02025         UNPACK_TYPE( buff_ptr, this_type );
02026         assert( this_type != MBENTITYSET );
02027 
02028         // MBMAXTYPE signifies end of entities data
02029         if( MBMAXTYPE == this_type ) break;
02030 
02031         // Get the number of ents
02032         int num_ents2, verts_per_entity = 0;
02033         UNPACK_INT( buff_ptr, num_ents2 );
02034 
02035         // Unpack the nodes per entity
02036         if( MBVERTEX != this_type && num_ents2 ) { UNPACK_INT( buff_ptr, verts_per_entity ); }
02037 
02038         std::vector< int > ps( MAX_SHARING_PROCS, -1 );
02039         std::vector< EntityHandle > hs( MAX_SHARING_PROCS, 0 );
02040         for( int e = 0; e < num_ents2; e++ )
02041         {
02042             // Check for existing entity, otherwise make new one
02043             EntityHandle new_h = 0;
02044             EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02045             double coords[3];
02046             int num_ps = -1;
02047 
02048             //=======================================
02049             // Unpack all the data at once, to make sure the buffer pointers
02050             // are tracked correctly
02051             //=======================================
02052             if( store_remote_handles )
02053             {
02054                 // Pointers to other procs/handles
02055                 UNPACK_INT( buff_save, num_ps );
02056                 if( 0 >= num_ps )
02057                 {
02058                     std::cout << "Shouldn't ever be fewer than 1 proc here." << std::endl;
02059                     return MB_FAILURE;
02060                 }
02061 
02062                 UNPACK_INTS( buff_save, &ps[0], num_ps );
02063                 UNPACK_EH( buff_save, &hs[0], num_ps );
02064             }
02065 
02066             if( MBVERTEX == this_type ) { UNPACK_DBLS( buff_ptr, coords, 3 ); }
02067             else
02068             {
02069                 assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
02070                 UNPACK_EH( buff_ptr, connect, verts_per_entity );
02071 
02072                 // Update connectivity to local handles
02073                 result = get_local_handles( connect, verts_per_entity, msg_ents );MB_CHK_SET_ERR( result, "Failed to get local handles" );
02074             }
02075 
02076             //=======================================
02077             // Now, process that data; begin by finding an identical
02078             // entity, if there is one
02079             //=======================================
02080             if( store_remote_handles )
02081             {
02082                 result = find_existing_entity( is_iface, ps[0], hs[0], num_ps, connect, verts_per_entity, this_type,
02083                                                L2hloc, L2hrem, L2p, new_h );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
02084             }
02085 
02086             //=======================================
02087             // If we didn't find one, we'll have to create one
02088             //=======================================
02089             bool created_here = false;
02090             if( !new_h && !is_iface )
02091             {
02092                 if( MBVERTEX == this_type )
02093                 {
02094                     // Create a vertex
02095                     result = mbImpl->create_vertex( coords, new_h );MB_CHK_SET_ERR( result, "Failed to make new vertex" );
02096                 }
02097                 else
02098                 {
02099                     // Create the element
02100                     result = mbImpl->create_element( this_type, connect, verts_per_entity, new_h );MB_CHK_SET_ERR( result, "Failed to make new element" );
02101 
02102                     // Update adjacencies
02103                     result = ru->update_adjacencies( new_h, 1, verts_per_entity, connect );MB_CHK_SET_ERR( result, "Failed to update adjacencies" );
02104                 }
02105 
02106                 // Should have a new handle now
02107                 assert( new_h );
02108 
02109                 created_here = true;
02110             }
02111 
02112             //=======================================
02113             // Take care of sharing data
02114             //=======================================
02115 
02116             // Need to save entities found in order, for interpretation of
02117             // later parts of this message
02118             if( !is_iface )
02119             {
02120                 assert( new_h );
02121                 msg_ents.push_back( new_h );
02122             }
02123 
02124             if( created_here ) new_ents.push_back( new_h );
02125 
02126             if( new_h && store_remote_handles )
02127             {
02128                 unsigned char new_pstat = 0x0;
02129                 if( is_iface )
02130                 {
02131                     new_pstat = PSTATUS_INTERFACE;
02132                     // Here, lowest rank proc should be first
02133                     int idx = std::min_element( &ps[0], &ps[0] + num_ps ) - &ps[0];
02134                     if( idx )
02135                     {
02136                         std::swap( ps[0], ps[idx] );
02137                         std::swap( hs[0], hs[idx] );
02138                     }
02139                     // Set ownership based on lowest rank; can't be in update_remote_data, because
02140                     // there we don't know whether it resulted from ghosting or not
02141                     if( ( num_ps > 1 && ps[0] != (int)rank() ) ) new_pstat |= PSTATUS_NOT_OWNED;
02142                 }
02143                 else if( created_here )
02144                 {
02145                     if( created_iface )
02146                         new_pstat = PSTATUS_NOT_OWNED;
02147                     else
02148                         new_pstat = PSTATUS_GHOST | PSTATUS_NOT_OWNED;
02149                 }
02150 
02151                 // Update sharing data and pstatus, adjusting order if iface
02152                 result = update_remote_data( new_h, &ps[0], &hs[0], num_ps, new_pstat );MB_CHK_SET_ERR( result, "unpack_entities" );
02153 
02154                 // If a new multi-shared entity, save owner for subsequent lookup in L2 lists
02155                 if( store_remote_handles && !is_iface && num_ps > 2 )
02156                 {
02157                     L2hrem.push_back( hs[0] );
02158                     L2hloc.push_back( new_h );
02159                     L2p.push_back( ps[0] );
02160                 }
02161 
02162                 // Need to send this new handle to all sharing procs
02163                 if( !is_iface )
02164                 {
02165                     for( j = 0; j < num_ps; j++ )
02166                     {
02167                         if( ps[j] == (int)procConfig.proc_rank() ) continue;
02168                         int idx = get_buffers( ps[j] );
02169                         if( idx == (int)L1hloc.size() )
02170                         {
02171                             L1hloc.resize( idx + 1 );
02172                             L1hrem.resize( idx + 1 );
02173                             L1p.resize( idx + 1 );
02174                         }
02175 
02176                         // Don't bother adding if it's already in the list
02177                         std::vector< EntityHandle >::iterator vit =
02178                             std::find( L1hloc[idx].begin(), L1hloc[idx].end(), new_h );
02179                         if( vit != L1hloc[idx].end() )
02180                         {
02181                             // If it's in the list but remote handle isn't known but we know
02182                             // it, replace in the list
02183                             if( L1p[idx][vit - L1hloc[idx].begin()] != -1 && hs[j] )
02184                             {
02185                                 L1hrem[idx][vit - L1hloc[idx].begin()] = hs[j];
02186                                 L1p[idx][vit - L1hloc[idx].begin()]    = -1;
02187                             }
02188                             else
02189                                 continue;
02190                         }
02191                         else
02192                         {
02193                             if( !hs[j] )
02194                             {
02195                                 assert( -1 != ps[0] && num_ps > 2 );
02196                                 L1p[idx].push_back( ps[0] );
02197                                 L1hrem[idx].push_back( hs[0] );
02198                             }
02199                             else
02200                             {
02201                                 assert(
02202                                     "either this remote handle isn't in the remote list, or "
02203                                     "it's for another proc" &&
02204                                     ( std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) == L1hrem[idx].end() ||
02205                                       L1p[idx][std::find( L1hrem[idx].begin(), L1hrem[idx].end(), hs[j] ) -
02206                                                L1hrem[idx].begin()] != -1 ) );
02207                                 L1p[idx].push_back( -1 );
02208                                 L1hrem[idx].push_back( hs[j] );
02209                             }
02210                             L1hloc[idx].push_back( new_h );
02211                         }
02212                     }
02213                 }
02214 
02215                 assert( "Shouldn't be here for non-shared entities" && -1 != num_ps );
02216                 std::fill( &ps[0], &ps[num_ps], -1 );
02217                 std::fill( &hs[0], &hs[num_ps], 0 );
02218             }
02219         }
02220 
02221         myDebug->tprintf( 4, "Unpacked %d ents of type %s", num_ents2, CN::EntityTypeName( this_type ) );
02222     }
02223 
02224     myDebug->tprintf( 4, "Done unpacking entities.\n" );
02225 
02226     // Need to sort here, to enable searching
02227     std::sort( new_ents.begin(), new_ents.end() );
02228 
02229     return MB_SUCCESS;
02230 }
02231 
02232 ErrorCode ParallelComm::print_buffer( unsigned char* buff_ptr, int mesg_tag, int from_proc, bool sent )
02233 {
02234     std::cerr << procConfig.proc_rank();
02235     if( sent )
02236         std::cerr << " sent";
02237     else
02238         std::cerr << " received";
02239     std::cerr << " message type " << mesg_tag << " to/from proc " << from_proc << "; contents:" << std::endl;
02240 
02241     int msg_length, num_ents;
02242     unsigned char* orig_ptr = buff_ptr;
02243     UNPACK_INT( buff_ptr, msg_length );
02244     std::cerr << msg_length << " bytes..." << std::endl;
02245 
02246     if( MB_MESG_ENTS_SIZE == mesg_tag || MB_MESG_ENTS_LARGE == mesg_tag )
02247     {
02248         // 1. # entities = E
02249         int i, j, k;
02250         std::vector< int > ps;
02251         std::vector< EntityHandle > hs;
02252 
02253         UNPACK_INT( buff_ptr, num_ents );
02254         std::cerr << num_ents << " entities..." << std::endl;
02255 
02256         // Save place where remote handle info starts, then scan forward to ents
02257         for( i = 0; i < num_ents; i++ )
02258         {
02259             UNPACK_INT( buff_ptr, j );
02260             if( 0 > j ) return MB_FAILURE;
02261             ps.resize( j );
02262             hs.resize( j );
02263             std::cerr << "Entity " << i << ", # procs = " << j << std::endl;
02264             UNPACK_INTS( buff_ptr, &ps[0], j );
02265             UNPACK_EH( buff_ptr, &hs[0], j );
02266             std::cerr << "   Procs: ";
02267             for( k = 0; k < j; k++ )
02268                 std::cerr << ps[k] << " ";
02269             std::cerr << std::endl;
02270             std::cerr << "   Handles: ";
02271             for( k = 0; k < j; k++ )
02272                 std::cerr << hs[k] << " ";
02273             std::cerr << std::endl;
02274 
02275             if( buff_ptr - orig_ptr > msg_length )
02276             {
02277                 std::cerr << "End of buffer..." << std::endl;
02278                 std::cerr.flush();
02279                 return MB_FAILURE;
02280             }
02281         }
02282 
02283         while( true )
02284         {
02285             EntityType this_type = MBMAXTYPE;
02286             UNPACK_TYPE( buff_ptr, this_type );
02287             assert( this_type != MBENTITYSET );
02288 
02289             // MBMAXTYPE signifies end of entities data
02290             if( MBMAXTYPE == this_type ) break;
02291 
02292             // Get the number of ents
02293             int num_ents2, verts_per_entity = 0;
02294             UNPACK_INT( buff_ptr, num_ents2 );
02295 
02296             // Unpack the nodes per entity
02297             if( MBVERTEX != this_type && num_ents2 ) { UNPACK_INT( buff_ptr, verts_per_entity ); }
02298 
02299             std::cerr << "Type: " << CN::EntityTypeName( this_type ) << "; num_ents = " << num_ents2;
02300             if( MBVERTEX != this_type ) std::cerr << "; verts_per_ent = " << verts_per_entity;
02301             std::cerr << std::endl;
02302             if( num_ents2 < 0 || num_ents2 > msg_length )
02303             {
02304                 std::cerr << "Wrong number of entities, returning." << std::endl;
02305                 return MB_FAILURE;
02306             }
02307 
02308             for( int e = 0; e < num_ents2; e++ )
02309             {
02310                 // Check for existing entity, otherwise make new one
02311                 if( MBVERTEX == this_type )
02312                 {
02313                     double coords[3];
02314                     UNPACK_DBLS( buff_ptr, coords, 3 );
02315                     std::cerr << "xyz = " << coords[0] << ", " << coords[1] << ", " << coords[2] << std::endl;
02316                 }
02317                 else
02318                 {
02319                     EntityHandle connect[CN::MAX_NODES_PER_ELEMENT];
02320                     assert( verts_per_entity <= CN::MAX_NODES_PER_ELEMENT );
02321                     UNPACK_EH( buff_ptr, connect, verts_per_entity );
02322 
02323                     // Update connectivity to local handles
02324                     std::cerr << "Connectivity: ";
02325                     for( k = 0; k < verts_per_entity; k++ )
02326                         std::cerr << connect[k] << " ";
02327                     std::cerr << std::endl;
02328                 }
02329 
02330                 if( buff_ptr - orig_ptr > msg_length )
02331                 {
02332                     std::cerr << "End of buffer..." << std::endl;
02333                     std::cerr.flush();
02334                     return MB_FAILURE;
02335                 }
02336             }
02337         }
02338     }
02339     else if( MB_MESG_REMOTEH_SIZE == mesg_tag || MB_MESG_REMOTEH_LARGE == mesg_tag )
02340     {
02341         UNPACK_INT( buff_ptr, num_ents );
02342         std::cerr << num_ents << " entities..." << std::endl;
02343         if( 0 > num_ents || num_ents > msg_length )
02344         {
02345             std::cerr << "Wrong number of entities, returning." << std::endl;
02346             return MB_FAILURE;
02347         }
02348         std::vector< EntityHandle > L1hloc( num_ents ), L1hrem( num_ents );
02349         std::vector< int > L1p( num_ents );
02350         UNPACK_INTS( buff_ptr, &L1p[0], num_ents );
02351         UNPACK_EH( buff_ptr, &L1hrem[0], num_ents );
02352         UNPACK_EH( buff_ptr, &L1hloc[0], num_ents );
02353         std::cerr << num_ents << " Entity pairs; hremote/hlocal/proc: " << std::endl;
02354         for( int i = 0; i < num_ents; i++ )
02355         {
02356             EntityType etype = TYPE_FROM_HANDLE( L1hloc[i] );
02357             std::cerr << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hrem[i] ) << ", "
02358                       << CN::EntityTypeName( etype ) << ID_FROM_HANDLE( L1hloc[i] ) << ", " << L1p[i] << std::endl;
02359         }
02360 
02361         if( buff_ptr - orig_ptr > msg_length )
02362         {
02363             std::cerr << "End of buffer..." << std::endl;
02364             std::cerr.flush();
02365             return MB_FAILURE;
02366         }
02367     }
02368     else if( mesg_tag == MB_MESG_TAGS_SIZE || mesg_tag == MB_MESG_TAGS_LARGE )
02369     {
02370         int num_tags, dum1, data_type, tag_size;
02371         UNPACK_INT( buff_ptr, num_tags );
02372         std::cerr << "Number of tags = " << num_tags << std::endl;
02373         for( int i = 0; i < num_tags; i++ )
02374         {
02375             std::cerr << "Tag " << i << ":" << std::endl;
02376             UNPACK_INT( buff_ptr, tag_size );
02377             UNPACK_INT( buff_ptr, dum1 );
02378             UNPACK_INT( buff_ptr, data_type );
02379             std::cerr << "Tag size, type, data type = " << tag_size << ", " << dum1 << ", " << data_type << std::endl;
02380             UNPACK_INT( buff_ptr, dum1 );
02381             std::cerr << "Default value size = " << dum1 << std::endl;
02382             buff_ptr += dum1;
02383             UNPACK_INT( buff_ptr, dum1 );
02384             std::string name( (char*)buff_ptr, dum1 );
02385             std::cerr << "Tag name = " << name.c_str() << std::endl;
02386             buff_ptr += dum1;
02387             UNPACK_INT( buff_ptr, num_ents );
02388             std::cerr << "Number of ents = " << num_ents << std::endl;
02389             std::vector< EntityHandle > tmp_buff( num_ents );
02390             UNPACK_EH( buff_ptr, &tmp_buff[0], num_ents );
02391             int tot_length = 0;
02392             for( int j = 0; j < num_ents; j++ )
02393             {
02394                 EntityType etype = TYPE_FROM_HANDLE( tmp_buff[j] );
02395                 std::cerr << CN::EntityTypeName( etype ) << " " << ID_FROM_HANDLE( tmp_buff[j] ) << ", tag = ";
02396                 if( tag_size == MB_VARIABLE_LENGTH )
02397                 {
02398                     UNPACK_INT( buff_ptr, dum1 );
02399                     tot_length += dum1;
02400                     std::cerr << "(variable, length = " << dum1 << ")" << std::endl;
02401                 }
02402                 else if( data_type == MB_TYPE_DOUBLE )
02403                 {
02404                     double dum_dbl;
02405                     UNPACK_DBL( buff_ptr, dum_dbl );
02406                     std::cerr << dum_dbl << std::endl;
02407                 }
02408                 else if( data_type == MB_TYPE_INTEGER )
02409                 {
02410                     int dum_int;
02411                     UNPACK_INT( buff_ptr, dum_int );
02412                     std::cerr << dum_int << std::endl;
02413                 }
02414                 else if( data_type == MB_TYPE_OPAQUE )
02415                 {
02416                     std::cerr << "(opaque)" << std::endl;
02417                     buff_ptr += tag_size;
02418                 }
02419                 else if( data_type == MB_TYPE_HANDLE )
02420                 {
02421                     EntityHandle dum_eh;
02422                     UNPACK_EH( buff_ptr, &dum_eh, 1 );
02423                     std::cerr << dum_eh << std::endl;
02424                 }
02425                 else if( data_type == MB_TYPE_BIT )
02426                 {
02427                     std::cerr << "(bit)" << std::endl;
02428                     buff_ptr += tag_size;
02429                 }
02430             }
02431             if( tag_size == MB_VARIABLE_LENGTH ) buff_ptr += tot_length;
02432         }
02433     }
02434     else
02435     {
02436         assert( false );
02437         return MB_FAILURE;
02438     }
02439 
02440     std::cerr.flush();
02441 
02442     return MB_SUCCESS;
02443 }
02444 
02445 ErrorCode ParallelComm::list_entities( const EntityHandle* ents, int num_ents )
02446 {
02447     if( NULL == ents )
02448     {
02449         Range shared_ents;
02450         std::copy( sharedEnts.begin(), sharedEnts.end(), range_inserter( shared_ents ) );
02451         shared_ents.print( "Shared entities:\n" );
02452         return MB_SUCCESS;
02453     }
02454 
02455     unsigned char pstat;
02456     EntityHandle tmp_handles[MAX_SHARING_PROCS];
02457     int tmp_procs[MAX_SHARING_PROCS];
02458     unsigned int num_ps;
02459     ErrorCode result;
02460 
02461     for( int i = 0; i < num_ents; i++ )
02462     {
02463         result = mbImpl->list_entities( ents + i, 1 );MB_CHK_ERR( result );
02464         double coords[3];
02465         result = mbImpl->get_coords( ents + i, 1, coords );
02466         std::cout << " coords: " << coords[0] << " " << coords[1] << " " << coords[2] << "\n";
02467 
02468         result = get_sharing_data( ents[i], tmp_procs, tmp_handles, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
02469 
02470         std::cout << "Pstatus: ";
02471         if( !num_ps )
02472             std::cout << "local " << std::endl;
02473         else
02474         {
02475             if( pstat & PSTATUS_NOT_OWNED ) std::cout << "NOT_OWNED; ";
02476             if( pstat & PSTATUS_SHARED ) std::cout << "SHARED; ";
02477             if( pstat & PSTATUS_MULTISHARED ) std::cout << "MULTISHARED; ";
02478             if( pstat & PSTATUS_INTERFACE ) std::cout << "INTERFACE; ";
02479             if( pstat & PSTATUS_GHOST ) std::cout << "GHOST; ";
02480             std::cout << std::endl;
02481             for( unsigned int j = 0; j < num_ps; j++ )
02482             {
02483                 std::cout << "  proc " << tmp_procs[j] << " id (handle) " << mbImpl->id_from_handle( tmp_handles[j] )
02484                           << "(" << tmp_handles[j] << ")" << std::endl;
02485             }
02486         }
02487         std::cout << std::endl;
02488     }
02489 
02490     return MB_SUCCESS;
02491 }
02492 
02493 ErrorCode ParallelComm::list_entities( const Range& ents )
02494 {
02495     for( Range::iterator rit = ents.begin(); rit != ents.end(); ++rit )
02496         list_entities( &( *rit ), 1 );
02497 
02498     return MB_SUCCESS;
02499 }
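// A minimal usage sketch, assuming an initialized ParallelComm* pc attached to a
// loaded, partitioned mesh (variable names here are illustrative only): either
// overload above can be used to dump sharing information while debugging.
/*
    // List every shared entity known to this ParallelComm instance:
    pc->list_entities( NULL, 0 );

    // Or list sharing data for a specific set of entities:
    Range my_shared;
    pc->get_shared_entities( -1, my_shared );  // entities shared with any other proc
    pc->list_entities( my_shared );
*/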
02500 
02501 ErrorCode ParallelComm::update_remote_data( Range& local_range, Range& remote_range, int other_proc,
02502                                             const unsigned char add_pstat )
02503 {
02504     Range::iterator rit, rit2;
02505     ErrorCode result = MB_SUCCESS;
02506 
02507     // For each pair of local/remote handles:
02508     for( rit = local_range.begin(), rit2 = remote_range.begin(); rit != local_range.end(); ++rit, ++rit2 )
02509     {
02510         result = update_remote_data( *rit, &other_proc, &( *rit2 ), 1, add_pstat );MB_CHK_ERR( result );
02511     }
02512 
02513     return MB_SUCCESS;
02514 }
02515 
02516 ErrorCode ParallelComm::update_remote_data( const EntityHandle new_h, const int* ps, const EntityHandle* hs,
02517                                             const int num_ps, const unsigned char add_pstat
02518                                             // The following lines left in for future debugging, at least until I trust
02519                                             // this function; tjt, 10/4/2013
02520                                             //                                           , int *new_ps,
02521                                             //                                           EntityHandle *new_hs,
02522                                             //                                           int &new_numps,
02523                                             //                                           unsigned char &new_pstat
02524 )
02525 {
02526     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02527     // in this function, so no need to initialize; sharing data does not include
02528     // this proc if shared with only one other
02529 
02530     // Following variables declared here to avoid compiler errors
02531     int new_numps;
02532     unsigned char new_pstat;
02533     std::vector< int > new_ps( MAX_SHARING_PROCS, -1 );
02534     std::vector< EntityHandle > new_hs( MAX_SHARING_PROCS, 0 );
02535 
02536     new_numps        = 0;
02537     ErrorCode result = get_sharing_data( new_h, &new_ps[0], &new_hs[0], new_pstat, new_numps );MB_CHK_SET_ERR( result, "Failed to get sharing data in update_remote_data" );
02538     int num_exist = new_numps;
02539 
02540     // Add new pstat info to the flag
02541     new_pstat |= add_pstat;
02542 
02543     /*
02544     #define plist(str, lst, siz)                                          \
02545         std::cout << str << "(";                                          \
02546         for (int i = 0; i < (int)siz; i++) std::cout << lst[i] << " ";    \
02547         std::cout << ") ";                                                \
02548 
02549         std::cout << "update_remote_data: rank = " << rank() << ", new_h = " << new_h << std::endl;
02550         std::string ostr;
02551         plist("ps", ps, num_ps);
02552         plist("hs", hs, num_ps);
02553         print_pstatus(add_pstat, ostr);
02554         std::cout << ", add_pstat = " << ostr.c_str() << std::endl;
02555         plist("tag_ps", new_ps, new_numps);
02556         plist("tag_hs", new_hs, new_numps);
02557         assert(new_numps <= size());
02558         print_pstatus(new_pstat, ostr);
02559         std::cout << ", tag_pstat=" << ostr.c_str() << std::endl;
02560     */
02561 
02562 #ifndef NDEBUG
02563     {
02564         // Check for duplicates in proc list
02565         std::set< unsigned int > dumprocs;
02566         unsigned int dp = 0;
02567         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
02568             dumprocs.insert( ps[dp] );
02569         assert( dp == dumprocs.size() );
02570     }
02571 #endif
02572 
02573     // If only one sharer and I'm the owner, insert myself in the list;
02574     // otherwise, my data is checked at the end
02575     if( 1 == new_numps && !( new_pstat & PSTATUS_NOT_OWNED ) )
02576     {
02577         new_hs[1] = new_hs[0];
02578         new_ps[1] = new_ps[0];
02579         new_hs[0] = new_h;
02580         new_ps[0] = rank();
02581         new_numps = 2;
02582     }
02583 
02584     // Now put passed-in data onto lists
02585     int idx;
02586     for( int i = 0; i < num_ps; i++ )
02587     {
02588         idx = std::find( &new_ps[0], &new_ps[0] + new_numps, ps[i] ) - &new_ps[0];
02589         if( idx < new_numps )
02590         {
02591             if( !new_hs[idx] && hs[i] )
02592                 // h on list is 0 and passed-in h is non-zero, replace it
02593                 new_hs[idx] = hs[i];
02594             else
02595                 assert( !hs[i] || new_hs[idx] == hs[i] );
02596         }
02597         else
02598         {
02599             if( new_numps + 1 == MAX_SHARING_PROCS )
02600             {
02601                 MB_SET_ERR( MB_FAILURE, "Exceeded MAX_SHARING_PROCS for "
02602                                             << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) ) << ' '
02603                                             << ID_FROM_HANDLE( new_h ) << " in process " << rank() );
02604             }
02605             new_ps[new_numps] = ps[i];
02606             new_hs[new_numps] = hs[i];
02607             new_numps++;
02608         }
02609     }
02610 
02611     // Add myself, if it isn't there already
02612     idx = std::find( &new_ps[0], &new_ps[0] + new_numps, rank() ) - &new_ps[0];
02613     if( idx == new_numps )
02614     {
02615         new_ps[new_numps] = rank();
02616         new_hs[new_numps] = new_h;
02617         new_numps++;
02618     }
02619     else if( !new_hs[idx] && new_numps > 2 )
02620         new_hs[idx] = new_h;
02621 
02622     // Proc list is complete; update for shared, multishared
02623     if( new_numps > 1 )
02624     {
02625         if( new_numps > 2 ) new_pstat |= PSTATUS_MULTISHARED;
02626         new_pstat |= PSTATUS_SHARED;
02627     }
02628 
02629     /*
02630         plist("new_ps", new_ps, new_numps);
02631         plist("new_hs", new_hs, new_numps);
02632         print_pstatus(new_pstat, ostr);
02633         std::cout << ", new_pstat=" << ostr.c_str() << std::endl;
02634         std::cout << std::endl;
02635     */
02636 
02637     result = set_sharing_data( new_h, new_pstat, num_exist, new_numps, &new_ps[0], &new_hs[0] );MB_CHK_SET_ERR( result, "Failed to set sharing data in update_remote_data" );
02638 
02639     if( new_pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
02640 
02641     return MB_SUCCESS;
02642 }
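// A worked example of the merge above, assuming this is rank 2 and the entity is
// currently shared with rank 3 only (simple shared pair, owned locally):
//   initial sharing data:    new_ps = {3},      new_hs = {h3},        new_numps = 1
//   owner inserts itself:    new_ps = {2, 3},   new_hs = {new_h, h3}, new_numps = 2
//   passed-in ps = {3, 5}, hs = {h3, h5}: 3 is already present (handles agree),
//   5 is appended:           new_ps = {2, 3, 5}, new_hs = {new_h, h3, h5}
//   new_numps = 3 > 2, so PSTATUS_SHARED | PSTATUS_MULTISHARED are set and the
//   multi-shared tags (sharedps/sharedhs) receive the lists, analogous to
//   update_remote_data_old below.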
02643 
02644 ErrorCode ParallelComm::update_remote_data_old( const EntityHandle new_h, const int* ps, const EntityHandle* hs,
02645                                                 const int num_ps, const unsigned char add_pstat )
02646 {
02647     EntityHandle tag_hs[MAX_SHARING_PROCS];
02648     int tag_ps[MAX_SHARING_PROCS];
02649     unsigned char pstat;
02650     // Get initial sharing data; tag_ps and tag_hs get terminated with -1 and 0
02651     // in this function, so no need to initialize
02652     unsigned int num_exist;
02653     ErrorCode result = get_sharing_data( new_h, tag_ps, tag_hs, pstat, num_exist );MB_CHK_ERR( result );
02654 
02655 #ifndef NDEBUG
02656     {
02657         // Check for duplicates in proc list
02658         std::set< unsigned int > dumprocs;
02659         unsigned int dp = 0;
02660         for( ; (int)dp < num_ps && -1 != ps[dp]; dp++ )
02661             dumprocs.insert( ps[dp] );
02662         assert( dp == dumprocs.size() );
02663     }
02664 #endif
02665 
02666     // Add any new sharing data
02667     bool changed = false;
02668     int idx;
02669     if( !num_exist )
02670     {
02671         // Just take what caller passed
02672         memcpy( tag_ps, ps, num_ps * sizeof( int ) );
02673         memcpy( tag_hs, hs, num_ps * sizeof( EntityHandle ) );
02674         num_exist = num_ps;
02675         // If it's only one, hopefully I'm not there yet...
02676         assert( "I shouldn't be the only proc there." && ( 1 != num_exist || ps[0] != (int)procConfig.proc_rank() ) );
02677         changed = true;
02678     }
02679     else
02680     {
02681         for( int i = 0; i < num_ps; i++ )
02682         {
02683             idx = std::find( tag_ps, tag_ps + num_exist, ps[i] ) - tag_ps;
02684             if( idx == (int)num_exist )
02685             {
02686                 if( num_exist == MAX_SHARING_PROCS )
02687                 {
02688                     std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_h ) )
02689                               << ' ' << ID_FROM_HANDLE( new_h ) << " in process " << proc_config().proc_rank()
02690                               << std::endl;
02691                     std::cerr.flush();
02692                     MPI_Abort( proc_config().proc_comm(), 66 );
02693                 }
02694 
02695                 // If there's only 1 sharing proc, and it's not me, then
02696                 // we'll end up with 3; add me to the front
02697                 if( !i && num_ps == 1 && num_exist == 1 && ps[0] != (int)procConfig.proc_rank() )
02698                 {
02699                     int j = 1;
02700                     // If I own this entity, put me at front, otherwise after first
02701                     if( !( pstat & PSTATUS_NOT_OWNED ) )
02702                     {
02703                         tag_ps[1] = tag_ps[0];
02704                         tag_hs[1] = tag_hs[0];
02705                         j         = 0;
02706                     }
02707                     tag_ps[j] = procConfig.proc_rank();
02708                     tag_hs[j] = new_h;
02709                     num_exist++;
02710                 }
02711 
02712                 tag_ps[num_exist] = ps[i];
02713                 tag_hs[num_exist] = hs[i];
02714                 num_exist++;
02715                 changed = true;
02716             }
02717             else if( 0 == tag_hs[idx] )
02718             {
02719                 tag_hs[idx] = hs[i];
02720                 changed     = true;
02721             }
02722             else if( 0 != hs[i] )
02723             {
02724                 assert( hs[i] == tag_hs[idx] );
02725             }
02726         }
02727     }
02728 
02729     // Adjust for interface layer if necessary
02730     if( add_pstat & PSTATUS_INTERFACE )
02731     {
02732         idx = std::min_element( tag_ps, tag_ps + num_exist ) - tag_ps;
02733         if( idx )
02734         {
02735             int tag_proc       = tag_ps[idx];
02736             tag_ps[idx]        = tag_ps[0];
02737             tag_ps[0]          = tag_proc;
02738             EntityHandle tag_h = tag_hs[idx];
02739             tag_hs[idx]        = tag_hs[0];
02740             tag_hs[0]          = tag_h;
02741             changed            = true;
02742             if( tag_ps[0] != (int)procConfig.proc_rank() ) pstat |= PSTATUS_NOT_OWNED;
02743         }
02744     }
02745 
02746     if( !changed ) return MB_SUCCESS;
02747 
02748     assert( "interface entities should have > 1 proc" && ( !( add_pstat & PSTATUS_INTERFACE ) || num_exist > 1 ) );
02749     assert( "ghost entities should have > 1 proc" && ( !( add_pstat & PSTATUS_GHOST ) || num_exist > 1 ) );
02750 
02751     // If it's multi-shared and we created the entity in this unpack,
02752     // local handle probably isn't in handle list yet
02753     if( num_exist > 2 )
02754     {
02755         idx = std::find( tag_ps, tag_ps + num_exist, procConfig.proc_rank() ) - tag_ps;
02756         assert( idx < (int)num_exist );
02757         if( !tag_hs[idx] ) tag_hs[idx] = new_h;
02758     }
02759 
02760     int tag_p;
02761     EntityHandle tag_h;
02762 
02763     // Update pstat
02764     pstat |= add_pstat;
02765 
02766     if( num_exist > 2 )
02767         pstat |= ( PSTATUS_MULTISHARED | PSTATUS_SHARED );
02768     else if( num_exist > 0 )
02769         pstat |= PSTATUS_SHARED;
02770 
02771     //    compare_remote_data(new_h, num_ps, hs, ps, add_pstat,
02772     //                        num_exist, tag_hs, tag_ps, pstat);
02773 
02774     // Reset single shared proc/handle if was shared and moving to multi-shared
02775     if( num_exist > 2 && !( pstat & PSTATUS_MULTISHARED ) && ( pstat & PSTATUS_SHARED ) )
02776     {
02777         // Must remove sharedp/h first, which really means set to default value
02778         tag_p  = -1;
02779         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, &tag_p );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
02780         tag_h  = 0;
02781         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, &tag_h );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
02782     }
02783 
02784     // Set sharing tags
02785     if( num_exist > 2 )
02786     {
02787         std::fill( tag_ps + num_exist, tag_ps + MAX_SHARING_PROCS, -1 );
02788         std::fill( tag_hs + num_exist, tag_hs + MAX_SHARING_PROCS, 0 );
02789         result = mbImpl->tag_set_data( sharedps_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedps tag data" );
02790         result = mbImpl->tag_set_data( sharedhs_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag data" );
02791 
02792 #ifndef NDEBUG
02793         {
02794             // Check for duplicates in proc list
02795             std::set< unsigned int > dumprocs;
02796             unsigned int dp = 0;
02797             for( ; dp < num_exist && -1 != tag_ps[dp]; dp++ )
02798                 dumprocs.insert( tag_ps[dp] );
02799             assert( dp == dumprocs.size() );
02800         }
02801 #endif
02802     }
02803     else if( num_exist == 2 || num_exist == 1 )
02804     {
02805         if( tag_ps[0] == (int)procConfig.proc_rank() )
02806         {
02807             assert( 2 == num_exist && tag_ps[1] != (int)procConfig.proc_rank() );
02808             tag_ps[0] = tag_ps[1];
02809             tag_hs[0] = tag_hs[1];
02810         }
02811         assert( tag_ps[0] != -1 && tag_hs[0] != 0 );
02812         result = mbImpl->tag_set_data( sharedp_tag(), &new_h, 1, tag_ps );MB_CHK_SET_ERR( result, "Failed to set sharedp tag data" );
02813         result = mbImpl->tag_set_data( sharedh_tag(), &new_h, 1, tag_hs );MB_CHK_SET_ERR( result, "Failed to set sharedh tag data" );
02814     }
02815 
02816     // Now set new pstatus
02817     result = mbImpl->tag_set_data( pstatus_tag(), &new_h, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
02818 
02819     if( pstat & PSTATUS_SHARED ) sharedEnts.insert( new_h );
02820 
02821     return MB_SUCCESS;
02822 }
02823 
02824 ErrorCode ParallelComm::get_sharing_data( const Range& entities, std::set< int >& procs, int operation )
02825 {
02826     // Get the union or intersection of sharing data for multiple entities
02827     ErrorCode result;
02828     int sp2[MAX_SHARING_PROCS];
02829     int num_ps;
02830     unsigned char pstat;
02831     std::set< int > tmp_procs;
02832     procs.clear();
02833 
02834     for( Range::const_iterator rit = entities.begin(); rit != entities.end(); ++rit )
02835     {
02836         // Get sharing procs
02837         result = get_sharing_data( *rit, sp2, NULL, pstat, num_ps );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_sharing_data" );
02838         if( !( pstat & PSTATUS_SHARED ) && Interface::INTERSECT == operation )
02839         {
02840             procs.clear();
02841             return MB_SUCCESS;
02842         }
02843 
02844         if( rit == entities.begin() ) { std::copy( sp2, sp2 + num_ps, std::inserter( procs, procs.begin() ) ); }
02845         else
02846         {
02847             std::sort( sp2, sp2 + num_ps );
02848             tmp_procs.clear();
02849             if( Interface::UNION == operation )
02850                 std::set_union( procs.begin(), procs.end(), sp2, sp2 + num_ps,
02851                                 std::inserter( tmp_procs, tmp_procs.end() ) );
02852             else if( Interface::INTERSECT == operation )
02853                 std::set_intersection( procs.begin(), procs.end(), sp2, sp2 + num_ps,
02854                                        std::inserter( tmp_procs, tmp_procs.end() ) );
02855             else
02856             {
02857                 assert( "Unknown operation." && false );
02858                 return MB_FAILURE;
02859             }
02860             procs.swap( tmp_procs );
02861         }
02862         if( Interface::INTERSECT == operation && procs.empty() ) return MB_SUCCESS;
02863     }
02864 
02865     return MB_SUCCESS;
02866 }
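// A brief usage sketch, assuming an initialized ParallelComm* pc and a Range
// iface_ents of interface entities (names here are illustrative):
/*
    std::set< int > procs_all, procs_any;
    // Processors that share every entity in iface_ents:
    pc->get_sharing_data( iface_ents, procs_all, Interface::INTERSECT );
    // Processors that share at least one entity in iface_ents:
    pc->get_sharing_data( iface_ents, procs_any, Interface::UNION );
*/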
02867 
02868 ErrorCode ParallelComm::get_sharing_data( const EntityHandle entity, int* ps, EntityHandle* hs, unsigned char& pstat,
02869                                           unsigned int& num_ps )
02870 {
02871     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
02872     if( pstat & PSTATUS_MULTISHARED )
02873     {
02874         result = mbImpl->tag_get_data( sharedps_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedps tag data" );
02875         if( hs )
02876         {
02877             result = mbImpl->tag_get_data( sharedhs_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedhs tag data" );
02878         }
02879         num_ps = std::find( ps, ps + MAX_SHARING_PROCS, -1 ) - ps;
02880     }
02881     else if( pstat & PSTATUS_SHARED )
02882     {
02883         result = mbImpl->tag_get_data( sharedp_tag(), &entity, 1, ps );MB_CHK_SET_ERR( result, "Failed to get sharedp tag data" );
02884         if( hs )
02885         {
02886             result = mbImpl->tag_get_data( sharedh_tag(), &entity, 1, hs );MB_CHK_SET_ERR( result, "Failed to get sharedh tag data" );
02887             hs[1] = 0;
02888         }
02889         // Initialize past end of data
02890         ps[1]  = -1;
02891         num_ps = 1;
02892     }
02893     else
02894     {
02895         ps[0] = -1;
02896         if( hs ) hs[0] = 0;
02897         num_ps = 0;
02898     }
02899 
02900     assert( MAX_SHARING_PROCS >= num_ps );
02901 
02902     return MB_SUCCESS;
02903 }
02904 
02905 ErrorCode ParallelComm::find_existing_entity( const bool is_iface, const int owner_p, const EntityHandle owner_h,
02906                                               const int num_ps, const EntityHandle* connect, const int num_connect,
02907                                               const EntityType this_type, std::vector< EntityHandle >& L2hloc,
02908                                               std::vector< EntityHandle >& L2hrem, std::vector< unsigned int >& L2p,
02909                                               EntityHandle& new_h )
02910 {
02911     new_h = 0;
02912     if( !is_iface && num_ps > 2 )
02913     {
02914         for( unsigned int i = 0; i < L2hrem.size(); i++ )
02915         {
02916             if( L2hrem[i] == owner_h && owner_p == (int)L2p[i] )
02917             {
02918                 new_h = L2hloc[i];
02919                 return MB_SUCCESS;
02920             }
02921         }
02922     }
02923 
02924     // If we got here and it's a vertex, we don't need to look further
02925     if( MBVERTEX == this_type || !connect || !num_connect ) return MB_SUCCESS;
02926 
02927     Range tmp_range;
02928     ErrorCode result = mbImpl->get_adjacencies( connect, num_connect, CN::Dimension( this_type ), false, tmp_range );MB_CHK_SET_ERR( result, "Failed to get existing entity" );
02929     if( !tmp_range.empty() )
02930     {
02931         // Found a corresponding entity - return target
02932         new_h = *tmp_range.begin();
02933     }
02934     else
02935     {
02936         new_h = 0;
02937     }
02938 
02939     return MB_SUCCESS;
02940 }
02941 
02942 ErrorCode ParallelComm::get_local_handles( const Range& remote_handles, Range& local_handles,
02943                                            const std::vector< EntityHandle >& new_ents )
02944 {
02945     std::vector< EntityHandle > rh_vec;
02946     rh_vec.reserve( remote_handles.size() );
02947     std::copy( remote_handles.begin(), remote_handles.end(), std::back_inserter( rh_vec ) );
02948     ErrorCode result = get_local_handles( &rh_vec[0], remote_handles.size(), new_ents );
02949     std::copy( rh_vec.begin(), rh_vec.end(), range_inserter( local_handles ) );
02950     return result;
02951 }
02952 
02953 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents, const Range& new_ents )
02954 {
02955     std::vector< EntityHandle > tmp_ents;
02956     std::copy( new_ents.begin(), new_ents.end(), std::back_inserter( tmp_ents ) );
02957     return get_local_handles( from_vec, num_ents, tmp_ents );
02958 }
02959 
02960 ErrorCode ParallelComm::get_local_handles( EntityHandle* from_vec, int num_ents,
02961                                            const std::vector< EntityHandle >& new_ents )
02962 {
02963     for( int i = 0; i < num_ents; i++ )
02964     {
02965         if( TYPE_FROM_HANDLE( from_vec[i] ) == MBMAXTYPE )
02966         {
02967             assert( ID_FROM_HANDLE( from_vec[i] ) < (int)new_ents.size() );
02968             from_vec[i] = new_ents[ID_FROM_HANDLE( from_vec[i] )];
02969         }
02970     }
02971 
02972     return MB_SUCCESS;
02973 }
02974 
02975 /*
02976 template <typename T> void
02977 insert_in_array(T* array, size_t array_size, size_t location, T value)
02978 {
02979   assert(location + 1 < array_size);
02980   for (size_t i = array_size - 1; i > location; i--)
02981     array[i] = array[i - 1];
02982   array[location] = value;
02983 }
02984 */
02985 
02986 ErrorCode ParallelComm::pack_range_map( Range& key_range, EntityHandle val_start, HandleMap& handle_map )
02987 {
02988     for( Range::const_pair_iterator key_it = key_range.const_pair_begin(); key_it != key_range.const_pair_end();
02989          ++key_it )
02990     {
02991         int tmp_num = ( *key_it ).second - ( *key_it ).first + 1;
02992         handle_map.insert( ( *key_it ).first, val_start, tmp_num );
02993         val_start += tmp_num;
02994     }
02995 
02996     return MB_SUCCESS;
02997 }
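// A worked example of the mapping built above: if key_range contains the handle
// pairs {[10, 12], [20, 20]} and val_start is 1000, the loop inserts
//   10 -> 1000, 11 -> 1001, 12 -> 1002   (tmp_num = 3, val_start becomes 1003)
//   20 -> 1003                           (tmp_num = 1, val_start becomes 1004)
// giving a compact map from the handles in key_range onto a contiguous block
// starting at val_start.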
02998 
02999 ErrorCode ParallelComm::pack_sets( Range& entities, Buffer* buff, const bool store_remote_handles, const int to_proc )
03000 {
03001     // SETS:
03002     // . #sets
03003     // . for each set:
03004     //   - options[#sets] (unsigned int)
03005     //   - if (unordered) set range
03006     //   - else if ordered
03007     //     . #ents in set
03008     //     . handles[#ents]
03009     //   - #parents
03010     //   - if (#parents) handles[#parents]
03011     //   - #children
03012     //   - if (#children) handles[#children]
03013 
03014     // Now the sets; assume any sets the application wants to pass are in the entities list
03015     ErrorCode result;
03016     Range all_sets = entities.subset_by_type( MBENTITYSET );
03017 
03018     int buff_size = estimate_sets_buffer_size( all_sets, store_remote_handles );
03019     if( buff_size < 0 ) MB_SET_ERR( MB_FAILURE, "Failed to estimate sets buffer size" );
03020     buff->check_space( buff_size );
03021 
03022     // Number of sets
03023     PACK_INT( buff->buff_ptr, all_sets.size() );
03024 
03025     // Options for all sets
03026     std::vector< unsigned int > options( all_sets.size() );
03027     Range::iterator rit;
03028     std::vector< EntityHandle > members;
03029     int i;
03030     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03031     {
03032         result = mbImpl->get_meshset_options( *rit, options[i] );MB_CHK_SET_ERR( result, "Failed to get meshset options" );
03033     }
03034     buff->check_space( all_sets.size() * sizeof( unsigned int ) );
03035     PACK_VOID( buff->buff_ptr, &options[0], all_sets.size() * sizeof( unsigned int ) );
03036 
03037     // Pack parallel geometry unique id
03038     if( !all_sets.empty() )
03039     {
03040         Tag uid_tag;
03041         int n_sets  = all_sets.size();
03042         bool b_pack = false;
03043         std::vector< int > id_data( n_sets );
03044         result =
03045             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
03046 
03047         result = mbImpl->tag_get_data( uid_tag, all_sets, &id_data[0] );
03048         if( MB_TAG_NOT_FOUND != result )
03049         {
03050             if( MB_SUCCESS != result ) MB_SET_ERR( result, "Failed to get parallel geometry unique ids" );
03051             for( i = 0; i < n_sets; i++ )
03052             {
03053                 if( id_data[i] != 0 )
03054                 {
03055                     b_pack = true;
03056                     break;
03057                 }
03058             }
03059         }
03060 
03061         if( b_pack )
03062         {  // If any set has a nonzero unique id, pack the ids for all sets
03063             buff->check_space( ( n_sets + 1 ) * sizeof( int ) );
03064             PACK_INT( buff->buff_ptr, n_sets );
03065             PACK_INTS( buff->buff_ptr, &id_data[0], n_sets );
03066         }
03067         else
03068         {
03069             buff->check_space( sizeof( int ) );
03070             PACK_INT( buff->buff_ptr, 0 );
03071         }
03072     }
03073 
03074     // Vectors/ranges
03075     std::vector< EntityHandle > entities_vec( entities.size() );
03076     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
03077     for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03078     {
03079         members.clear();
03080         result = mbImpl->get_entities_by_handle( *rit, members );MB_CHK_SET_ERR( result, "Failed to get entities in ordered set" );
03081         result =
03082             get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc, entities_vec );MB_CHK_SET_ERR( result, "Failed in get_remote_handles" );
03083         buff->check_space( members.size() * sizeof( EntityHandle ) + sizeof( int ) );
03084         PACK_INT( buff->buff_ptr, members.size() );
03085         PACK_EH( buff->buff_ptr, &members[0], members.size() );
03086     }
03087 
03088     // Pack parent/child sets
03089     if( !store_remote_handles )
03090     {  // Only packed when not storing remote handles
03091         // Pack numbers of parents/children
03092         unsigned int tot_pch = 0;
03093         int num_pch;
03094         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
03095         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03096         {
03097             // Pack parents
03098             result = mbImpl->num_parent_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num parents" );
03099             PACK_INT( buff->buff_ptr, num_pch );
03100             tot_pch += num_pch;
03101             result = mbImpl->num_child_meshsets( *rit, &num_pch );MB_CHK_SET_ERR( result, "Failed to get num children" );
03102             PACK_INT( buff->buff_ptr, num_pch );
03103             tot_pch += num_pch;
03104         }
03105 
03106         // Now pack actual parents/children
03107         members.clear();
03108         members.reserve( tot_pch );
03109         std::vector< EntityHandle > tmp_pch;
03110         for( rit = all_sets.begin(), i = 0; rit != all_sets.end(); ++rit, i++ )
03111         {
03112             result = mbImpl->get_parent_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get parents" );
03113             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
03114             tmp_pch.clear();
03115             result = mbImpl->get_child_meshsets( *rit, tmp_pch );MB_CHK_SET_ERR( result, "Failed to get children" );
03116             std::copy( tmp_pch.begin(), tmp_pch.end(), std::back_inserter( members ) );
03117             tmp_pch.clear();
03118         }
03119         assert( members.size() == tot_pch );
03120         if( !members.empty() )
03121         {
03122             result = get_remote_handles( store_remote_handles, &members[0], &members[0], members.size(), to_proc,
03123                                          entities_vec );MB_CHK_SET_ERR( result, "Failed to get remote handles for set parent/child sets" );
03124 #ifndef NDEBUG
03125             // Check that all handles are either sets or maxtype
03126             for( unsigned int __j = 0; __j < members.size(); __j++ )
03127                 assert( ( TYPE_FROM_HANDLE( members[__j] ) == MBMAXTYPE &&
03128                           ID_FROM_HANDLE( members[__j] ) < (int)entities.size() ) ||
03129                         TYPE_FROM_HANDLE( members[__j] ) == MBENTITYSET );
03130 #endif
03131             buff->check_space( members.size() * sizeof( EntityHandle ) );
03132             PACK_EH( buff->buff_ptr, &members[0], members.size() );
03133         }
03134     }
03135     else
03136     {
03137         buff->check_space( 2 * all_sets.size() * sizeof( int ) );
03138         for( rit = all_sets.begin(); rit != all_sets.end(); ++rit )
03139         {
03140             PACK_INT( buff->buff_ptr, 0 );
03141             PACK_INT( buff->buff_ptr, 0 );
03142         }
03143     }
03144 
03145     // Pack the handles
03146     if( store_remote_handles && !all_sets.empty() )
03147     {
03148         buff_size = RANGE_SIZE( all_sets );
03149         buff->check_space( buff_size );
03150         PACK_RANGE( buff->buff_ptr, all_sets );
03151     }
03152 
03153     myDebug->tprintf( 4, "Done packing sets.\n" );
03154 
03155     buff->set_stored_size();
03156 
03157     return MB_SUCCESS;
03158 }
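// A worked layout example for the buffer written above, for a single set with two
// member entities, no PARALLEL_UNIQUE_ID values, and store_remote_handles == true:
//   [int]  1                       number of sets
//   [uint] options for the set
//   [int]  0                       no parallel geometry unique ids packed
//   [int]  2, [2 x EntityHandle]   member count and (remote) member handles
//   [int]  0, [int]  0             parent/child counts (not sent with remote handles)
//   [range]                        the source set handles, for remote-handle matching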
03159 
03160 ErrorCode ParallelComm::unpack_sets( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities,
03161                                      const bool store_remote_handles, const int from_proc )
03162 {
03163     // Now the sets; assume any sets the application wants to pass are in the entities list
03164     ErrorCode result;
03165 
03166     bool no_sets = ( entities.empty() || ( mbImpl->type_from_handle( *entities.rbegin() ) == MBENTITYSET ) );
03167 
03168     Range new_sets;
03169     int num_sets;
03170     UNPACK_INT( buff_ptr, num_sets );
03171 
03172     if( !num_sets ) return MB_SUCCESS;
03173 
03174     int i;
03175     Range::const_iterator rit;
03176     std::vector< EntityHandle > members;
03177     int num_ents;
03178     std::vector< unsigned int > options_vec( num_sets );
03179     // Option value
03180     if( num_sets ) UNPACK_VOID( buff_ptr, &options_vec[0], num_sets * sizeof( unsigned int ) );
03181 
03182     // Unpack parallel geometry unique id
03183     int n_uid;
03184     UNPACK_INT( buff_ptr, n_uid );
03185     if( n_uid > 0 && n_uid != num_sets )
03186     {
03187         std::cerr << "The number of Parallel geometry unique ids should be same." << std::endl;
03188     }
03189 
03190     if( n_uid > 0 )
03191     {  // If parallel geometry unique ids were packed
03192         std::vector< int > uids( n_uid );
03193         UNPACK_INTS( buff_ptr, &uids[0], n_uid );
03194 
03195         Tag uid_tag;
03196         result =
03197             mbImpl->tag_get_handle( "PARALLEL_UNIQUE_ID", 1, MB_TYPE_INTEGER, uid_tag, MB_TAG_SPARSE | MB_TAG_CREAT );MB_CHK_SET_ERR( result, "Failed to create parallel geometry unique id tag" );
03198 
03199         // Find existing sets
03200         for( i = 0; i < n_uid; i++ )
03201         {
03202             EntityHandle set_handle;
03203             Range temp_sets;
03204             void* tag_vals[] = { &uids[i] };
03205             if( uids[i] > 0 )
03206             {
03207                 result = mbImpl->get_entities_by_type_and_tag( 0, MBENTITYSET, &uid_tag, tag_vals, 1, temp_sets );
03208             }
03209             if( !temp_sets.empty() )
03210             {  // Existing set
03211                 set_handle = *temp_sets.begin();
03212             }
03213             else
03214             {  // Create a new set
03215                 result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
03216                 result = mbImpl->tag_set_data( uid_tag, &set_handle, 1, &uids[i] );MB_CHK_SET_ERR( result, "Failed to set parallel geometry unique ids" );
03217             }
03218             new_sets.insert( set_handle );
03219         }
03220     }
03221     else
03222     {
03223         // Create sets
03224         for( i = 0; i < num_sets; i++ )
03225         {
03226             EntityHandle set_handle;
03227             result = mbImpl->create_meshset( options_vec[i], set_handle );MB_CHK_SET_ERR( result, "Failed to create set in unpack" );
03228 
03229             // Make sure new set handles are monotonically increasing
03230             assert( new_sets.empty() || set_handle > *new_sets.rbegin() );
03231             new_sets.insert( set_handle );
03232         }
03233     }
03234 
03235     std::copy( new_sets.begin(), new_sets.end(), std::back_inserter( entities ) );
03236     // Only need to sort if we came in with no sets on the end
03237     if( !no_sets ) std::sort( entities.begin(), entities.end() );
03238 
03239     for( rit = new_sets.begin(), i = 0; rit != new_sets.end(); ++rit, i++ )
03240     {
03241         // Unpack entities as vector, with length
03242         UNPACK_INT( buff_ptr, num_ents );
03243         members.resize( num_ents );
03244         if( num_ents ) UNPACK_EH( buff_ptr, &members[0], num_ents );
03245         result = get_local_handles( &members[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for ordered set contents" );
03246         result = mbImpl->add_entities( *rit, &members[0], num_ents );MB_CHK_SET_ERR( result, "Failed to add ents to ordered set in unpack" );
03247     }
03248 
03249     std::vector< int > num_pch( 2 * new_sets.size() );
03250     std::vector< int >::iterator vit;
03251     int tot_pch = 0;
03252     for( vit = num_pch.begin(); vit != num_pch.end(); ++vit )
03253     {
03254         UNPACK_INT( buff_ptr, *vit );
03255         tot_pch += *vit;
03256     }
03257 
03258     members.resize( tot_pch );
03259     UNPACK_EH( buff_ptr, &members[0], tot_pch );
03260     result = get_local_handles( &members[0], tot_pch, entities );MB_CHK_SET_ERR( result, "Failed to get local handle for parent/child sets" );
03261 
03262     int num               = 0;
03263     EntityHandle* mem_ptr = &members[0];
03264     for( rit = new_sets.begin(); rit != new_sets.end(); ++rit )
03265     {
03266         // Unpack parents/children
03267         int num_par = num_pch[num++], num_child = num_pch[num++];
03268         if( num_par + num_child )
03269         {
03270             for( i = 0; i < num_par; i++ )
03271             {
03272                 assert( 0 != mem_ptr[i] );
03273                 result = mbImpl->add_parent_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add parent to set in unpack" );
03274             }
03275             mem_ptr += num_par;
03276             for( i = 0; i < num_child; i++ )
03277             {
03278                 assert( 0 != mem_ptr[i] );
03279                 result = mbImpl->add_child_meshset( *rit, mem_ptr[i] );MB_CHK_SET_ERR( result, "Failed to add child to set in unpack" );
03280             }
03281             mem_ptr += num_child;
03282         }
03283     }
03284 
03285     // Unpack source handles
03286     Range dum_range;
03287     if( store_remote_handles && !new_sets.empty() )
03288     {
03289         UNPACK_RANGE( buff_ptr, dum_range );
03290         result = update_remote_data( new_sets, dum_range, from_proc, 0 );MB_CHK_SET_ERR( result, "Failed to set sharing data for sets" );
03291     }
03292 
03293     myDebug->tprintf( 4, "Done unpacking sets." );
03294 
03295     return MB_SUCCESS;
03296 }
03297 
03298 ErrorCode ParallelComm::pack_adjacencies( Range& /*entities*/, Range::const_iterator& /*start_rit*/,
03299                                           Range& /*whole_range*/, unsigned char*& /*buff_ptr*/, int& /*count*/,
03300                                           const bool /*just_count*/, const bool /*store_handles*/,
03301                                           const int /*to_proc*/ )
03302 {
03303     return MB_FAILURE;
03304 }
03305 
03306 ErrorCode ParallelComm::unpack_adjacencies( unsigned char*& /*buff_ptr*/, Range& /*entities*/,
03307                                             const bool /*store_handles*/, const int /*from_proc*/ )
03308 {
03309     return MB_FAILURE;
03310 }
03311 
03312 ErrorCode ParallelComm::pack_tags( Range& entities, const std::vector< Tag >& src_tags,
03313                                    const std::vector< Tag >& dst_tags, const std::vector< Range >& tag_ranges,
03314                                    Buffer* buff, const bool store_remote_handles, const int to_proc )
03315 {
03316     ErrorCode result;
03317     std::vector< Tag >::const_iterator tag_it, dst_it;
03318     std::vector< Range >::const_iterator rit;
03319     int count = 0;
03320 
03321     for( tag_it = src_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end(); ++tag_it, ++rit )
03322     {
03323         result = packed_tag_size( *tag_it, *rit, count );
03324         if( MB_SUCCESS != result ) return result;
03325     }
03326 
03327     // Number of tags
03328     count += sizeof( int );
03329 
03330     buff->check_space( count );
03331 
03332     PACK_INT( buff->buff_ptr, src_tags.size() );
03333 
03334     std::vector< EntityHandle > entities_vec( entities.size() );
03335     std::copy( entities.begin(), entities.end(), entities_vec.begin() );
03336 
03337     for( tag_it = src_tags.begin(), dst_it = dst_tags.begin(), rit = tag_ranges.begin(); tag_it != src_tags.end();
03338          ++tag_it, ++dst_it, ++rit )
03339     {
03340         result = pack_tag( *tag_it, *dst_it, *rit, entities_vec, buff, store_remote_handles, to_proc );
03341         if( MB_SUCCESS != result ) return result;
03342     }
03343 
03344     myDebug->tprintf( 4, "Done packing tags." );
03345 
03346     buff->set_stored_size();
03347 
03348     return MB_SUCCESS;
03349 }
03350 
03351 ErrorCode ParallelComm::packed_tag_size( Tag tag, const Range& tagged_entities, int& count )
03352 {
03353     // For dense tags, compute size assuming all entities have that tag
03354     // For sparse tags, get number of entities w/ that tag to compute size
03355 
03356     std::vector< int > var_len_sizes;
03357     std::vector< const void* > var_len_values;
03358 
03359     // Default value
03360     count += sizeof( int );
03361     if( NULL != tag->get_default_value() ) count += tag->get_default_value_size();
03362 
03363     // Size, type, data type
03364     count += 3 * sizeof( int );
03365 
03366     // Name
03367     count += sizeof( int );
03368     count += tag->get_name().size();
03369 
03370     // Range of tag
03371     count += sizeof( int ) + tagged_entities.size() * sizeof( EntityHandle );
03372 
03373     if( tag->get_size() == MB_VARIABLE_LENGTH )
03374     {
03375         const int num_ent = tagged_entities.size();
03376         // Send a tag size for each entity
03377         count += num_ent * sizeof( int );
03378         // Send tag data for each entity
03379         var_len_sizes.resize( num_ent );
03380         var_len_values.resize( num_ent );
03381         ErrorCode result =
03382             tag->get_data( sequenceManager, errorHandler, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get lengths of variable-length tag values" );
03383         count += std::accumulate( var_len_sizes.begin(), var_len_sizes.end(), 0 );
03384     }
03385     else
03386     {
03387         // Tag data values for range or vector
03388         count += tagged_entities.size() * tag->get_size();
03389     }
03390 
03391     return MB_SUCCESS;
03392 }
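// A worked size estimate for the fixed-length branch above, assuming a dense
// 4-byte integer tag named "MATERIAL" (8 characters), no default value, tagged
// on 100 entities, with 8-byte EntityHandles:
//   default-value flag:        sizeof(int)            =   4
//   size, type, data type:     3 * sizeof(int)        =  12
//   name length + name:        sizeof(int) + 8        =  12
//   entity handles:            sizeof(int) + 100 * 8  = 804
//   tag values:                100 * 4                = 400
// for a total of 1232 bytes added to count.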
03393 
03394 ErrorCode ParallelComm::pack_tag( Tag src_tag, Tag dst_tag, const Range& tagged_entities,
03395                                   const std::vector< EntityHandle >& whole_vec, Buffer* buff,
03396                                   const bool store_remote_handles, const int to_proc )
03397 {
03398     ErrorCode result;
03399     std::vector< int > var_len_sizes;
03400     std::vector< const void* > var_len_values;
03401 
03402     if( src_tag != dst_tag )
03403     {
03404         if( dst_tag->get_size() != src_tag->get_size() ) return MB_TYPE_OUT_OF_RANGE;
03405         if( dst_tag->get_data_type() != src_tag->get_data_type() && dst_tag->get_data_type() != MB_TYPE_OPAQUE &&
03406             src_tag->get_data_type() != MB_TYPE_OPAQUE )
03407             return MB_TYPE_OUT_OF_RANGE;
03408     }
03409 
03410     // Size, type, data type
03411     buff->check_space( 3 * sizeof( int ) );
03412     PACK_INT( buff->buff_ptr, src_tag->get_size() );
03413     TagType this_type;
03414     result = mbImpl->tag_get_type( dst_tag, this_type );
03415     PACK_INT( buff->buff_ptr, (int)this_type );
03416     DataType data_type = src_tag->get_data_type();
03417     PACK_INT( buff->buff_ptr, (int)data_type );
03418     int type_size = TagInfo::size_from_data_type( data_type );
03419 
03420     // Default value
03421     if( NULL == src_tag->get_default_value() )
03422     {
03423         buff->check_space( sizeof( int ) );
03424         PACK_INT( buff->buff_ptr, 0 );
03425     }
03426     else
03427     {
03428         buff->check_space( src_tag->get_default_value_size() );
03429         PACK_BYTES( buff->buff_ptr, src_tag->get_default_value(), src_tag->get_default_value_size() );
03430     }
03431 
03432     // Name
03433     buff->check_space( src_tag->get_name().size() );
03434     PACK_BYTES( buff->buff_ptr, dst_tag->get_name().c_str(), dst_tag->get_name().size() );
03435 
03436     myDebug->tprintf( 4, "Packing tag \"%s\"", src_tag->get_name().c_str() );
03437     if( src_tag != dst_tag ) myDebug->tprintf( 4, " (as tag \"%s\")", dst_tag->get_name().c_str() );
03438     myDebug->tprintf( 4, "\n" );
03439 
03440     // Pack entities
03441     buff->check_space( tagged_entities.size() * sizeof( EntityHandle ) + sizeof( int ) );
03442     PACK_INT( buff->buff_ptr, tagged_entities.size() );
03443     std::vector< EntityHandle > dum_tagged_entities( tagged_entities.size() );
03444     result = get_remote_handles( store_remote_handles, tagged_entities, &dum_tagged_entities[0], to_proc, whole_vec );
03445     if( MB_SUCCESS != result )
03446     {
03447         if( myDebug->get_verbosity() == 3 )
03448         {
03449             std::cerr << "Failed to get remote handles for tagged entities:" << std::endl;
03450             tagged_entities.print( "  " );
03451         }
03452         MB_SET_ERR( result, "Failed to get remote handles for tagged entities" );
03453     }
03454 
03455     PACK_EH( buff->buff_ptr, &dum_tagged_entities[0], dum_tagged_entities.size() );
03456 
03457     const size_t num_ent = tagged_entities.size();
03458     if( src_tag->get_size() == MB_VARIABLE_LENGTH )
03459     {
03460         var_len_sizes.resize( num_ent, 0 );
03461         var_len_values.resize( num_ent, 0 );
03462         result = mbImpl->tag_get_by_ptr( src_tag, tagged_entities, &var_len_values[0], &var_len_sizes[0] );MB_CHK_SET_ERR( result, "Failed to get variable-length tag data in pack_tags" );
03463         buff->check_space( num_ent * sizeof( int ) );
03464         PACK_INTS( buff->buff_ptr, &var_len_sizes[0], num_ent );
03465         for( unsigned int i = 0; i < num_ent; i++ )
03466         {
03467             buff->check_space( var_len_sizes[i] );
03468             PACK_VOID( buff->buff_ptr, var_len_values[i], type_size * var_len_sizes[i] );
03469         }
03470     }
03471     else
03472     {
03473         buff->check_space( num_ent * src_tag->get_size() );
03474         // Should be OK to read directly into buffer, since tags are untyped and
03475         // handled by memcpy
03476         result = mbImpl->tag_get_data( src_tag, tagged_entities, buff->buff_ptr );MB_CHK_SET_ERR( result, "Failed to get tag data in pack_tags" );
03477         buff->buff_ptr += num_ent * src_tag->get_size();
03478         PC( num_ent * src_tag->get_size(), " void" );
03479     }
03480 
03481     return MB_SUCCESS;
03482 }
03483 
03484 ErrorCode ParallelComm::get_tag_send_list( const Range& whole_range, std::vector< Tag >& all_tags,
03485                                            std::vector< Range >& tag_ranges )
03486 {
03487     std::vector< Tag > tmp_tags;
03488     ErrorCode result = mbImpl->tag_get_tags( tmp_tags );MB_CHK_SET_ERR( result, "Failed to get tags in pack_tags" );
03489 
03490     std::vector< Tag >::iterator tag_it;
03491     for( tag_it = tmp_tags.begin(); tag_it != tmp_tags.end(); ++tag_it )
03492     {
03493         std::string tag_name;
03494         result = mbImpl->tag_get_name( *tag_it, tag_name );
03495         if( tag_name.c_str()[0] == '_' && tag_name.c_str()[1] == '_' ) continue;
03496 
03497         Range tmp_range;
03498         result = ( *tag_it )->get_tagged_entities( sequenceManager, tmp_range );MB_CHK_SET_ERR( result, "Failed to get entities for tag in pack_tags" );
03499         tmp_range = intersect( tmp_range, whole_range );
03500 
03501         if( tmp_range.empty() ) continue;
03502 
03503         // OK, we'll be sending this tag
03504         all_tags.push_back( *tag_it );
03505         tag_ranges.push_back( Range() );
03506         tag_ranges.back().swap( tmp_range );
03507     }
03508 
03509     return MB_SUCCESS;
03510 }
03511 
03512 ErrorCode ParallelComm::unpack_tags( unsigned char*& buff_ptr, std::vector< EntityHandle >& entities,
03513                                      const bool /*store_remote_handles*/, const int /*from_proc*/,
03514                                      const MPI_Op* const mpi_op )
03515 {
03516     // Tags
03517     // Get all the tags
03518     // For dense tags, compute size assuming all entities have that tag
03519     // For sparse tags, get number of entities w/ that tag to compute size
03520 
03521     ErrorCode result;
03522 
03523     int num_tags;
03524     UNPACK_INT( buff_ptr, num_tags );
03525     std::vector< const void* > var_len_vals;
03526     std::vector< unsigned char > dum_vals;
03527     std::vector< EntityHandle > dum_ehvals;
03528 
03529     for( int i = 0; i < num_tags; i++ )
03530     {
03531         // Tag handle
03532         Tag tag_handle;
03533 
03534         // Size, data type
03535         int tag_size, tag_data_type, tag_type;
03536         UNPACK_INT( buff_ptr, tag_size );
03537         UNPACK_INT( buff_ptr, tag_type );
03538         UNPACK_INT( buff_ptr, tag_data_type );
03539 
03540         // Default value
03541         int def_val_size;
03542         UNPACK_INT( buff_ptr, def_val_size );
03543         void* def_val_ptr = NULL;
03544         if( def_val_size )
03545         {
03546             def_val_ptr = buff_ptr;
03547             buff_ptr += def_val_size;
03548             UPC( tag_size, " void" );
03549         }
03550 
03551         // Name
03552         int name_len;
03553         UNPACK_INT( buff_ptr, name_len );
03554         std::string tag_name( reinterpret_cast< char* >( buff_ptr ), name_len );
03555         buff_ptr += name_len;
03556         UPC( 64, " chars" );
03557 
03558         myDebug->tprintf( 4, "Unpacking tag %s\n", tag_name.c_str() );
03559 
03560         // Create the tag
03561         if( tag_size == MB_VARIABLE_LENGTH )
03562             result = mbImpl->tag_get_handle( tag_name.c_str(), def_val_size, (DataType)tag_data_type, tag_handle,
03563                                              MB_TAG_VARLEN | MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
03564         else
03565             result = mbImpl->tag_get_handle( tag_name.c_str(), tag_size, (DataType)tag_data_type, tag_handle,
03566                                              MB_TAG_CREAT | MB_TAG_BYTES | tag_type, def_val_ptr );
03567         if( MB_SUCCESS != result ) return result;
03568 
03569         // Get handles and convert to local handles
03570         int num_ents;
03571         UNPACK_INT( buff_ptr, num_ents );
03572         std::vector< EntityHandle > dum_ents( num_ents );
03573         UNPACK_EH( buff_ptr, &dum_ents[0], num_ents );
03574 
03575         // In this case handles are indices into new entity range; need to convert
03576         // to local handles
03577         result = get_local_handles( &dum_ents[0], num_ents, entities );MB_CHK_SET_ERR( result, "Unable to convert to local handles" );
03578 
03579         // If it's a handle type, also convert tag vals in-place in buffer
03580         if( MB_TYPE_HANDLE == tag_type )
03581         {
03582             dum_ehvals.resize( num_ents );
03583             UNPACK_EH( buff_ptr, &dum_ehvals[0], num_ents );
03584             result = get_local_handles( &dum_ehvals[0], num_ents, entities );MB_CHK_SET_ERR( result, "Failed to get local handles for tag vals" );
03585         }
03586 
03587         DataType data_type;
03588         mbImpl->tag_get_data_type( tag_handle, data_type );
03589         int type_size = TagInfo::size_from_data_type( data_type );
03590 
03591         if( !dum_ents.empty() )
03592         {
03593             if( tag_size == MB_VARIABLE_LENGTH )
03594             {
03595                 // Be careful of alignment here. If the integers are aligned
03596                 // in the buffer, we can use them directly. Otherwise we must
03597                 // copy them.
03598                 std::vector< int > var_lengths( num_ents );
03599                 UNPACK_INTS( buff_ptr, &var_lengths[0], num_ents );
03600                 UPC( sizeof( int ) * num_ents, " void" );
03601 
03602                 // Get pointers into buffer for each tag value
03603                 var_len_vals.resize( num_ents );
03604                 for( std::vector< EntityHandle >::size_type j = 0; j < (std::vector< EntityHandle >::size_type)num_ents;
03605                      j++ )
03606                 {
03607                     var_len_vals[j] = buff_ptr;
03608                     buff_ptr += var_lengths[j] * type_size;
03609                     UPC( var_lengths[j], " void" );
03610                 }
03611                 result =
03612                     mbImpl->tag_set_by_ptr( tag_handle, &dum_ents[0], num_ents, &var_len_vals[0], &var_lengths[0] );MB_CHK_SET_ERR( result, "Failed to set tag data when unpacking variable-length tag" );
03613             }
03614             else
03615             {
03616                 // Get existing values of dst tag
03617                 dum_vals.resize( tag_size * num_ents );
03618                 if( mpi_op )
03619                 {
03620                     int tag_length;
03621                     result = mbImpl->tag_get_length( tag_handle, tag_length );MB_CHK_SET_ERR( result, "Failed to get tag length" );
03622                     result = mbImpl->tag_get_data( tag_handle, &dum_ents[0], num_ents, &dum_vals[0] );MB_CHK_SET_ERR( result, "Failed to get existing value of dst tag on entities" );
03623                     result = reduce_void( tag_data_type, *mpi_op, tag_length * num_ents, &dum_vals[0], buff_ptr );MB_CHK_SET_ERR( result, "Failed to perform mpi op on dst tags" );
03624                 }
03625                 result = mbImpl->tag_set_data( tag_handle, &dum_ents[0], num_ents, buff_ptr );MB_CHK_SET_ERR( result, "Failed to set range-based tag data when unpacking tag" );
03626                 buff_ptr += num_ents * tag_size;
03627                 UPC( num_ents * tag_size, " void" );
03628             }
03629         }
03630     }
03631 
03632     myDebug->tprintf( 4, "Done unpacking tags.\n" );
03633 
03634     return MB_SUCCESS;
03635 }
03636 
03637 template < class T >
03638 T LAND( const T& arg1, const T& arg2 )
03639 {
03640     return arg1 && arg2;
03641 }
03642 template < class T >
03643 T LOR( const T& arg1, const T& arg2 )
03644 {
03645     return arg1 || arg2;
03646 }
03647 template < class T >
03648 T LXOR( const T& arg1, const T& arg2 )
03649 {
03650     return ( ( arg1 && !arg2 ) || ( !arg1 && arg2 ) );
03651 }
03652 template < class T >
03653 T MAX( const T& arg1, const T& arg2 )
03654 {
03655     return ( arg1 > arg2 ? arg1 : arg2 );
03656 }
03657 template < class T >
03658 T MIN( const T& arg1, const T& arg2 )
03659 {
03660     return ( arg1 < arg2 ? arg1 : arg2 );
03661 }
03662 template < class T >
03663 T ADD( const T& arg1, const T& arg2 )
03664 {
03665     return arg1 + arg2;
03666 }
03667 template < class T >
03668 T MULT( const T& arg1, const T& arg2 )
03669 {
03670     return arg1 * arg2;
03671 }
03672 
03673 template < class T >
03674 ErrorCode ParallelComm::reduce( const MPI_Op mpi_op, int num_ents, void* old_vals, void* new_vals )
03675 {
03676     T* old_tmp = reinterpret_cast< T* >( old_vals );
03677     // T *new_tmp = reinterpret_cast<T*>(new_vals);
03678     // The new_vals pointer may not be properly aligned for T; some compilers optimize
03678     // assuming alignment, so copy the values into an aligned buffer first
03679 
03680     std::vector< T > new_values;
03681     new_values.resize( num_ents );
03682     memcpy( &new_values[0], new_vals, num_ents * sizeof( T ) );
03683     T* new_tmp = &new_values[0];
03684 
03685     if( mpi_op == MPI_SUM )
03686         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, ADD< T > );
03687     else if( mpi_op == MPI_PROD )
03688         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MULT< T > );
03689     else if( mpi_op == MPI_MAX )
03690         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MAX< T > );
03691     else if( mpi_op == MPI_MIN )
03692         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, MIN< T > );
03693     else if( mpi_op == MPI_LAND )
03694         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LAND< T > );
03695     else if( mpi_op == MPI_LOR )
03696         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LOR< T > );
03697     else if( mpi_op == MPI_LXOR )
03698         std::transform( old_tmp, old_tmp + num_ents, new_tmp, new_tmp, LXOR< T > );
03699     else if( mpi_op == MPI_BAND || mpi_op == MPI_BOR || mpi_op == MPI_BXOR )
03700     {
03701         std::cerr << "Bitwise operations not allowed in tag reductions." << std::endl;
03702         return MB_FAILURE;
03703     }
03704     else if( mpi_op != MPI_OP_NULL )
03705     {
03706         std::cerr << "Unknown MPI operation type." << std::endl;
03707         return MB_TYPE_OUT_OF_RANGE;
03708     }
03709 
03710     // Copy the result back into the caller's buffer
03711     memcpy( new_vals, new_tmp, num_ents * sizeof( T ) );
03712     std::vector< T >().swap( new_values );  // swap with an empty vector to release the allocation
03713 
03714     return MB_SUCCESS;
03715 }
03716 
03717 ErrorCode ParallelComm::reduce_void( int tag_data_type, const MPI_Op mpi_op, int num_ents, void* old_vals,
03718                                      void* new_vals )
03719 {
03720     ErrorCode result;
03721     switch( tag_data_type )
03722     {
03723         case MB_TYPE_INTEGER:
03724             result = reduce< int >( mpi_op, num_ents, old_vals, new_vals );
03725             break;
03726         case MB_TYPE_DOUBLE:
03727             result = reduce< double >( mpi_op, num_ents, old_vals, new_vals );
03728             break;
03729         case MB_TYPE_BIT:
03730             result = reduce< unsigned char >( mpi_op, num_ents, old_vals, new_vals );
03731             break;
03732         default:
03733             result = MB_SUCCESS;
03734             break;
03735     }
03736 
03737     return result;
03738 }
03739 
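// Illustrative sketch (hypothetical call): reduce_void() simply dispatches on
// the MOAB tag data type. Given 'n' values of an MB_TYPE_DOUBLE tag already on
// this rank ('local_vals') and the values received from another rank
// ('incoming_vals'), a caller in the tag-exchange path could do:
//
//   ErrorCode rv = reduce_void( MB_TYPE_DOUBLE, MPI_SUM, n, local_vals, incoming_vals );
//   // incoming_vals[i] now holds local_vals[i] + incoming_vals[i].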
03740 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, int resolve_dim, int shared_dim, const Tag* id_tag )
03741 {
03742     ErrorCode result;
03743     Range proc_ents;
03744 
03745     // Check for structured mesh, and do it differently if it is
03746     ScdInterface* scdi;
03747     result = mbImpl->query_interface( scdi );
03748     if( scdi )
03749     {
03750         result = scdi->tag_shared_vertices( this, this_set );
03751         if( MB_SUCCESS == result )
03752         {
03753             myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
03754             return result;
03755         }
03756     }
03757 
03758     if( 0 == this_set )
03759     {
03760         // Get the entities in the partition sets
03761         for( Range::iterator rit = partitionSets.begin(); rit != partitionSets.end(); ++rit )
03762         {
03763             Range tmp_ents;
03764             result = mbImpl->get_entities_by_handle( *rit, tmp_ents, true );
03765             if( MB_SUCCESS != result ) return result;
03766             proc_ents.merge( tmp_ents );
03767         }
03768     }
03769     else
03770     {
03771         result = mbImpl->get_entities_by_handle( this_set, proc_ents, true );
03772         if( MB_SUCCESS != result ) return result;
03773     }
03774 
03775     // Resolve dim is maximal dim of entities in proc_ents
03776     if( -1 == resolve_dim )
03777     {
03778         if( !proc_ents.empty() ) resolve_dim = mbImpl->dimension_from_handle( *proc_ents.rbegin() );
03779     }
03780 
03781     // proc_ents should all be of the same dimension
03782     if( resolve_dim > shared_dim &&
03783         mbImpl->dimension_from_handle( *proc_ents.rbegin() ) != mbImpl->dimension_from_handle( *proc_ents.begin() ) )
03784     {
03785         Range::iterator lower = proc_ents.lower_bound( CN::TypeDimensionMap[0].first ),
03786                         upper = proc_ents.upper_bound( CN::TypeDimensionMap[resolve_dim - 1].second );
03787         proc_ents.erase( lower, upper );
03788     }
03789 
03790     // Must call even if we don't have any entities, so that the
03791     // collective communication still matches up across all tasks
03792     return resolve_shared_ents( this_set, proc_ents, resolve_dim, shared_dim, NULL, id_tag );
03793 }
03794 
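// Illustrative usage sketch (made-up variable names; assumes MPI is already
// initialized and each rank has loaded its part of the mesh):
//
//   moab::Core mb;
//   moab::ParallelComm pcomm( &mb, MPI_COMM_WORLD );
//   // ... load local mesh, populate partition sets ...
//   moab::ErrorCode rv = pcomm.resolve_shared_ents( 0 /*this_set*/, 3 /*resolve_dim*/,
//                                                   2 /*shared_dim*/, NULL /*id_tag*/ );
//   // This is a collective call; afterwards shared entities carry the
//   // sharing-proc and pstatus tags set by the overload below.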
03795 ErrorCode ParallelComm::resolve_shared_ents( EntityHandle this_set, Range& proc_ents, int resolve_dim, int shared_dim,
03796                                              Range* skin_ents, const Tag* id_tag )
03797 {
03798 #ifdef MOAB_HAVE_MPE
03799     if( myDebug->get_verbosity() == 2 )
03800     {
03801         define_mpe();
03802         MPE_Log_event( RESOLVE_START, procConfig.proc_rank(), "Entering resolve_shared_ents." );
03803     }
03804 #endif
03805 
03806     ErrorCode result;
03807     myDebug->tprintf( 1, "Resolving shared entities.\n" );
03808 
03809     if( resolve_dim < shared_dim )
03810     {
03811         MB_SET_ERR( MB_FAILURE, "MOAB does not support vertex-based partitions, only element-based ones" );
03812     }
03813 
03814     if( -1 == shared_dim )
03815     {
03816         if( !proc_ents.empty() )
03817             shared_dim = mbImpl->dimension_from_handle( *proc_ents.begin() ) - 1;
03818         else if( resolve_dim == 3 )
03819             shared_dim = 2;
03820     }
03821     int max_global_resolve_dim = -1;
03822     int err = MPI_Allreduce( &resolve_dim, &max_global_resolve_dim, 1, MPI_INT, MPI_MAX, proc_config().proc_comm() );
03823     if( MPI_SUCCESS != err ) { MB_SET_ERR( MB_FAILURE, "Unable to guess global resolve_dim" ); }
03824     if( shared_dim < 0 || resolve_dim < 0 )
03825     {
03826         // MB_SET_ERR(MB_FAILURE, "Unable to guess shared_dim or resolve_dim");
03827         resolve_dim = max_global_resolve_dim;
03828         shared_dim  = resolve_dim - 1;
03829     }
03830 
03831     // No task has any mesh; nothing to resolve, get out
03832     if( resolve_dim < 0 || shared_dim < 0 ) return MB_SUCCESS;
03833 
03834     // Get the skin entities by dimension
03835     Range tmp_skin_ents[4];
03836 
03837     // Get the entities to be skinned
03838     // Find the skin
03839     int skin_dim = resolve_dim - 1;
03840     if( !skin_ents )
03841     {
03842         skin_ents              = tmp_skin_ents;
03843         skin_ents[resolve_dim] = proc_ents;
03844         Skinner skinner( mbImpl );
03845         result =
03846             skinner.find_skin( this_set, skin_ents[skin_dim + 1], false, skin_ents[skin_dim], NULL, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
03847         myDebug->tprintf( 1, "Found skin:   skin_dim: %d resolve_dim: %d , now resolving.\n", skin_dim, resolve_dim );
03848         myDebug->tprintf( 3, "skin_ents[0].size(): %d skin_ents[1].size(): %d  \n", (int)skin_ents[0].size(),
03849                           (int)skin_ents[1].size() );
03850         // Get entities adjacent to skin ents from shared_dim down to zero
03851         for( int this_dim = skin_dim - 1; this_dim >= 0; this_dim-- )
03852         {
03853             result =
03854                 mbImpl->get_adjacencies( skin_ents[skin_dim], this_dim, true, skin_ents[this_dim], Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
03855 
03856             if( this_set && skin_dim == 2 && this_dim == 1 )
03857             {
03858                 result = mbImpl->add_entities( this_set, skin_ents[this_dim] );MB_CHK_ERR( result );
03859             }
03860         }
03861     }
03862     else if( skin_ents[resolve_dim].empty() )
03863         skin_ents[resolve_dim] = proc_ents;
03864 
03865     // Global id tag
03866     Tag gid_tag;
03867     if( id_tag )
03868         gid_tag = *id_tag;
03869     else
03870     {
03871         bool tag_created = false;
03872         int def_val      = -1;
03873         result = mbImpl->tag_get_handle( GLOBAL_ID_TAG_NAME, 1, MB_TYPE_INTEGER, gid_tag, MB_TAG_DENSE | MB_TAG_CREAT,
03874                                          &def_val, &tag_created );
03875         if( MB_ALREADY_ALLOCATED != result && MB_SUCCESS != result )
03876         {
03877             MB_SET_ERR( result, "Failed to create/get gid tag handle" );
03878         }
03879         else if( tag_created )
03880         {
03881             // Just created it, so we need global ids
03882             result = assign_global_ids( this_set, skin_dim + 1, true, true, true );MB_CHK_SET_ERR( result, "Failed to assign global ids" );
03883         }
03884     }
03885 
03886     DataType tag_type;
03887     result = mbImpl->tag_get_data_type( gid_tag, tag_type );MB_CHK_SET_ERR( result, "Failed to get tag data type" );
03888     int bytes_per_tag;
03889     result = mbImpl->tag_get_bytes( gid_tag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed to get number of bytes per tag" );
03890     // On 64-bit platforms long and int usually differ in size; on 32-bit platforms
03891     // they do not. sizeof(long) is used below to decide how to read the id tag.
03892 
03893     // Get gids for skin ents in a vector, to pass to gs
03894     std::vector< long > lgid_data( skin_ents[0].size() );
03895     // The id tag is stored as either long or int
03896     // (8 or 4 bytes on a 64-bit platform, respectively)
03897     if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
03898     {  // It is a special id tag
03899         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &lgid_data[0] );MB_CHK_SET_ERR( result, "Couldn't get gid tag for skin vertices" );
03900     }
03901     else if( 4 == bytes_per_tag )
03902     {  // Must be GLOBAL_ID tag or 32 bits ...
03903         std::vector< int > gid_data( lgid_data.size() );
03904         result = mbImpl->tag_get_data( gid_tag, skin_ents[0], &gid_data[0] );MB_CHK_SET_ERR( result, "Failed to get gid tag for skin vertices" );
03905         std::copy( gid_data.begin(), gid_data.end(), lgid_data.begin() );
03906     }
03907     else
03908     {
03909         // Not supported flag
03910         MB_SET_ERR( MB_FAILURE, "Unsupported id tag" );
03911     }
03912 
03913     // Put handles in vector for passing to gs setup
03914     std::vector< Ulong > handle_vec;  // Assumes that we can do conversion from Ulong to EntityHandle
03915     std::copy( skin_ents[0].begin(), skin_ents[0].end(), std::back_inserter( handle_vec ) );
03916 
03917 #ifdef MOAB_HAVE_MPE
03918     if( myDebug->get_verbosity() == 2 )
03919     {
03920         MPE_Log_event( SHAREDV_START, procConfig.proc_rank(), "Creating crystal router." );
03921     }
03922 #endif
03923 
03924     // Get a crystal router
03925     gs_data::crystal_data* cd = procConfig.crystal_router();
03926 
03927     /*
03928     // Get total number of entities; will overshoot highest global id, but
03929     // that's OK
03930     int num_total[2] = {0, 0}, num_local[2] = {0, 0};
03931     result = mbImpl->get_number_entities_by_dimension(this_set, 0, num_local);
03932     if (MB_SUCCESS != result)return result;
03933     int failure = MPI_Allreduce(num_local, num_total, 1,
03934     MPI_INT, MPI_SUM, procConfig.proc_comm());
03935     if (failure) {
03936       MB_SET_ERR(MB_FAILURE, "Allreduce for total number of shared ents failed");
03937     }
03938     */
03939     // Call gather-scatter to get shared ids & procs
03940     gs_data* gsd = new gs_data();
03941     // assert(sizeof(ulong_) == sizeof(EntityHandle));
03942     result = gsd->initialize( skin_ents[0].size(), &lgid_data[0], &handle_vec[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
03943 
03944     // Get shared proc tags
03945     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
03946     result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags" );
03947 
03948     // Load shared verts into a tuple, then sort by index
03949     TupleList shared_verts;
03950     shared_verts.initialize( 2, 0, 1, 0, skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 ) );
03951     shared_verts.enableWriteAccess();
03952 
03953     unsigned int i = 0, j = 0;
03954     for( unsigned int p = 0; p < gsd->nlinfo->_np; p++ )
03955         for( unsigned int np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
03956         {
03957             shared_verts.vi_wr[i++] = gsd->nlinfo->_sh_ind[j];
03958             shared_verts.vi_wr[i++] = gsd->nlinfo->_target[p];
03959             shared_verts.vul_wr[j]  = gsd->nlinfo->_ulabels[j];
03960             j++;
03961             shared_verts.inc_n();
03962         }
03963 
03964     myDebug->tprintf( 3, " shared verts size %d \n", (int)shared_verts.get_n() );
03965 
03966     int max_size = skin_ents[0].size() * ( MAX_SHARING_PROCS + 1 );
03967     moab::TupleList::buffer sort_buffer;
03968     sort_buffer.buffer_init( max_size );
03969     shared_verts.sort( 0, &sort_buffer );
03970     sort_buffer.reset();
03971 
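// For reference (an interpretation of the loop and sort above, not extra code):
// each tuple in shared_verts holds
//   vi[0]  - local index of the skin vertex in skin_ents[0]
//   vi[1]  - rank of a remote processor sharing that vertex
//   vul[0] - that vertex's handle on the remote processor
// Sorting on component 0 groups all sharing processors of a vertex together,
// which is what tag_shared_verts() below expects.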
03972     // Set sharing procs and handles tags on skin ents
03973     int maxp = -1;
03974     std::vector< int > sharing_procs( MAX_SHARING_PROCS );
03975     std::fill( sharing_procs.begin(), sharing_procs.end(), maxp );
03976     j = 0;
03977     i = 0;
03978 
03979     // Get ents shared by 1 or n procs
03980     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
03981     Range proc_verts;
03982     result = mbImpl->get_adjacencies( proc_ents, 0, false, proc_verts, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get proc_verts" );
03983 
03984     myDebug->print( 3, " resolve shared ents:  proc verts ", proc_verts );
03985     result = tag_shared_verts( shared_verts, skin_ents, proc_nvecs, proc_verts );MB_CHK_SET_ERR( result, "Failed to tag shared verts" );
03986 
03987 #ifdef MOAB_HAVE_MPE
03988     if( myDebug->get_verbosity() == 2 )
03989     {
03990         MPE_Log_event( SHAREDV_END, procConfig.proc_rank(), "Finished tag_shared_verts." );
03991     }
03992 #endif
03993 
03994     // Get entities shared by 1 or n procs
03995     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );MB_CHK_SET_ERR( result, "Failed to tag shared entities" );
03996 
03997     shared_verts.reset();
03998 
03999     if( myDebug->get_verbosity() > 0 )
04000     {
04001         for( std::map< std::vector< int >, std::vector< EntityHandle > >::const_iterator mit = proc_nvecs.begin();
04002              mit != proc_nvecs.end(); ++mit )
04003         {
04004             myDebug->tprintf( 1, "Iface: " );
04005             for( std::vector< int >::const_iterator vit = ( mit->first ).begin(); vit != ( mit->first ).end(); ++vit )
04006                 myDebug->printf( 1, " %d", *vit );
04007             myDebug->print( 1, "\n" );
04008         }
04009     }
04010 
04011     // Create the sets for each interface; store them as tags on
04012     // the interface instance
04013     Range iface_sets;
04014     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
04015 
04016     // Establish comm procs and buffers for them
04017     std::set< unsigned int > procs;
04018     result = get_interface_procs( procs, true );MB_CHK_SET_ERR( result, "Failed to get interface procs" );
04019 
04020 #ifndef NDEBUG
04021     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Shared handle check failed after interface vertex exchange" );
04022 #endif
04023 
04024     // Resolve shared entity remote handles; implemented in ghost cell exchange
04025     // code because it's so similar
04026     result = exchange_ghost_cells( -1, -1, 0, 0, true, true );MB_CHK_SET_ERR( result, "Failed to resolve shared entity remote handles" );
04027 
04028     // Now build parent/child links for interface sets
04029     result = create_iface_pc_links();MB_CHK_SET_ERR( result, "Failed to create interface parent/child links" );
04030 
04031     gsd->reset();
04032     delete gsd;
04033 
04034 #ifdef MOAB_HAVE_MPE
04035     if( myDebug->get_verbosity() == 2 )
04036     {
04037         MPE_Log_event( RESOLVE_END, procConfig.proc_rank(), "Exiting resolve_shared_ents." );
04038     }
04039 #endif
04040 
04041     // std::ostringstream ent_str;
04042     // ent_str << "mesh." << procConfig.proc_rank() << ".h5m";
04043     // mbImpl->write_mesh(ent_str.str().c_str());
04044 
04045     // Done
04046     return result;
04047 }
04048 
04049 void ParallelComm::define_mpe()
04050 {
04051 #ifdef MOAB_HAVE_MPE
04052     if( myDebug->get_verbosity() == 2 )
04053     {
04054         // Define mpe states used for logging
04055         int success;
04056         MPE_Log_get_state_eventIDs( &IFACE_START, &IFACE_END );
04057         MPE_Log_get_state_eventIDs( &GHOST_START, &GHOST_END );
04058         MPE_Log_get_state_eventIDs( &SHAREDV_START, &SHAREDV_END );
04059         MPE_Log_get_state_eventIDs( &RESOLVE_START, &RESOLVE_END );
04060         MPE_Log_get_state_eventIDs( &ENTITIES_START, &ENTITIES_END );
04061         MPE_Log_get_state_eventIDs( &RHANDLES_START, &RHANDLES_END );
04062         MPE_Log_get_state_eventIDs( &OWNED_START, &OWNED_END );
04063         success = MPE_Describe_state( IFACE_START, IFACE_END, "Resolve interface ents", "green" );
04064         assert( MPE_LOG_OK == success );
04065         success = MPE_Describe_state( GHOST_START, GHOST_END, "Exchange ghost ents", "red" );
04066         assert( MPE_LOG_OK == success );
04067         success = MPE_Describe_state( SHAREDV_START, SHAREDV_END, "Resolve interface vertices", "blue" );
04068         assert( MPE_LOG_OK == success );
04069         success = MPE_Describe_state( RESOLVE_START, RESOLVE_END, "Resolve shared ents", "purple" );
04070         assert( MPE_LOG_OK == success );
04071         success = MPE_Describe_state( ENTITIES_START, ENTITIES_END, "Exchange shared ents", "yellow" );
04072         assert( MPE_LOG_OK == success );
04073         success = MPE_Describe_state( RHANDLES_START, RHANDLES_END, "Remote handles", "cyan" );
04074         assert( MPE_LOG_OK == success );
04075         success = MPE_Describe_state( OWNED_START, OWNED_END, "Exchange owned ents", "black" );
04076         assert( MPE_LOG_OK == success );
04077     }
04078 #endif
04079 }
04080 
04081 ErrorCode ParallelComm::resolve_shared_ents( ParallelComm** pc, const unsigned int np, EntityHandle this_set,
04082                                              const int part_dim )
04083 {
04084     std::vector< Range > verts( np );
04085     int tot_verts = 0;
04086     unsigned int p, i, j, v;
04087     ErrorCode rval;
04088     for( p = 0; p < np; p++ )
04089     {
04090         Skinner skinner( pc[p]->get_moab() );
04091         Range part_ents, skin_ents;
04092         rval = pc[p]->get_moab()->get_entities_by_dimension( this_set, part_dim, part_ents );
04093         if( MB_SUCCESS != rval ) return rval;
04094         rval = skinner.find_skin( this_set, part_ents, false, skin_ents, 0, true, true, true );
04095         if( MB_SUCCESS != rval ) return rval;
04096         rval = pc[p]->get_moab()->get_adjacencies( skin_ents, 0, true, verts[p], Interface::UNION );
04097         if( MB_SUCCESS != rval ) return rval;
04098         tot_verts += verts[p].size();
04099     }
04100 
04101     TupleList shared_ents;
04102     shared_ents.initialize( 2, 0, 1, 0, tot_verts );
04103     shared_ents.enableWriteAccess();
04104 
04105     i = 0;
04106     j = 0;
04107     std::vector< int > gids;
04108     Range::iterator rit;
04109     Tag gid_tag;
04110     for( p = 0; p < np; p++ )
04111     {
04112         gid_tag = pc[p]->get_moab()->globalId_tag();
04113 
04114         gids.resize( verts[p].size() );
04115         rval = pc[p]->get_moab()->tag_get_data( gid_tag, verts[p], &gids[0] );
04116         if( MB_SUCCESS != rval ) return rval;
04117 
04118         for( v = 0, rit = verts[p].begin(); v < gids.size(); v++, ++rit )
04119         {
04120             shared_ents.vi_wr[i++] = gids[v];
04121             shared_ents.vi_wr[i++] = p;
04122             shared_ents.vul_wr[j]  = *rit;
04123             j++;
04124             shared_ents.inc_n();
04125         }
04126     }
04127 
04128     moab::TupleList::buffer sort_buffer;
04129     sort_buffer.buffer_init( tot_verts );
04130     shared_ents.sort( 0, &sort_buffer );
04131     sort_buffer.reset();
04132 
04133     j = 0;
04134     i = 0;
04135     std::vector< EntityHandle > handles;
04136     std::vector< int > procs;
04137 
04138     while( i < shared_ents.get_n() )
04139     {
04140         handles.clear();
04141         procs.clear();
04142 
04143         // Count & accumulate sharing procs
04144         int this_gid = shared_ents.vi_rd[j];
04145         while( i < shared_ents.get_n() && shared_ents.vi_rd[j] == this_gid )
04146         {
04147             j++;
04148             procs.push_back( shared_ents.vi_rd[j++] );
04149             handles.push_back( shared_ents.vul_rd[i++] );
04150         }
04151         if( 1 == procs.size() ) continue;
04152 
04153         for( v = 0; v < procs.size(); v++ )
04154         {
04155             rval = pc[procs[v]]->update_remote_data( handles[v], &procs[0], &handles[0], procs.size(),
04156                                                      ( procs[0] == (int)pc[procs[v]]->rank()
04157                                                            ? PSTATUS_INTERFACE
04158                                                            : ( PSTATUS_NOT_OWNED | PSTATUS_INTERFACE ) ) );
04159             if( MB_SUCCESS != rval ) return rval;
04160         }
04161     }
04162 
04163     std::set< unsigned int > psets;
04164     for( p = 0; p < np; p++ )
04165     {
04166         rval = pc[p]->create_interface_sets( this_set, part_dim, part_dim - 1 );
04167         if( MB_SUCCESS != rval ) return rval;
04168         // Establish comm procs and buffers for them
04169         psets.clear();
04170         rval = pc[p]->get_interface_procs( psets, true );
04171         if( MB_SUCCESS != rval ) return rval;
04172     }
04173 
04174     shared_ents.reset();
04175 
04176     return MB_SUCCESS;
04177 }
04178 
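// Illustrative sketch (hypothetical setup): this overload resolves sharing
// across an array of ParallelComm instances living in a single process, e.g.
// in serial tests that emulate np "ranks":
//
//   moab::ParallelComm* pcs[2] = { &pc0, &pc1 };  // made-up instances
//   rval = moab::ParallelComm::resolve_shared_ents( pcs, 2, fileset, 3 );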
04179 ErrorCode ParallelComm::tag_iface_entities()
04180 {
04181     ErrorCode result = MB_SUCCESS;
04182     Range iface_ents, tmp_ents, rmv_ents;
04183     std::vector< unsigned char > pstat;
04184     unsigned char set_pstat;
04185     Range::iterator rit2;
04186     unsigned int i;
04187 
04188     for( Range::iterator rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
04189     {
04190         iface_ents.clear();
04191 
04192         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get interface set contents" );
04193         pstat.resize( iface_ents.size() );
04194         result = mbImpl->tag_get_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set entities" );
04195         result = mbImpl->tag_get_data( pstatus_tag(), &( *rit ), 1, &set_pstat );MB_CHK_SET_ERR( result, "Failed to get pstatus values for interface set" );
04196         rmv_ents.clear();
04197         for( rit2 = iface_ents.begin(), i = 0; rit2 != iface_ents.end(); ++rit2, i++ )
04198         {
04199             if( !( pstat[i] & PSTATUS_INTERFACE ) )
04200             {
04201                 rmv_ents.insert( *rit2 );
04202                 pstat[i] = 0x0;
04203             }
04204         }
04205         result = mbImpl->remove_entities( *rit, rmv_ents );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
04206 
04207         if( !( set_pstat & PSTATUS_NOT_OWNED ) ) continue;
04208         // If we're here, we need to set the notowned status on (remaining) set contents
04209 
04210         // Remove rmv_ents from the contents list
04211         iface_ents = subtract( iface_ents, rmv_ents );
04212         // Compress the pstat vector: remove_if shifts the nonzero entries to the front;
04213         // only the first iface_ents.size() entries are read below, so the tail is never used
04213         std::remove_if( pstat.begin(), pstat.end(),
04214                         std::bind( std::equal_to< unsigned char >(), std::placeholders::_1, 0x0 ) );
04215         // std::bind2nd(std::equal_to<unsigned char>(), 0x0));
04216         // https://stackoverflow.com/questions/32739018/a-replacement-for-stdbind2nd
04217         // Fold the not_owned bit into remaining values
04218         unsigned int sz = iface_ents.size();
04219         for( i = 0; i < sz; i++ )
04220             pstat[i] |= PSTATUS_NOT_OWNED;
04221 
04222         // Set the tag on the entities
04223         result = mbImpl->tag_set_data( pstatus_tag(), iface_ents, &pstat[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus values for interface set entities" );
04224     }
04225 
04226     return MB_SUCCESS;
04227 }
04228 
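// Minimal sketch of the pstatus bit manipulation used above (the PSTATUS_*
// flags come from MBParallelConventions.h; the values here are only an example):
//
//   unsigned char p = PSTATUS_SHARED | PSTATUS_INTERFACE;
//   bool on_iface = ( p & PSTATUS_INTERFACE ) != 0;  // true: keep it in the set
//   p |= PSTATUS_NOT_OWNED;                          // fold in "not owned"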
04229 ErrorCode ParallelComm::set_pstatus_entities( Range& pstatus_ents, unsigned char pstatus_val, bool lower_dim_ents,
04230                                               bool verts_too, int operation )
04231 {
04232     std::vector< unsigned char > pstatus_vals( pstatus_ents.size() );
04233     Range all_ents, *range_ptr = &pstatus_ents;
04234     ErrorCode result;
04235     if( lower_dim_ents || verts_too )
04236     {
04237         all_ents      = pstatus_ents;
04238         range_ptr     = &all_ents;
04239         int start_dim = ( lower_dim_ents ? mbImpl->dimension_from_handle( *pstatus_ents.rbegin() ) - 1 : 0 );
04240         for( ; start_dim >= 0; start_dim-- )
04241         {
04242             result = mbImpl->get_adjacencies( all_ents, start_dim, true, all_ents, Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get adjacencies for pstatus entities" );
04243         }
04244     }
04245     if( Interface::UNION == operation )
04246     {
04247         result = mbImpl->tag_get_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
04248         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
04249             pstatus_vals[i] |= pstatus_val;
04250     }
04251     else
04252     {
04253         for( unsigned int i = 0; i < pstatus_vals.size(); i++ )
04254             pstatus_vals[i] = pstatus_val;
04255     }
04256     result = mbImpl->tag_set_data( pstatus_tag(), *range_ptr, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
04257 
04258     return MB_SUCCESS;
04259 }
04260 
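// Illustrative usage sketch (hypothetical caller): OR the ghost bit onto a
// range of entities and their vertices, leaving the other pstatus bits intact:
//
//   moab::Range ghosts;  // filled elsewhere
//   rval = pcomm->set_pstatus_entities( ghosts, PSTATUS_GHOST,
//                                       false /*lower_dim_ents*/, true /*verts_too*/,
//                                       moab::Interface::UNION );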
04261 ErrorCode ParallelComm::set_pstatus_entities( EntityHandle* pstatus_ents, int num_ents, unsigned char pstatus_val,
04262                                               bool lower_dim_ents, bool verts_too, int operation )
04263 {
04264     std::vector< unsigned char > pstatus_vals( num_ents );
04265     ErrorCode result;
04266     if( lower_dim_ents || verts_too )
04267     {
04268         // In this case, call the range-based version
04269         Range tmp_range;
04270         std::copy( pstatus_ents, pstatus_ents + num_ents, range_inserter( tmp_range ) );
04271         return set_pstatus_entities( tmp_range, pstatus_val, lower_dim_ents, verts_too, operation );
04272     }
04273 
04274     if( Interface::UNION == operation )
04275     {
04276         result = mbImpl->tag_get_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
04277         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
04278             pstatus_vals[i] |= pstatus_val;
04279     }
04280     else
04281     {
04282         for( unsigned int i = 0; i < (unsigned int)num_ents; i++ )
04283             pstatus_vals[i] = pstatus_val;
04284     }
04285     result = mbImpl->tag_set_data( pstatus_tag(), pstatus_ents, num_ents, &pstatus_vals[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag data" );
04286 
04287     return MB_SUCCESS;
04288 }
04289 
04290 static size_t choose_owner_idx( const std::vector< unsigned >& proc_list )
04291 {
04292     // Try to assign owners randomly so we get a good distribution,
04293     // (note: specifying the same seed on all procs is essential)
04294     unsigned val = 0;
04295     for( size_t i = 0; i < proc_list.size(); i++ )
04296         val ^= proc_list[i];
04297     srand( (int)( val ) );
04298     return rand() % proc_list.size();
04299 }
04300 
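// Sketch of why the rule above is deterministic: every process sharing a set
// builds the same sorted proc_list, so the XOR-derived seed, and therefore
// rand() % proc_list.size(), is identical everywhere and all ranks agree on
// the owner without extra communication. For example (made-up ranks):
//
//   std::vector< unsigned > procs = { 2, 5, 9 };
//   size_t owner_idx = choose_owner_idx( procs );  // same index on ranks 2, 5 and 9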
04301 struct set_tuple
04302 {
04303     unsigned idx;
04304     unsigned proc;
04305     EntityHandle handle;
04306     inline bool operator<( set_tuple other ) const
04307     {
04308         return ( idx == other.idx ) ? ( proc < other.proc ) : ( idx < other.idx );
04309     }
04310 };
04311 
04312 ErrorCode ParallelComm::resolve_shared_sets( EntityHandle file, const Tag* idtag )
04313 {
04314     // Find all sets with any of the following tags:
04315     const char* const shared_set_tag_names[] = { GEOM_DIMENSION_TAG_NAME, MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME,
04316                                                  NEUMANN_SET_TAG_NAME, PARALLEL_PARTITION_TAG_NAME };
04317     int num_tags                             = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
04318     Range candidate_sets;
04319     ErrorCode result = MB_FAILURE;
04320 
04321     // If we're not given an ID tag to use to globally identify sets,
04322     // then fall back to using known tag values
04323     if( !idtag )
04324     {
04325         Tag gid, tag;
04326         gid = mbImpl->globalId_tag();
04327         if( NULL != gid ) result = mbImpl->tag_get_handle( GEOM_DIMENSION_TAG_NAME, 1, MB_TYPE_INTEGER, tag );
04328         if( MB_SUCCESS == result )
04329         {
04330             for( int d = 0; d < 4; d++ )
04331             {
04332                 candidate_sets.clear();
04333                 const void* vals[] = { &d };
04334                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, vals, 1, candidate_sets );
04335                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, gid );
04336             }
04337         }
04338 
04339         for( int i = 1; i < num_tags; i++ )
04340         {
04341             result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag );
04342             if( MB_SUCCESS == result )
04343             {
04344                 candidate_sets.clear();
04345                 result = mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets );
04346                 if( MB_SUCCESS == result ) resolve_shared_sets( candidate_sets, tag );
04347             }
04348         }
04349 
04350         return MB_SUCCESS;
04351     }
04352 
04353     for( int i = 0; i < num_tags; i++ )
04354     {
04355         Tag tag;
04356         result = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tag, MB_TAG_ANY );
04357         if( MB_SUCCESS != result ) continue;
04358 
04359         mbImpl->get_entities_by_type_and_tag( file, MBENTITYSET, &tag, 0, 1, candidate_sets, Interface::UNION );
04360     }
04361 
04362     // Find any additional sets that contain shared entities
04363     Range::iterator hint = candidate_sets.begin();
04364     Range all_sets;
04365     mbImpl->get_entities_by_type( file, MBENTITYSET, all_sets );
04366     all_sets           = subtract( all_sets, candidate_sets );
04367     Range::iterator it = all_sets.begin();
04368     while( it != all_sets.end() )
04369     {
04370         Range contents;
04371         mbImpl->get_entities_by_handle( *it, contents );
04372         contents.erase( contents.lower_bound( MBENTITYSET ), contents.end() );
04373         filter_pstatus( contents, PSTATUS_SHARED, PSTATUS_OR );
04374         if( contents.empty() ) { ++it; }
04375         else
04376         {
04377             hint = candidate_sets.insert( hint, *it );
04378             it   = all_sets.erase( it );
04379         }
04380     }
04381 
04382     // Find any additional sets that contain or are parents of potential shared sets
04383     Range prev_list = candidate_sets;
04384     while( !prev_list.empty() )
04385     {
04386         it = all_sets.begin();
04387         Range new_list;
04388         hint = new_list.begin();
04389         while( it != all_sets.end() )
04390         {
04391             Range contents;
04392             mbImpl->get_entities_by_type( *it, MBENTITYSET, contents );
04393             if( !intersect( prev_list, contents ).empty() )
04394             {
04395                 hint = new_list.insert( hint, *it );
04396                 it   = all_sets.erase( it );
04397             }
04398             else
04399             {
04400                 contents.clear();  // reset before checking children; clearing new_list here would drop sets already collected in this pass
04401                 mbImpl->get_child_meshsets( *it, contents );
04402                 if( !intersect( prev_list, contents ).empty() )
04403                 {
04404                     hint = new_list.insert( hint, *it );
04405                     it   = all_sets.erase( it );
04406                 }
04407                 else
04408                 {
04409                     ++it;
04410                 }
04411             }
04412         }
04413 
04414         candidate_sets.merge( new_list );
04415         prev_list.swap( new_list );
04416     }
04417 
04418     return resolve_shared_sets( candidate_sets, *idtag );
04419 }
04420 
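// Illustrative usage sketch (hypothetical caller): typically invoked after
// resolve_shared_ents() so that set-level sharing matches entity sharing.
// Passing a NULL id tag falls back to the known set tags listed above:
//
//   rval = pcomm->resolve_shared_sets( fileset, NULL );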
04421 #ifndef NDEBUG
04422 bool is_sorted_unique( std::vector< unsigned >& v )
04423 {
04424     for( size_t i = 1; i < v.size(); i++ )
04425         if( v[i - 1] >= v[i] ) return false;
04426     return true;
04427 }
04428 #endif
04429 
04430 ErrorCode ParallelComm::resolve_shared_sets( Range& sets, Tag idtag )
04431 {
04432     ErrorCode result;
04433     const unsigned rk = proc_config().proc_rank();
04434     MPI_Comm cm       = proc_config().proc_comm();
04435 
04436     // Build sharing list for all sets
04437 
04438     // Get ids for sets in a vector, to pass to gs
04439     std::vector< long > larray;  // Allocate sufficient space for longs
04440     std::vector< Ulong > handles;
04441     Range tmp_sets;
04442     // The id tag can be size 4 or size 8
04443     // Based on that, convert to int or to long, similarly to what we do
04444     // for resolving shared vertices;
04445     // This code must also work on 32-bit builds, where long is 4 bytes,
04446     // so test for the 4-byte case first
04447     DataType tag_type;
04448     result = mbImpl->tag_get_data_type( idtag, tag_type );MB_CHK_SET_ERR( result, "Failed getting tag data type" );
04449     int bytes_per_tag;
04450     result = mbImpl->tag_get_bytes( idtag, bytes_per_tag );MB_CHK_SET_ERR( result, "Failed getting number of bytes per tag" );
04451     // On 64-bit platforms long and int usually differ in size; on 32-bit platforms
04452     // they do not. sizeof(long) decides how the id tag is read below.
04453 
04454     for( Range::iterator rit = sets.begin(); rit != sets.end(); ++rit )
04455     {
04456         if( sizeof( long ) == bytes_per_tag && ( ( MB_TYPE_HANDLE == tag_type ) || ( MB_TYPE_OPAQUE == tag_type ) ) )
04457         {  // It is a special id tag
04458             long dum;
04459             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
04460             if( MB_SUCCESS == result )
04461             {
04462                 larray.push_back( dum );
04463                 handles.push_back( *rit );
04464                 tmp_sets.insert( tmp_sets.end(), *rit );
04465             }
04466         }
04467         else if( 4 == bytes_per_tag )
04468         {  // Must be GLOBAL_ID tag or MATERIAL_ID, etc
04469             int dum;
04470             result = mbImpl->tag_get_data( idtag, &( *rit ), 1, &dum );
04471             if( MB_SUCCESS == result )
04472             {
04473                 larray.push_back( dum );
04474                 handles.push_back( *rit );
04475                 tmp_sets.insert( tmp_sets.end(), *rit );
04476             }
04477         }
04478     }
04479 
04480     const size_t nsets = handles.size();
04481 
04482     // Get handle array for sets
04483     // Note: the assert below does not hold on 64-bit Windows, where EntityHandle is 64 bits but long is 32
04484     // assert(sizeof(EntityHandle) <= sizeof(unsigned long));
04485 
04486     // Do communication of data
04487     gs_data::crystal_data* cd = procConfig.crystal_router();
04488     gs_data* gsd              = new gs_data();
04489     result                    = gsd->initialize( nsets, &larray[0], &handles[0], 2, 1, 1, cd );MB_CHK_SET_ERR( result, "Failed to create gs data" );
04490 
04491     // Convert from global IDs grouped by process rank to list
04492     // of <idx, rank> pairs so that we can sort primarily
04493     // by idx and secondarily by rank (we want lists of procs for each
04494     // idx, not lists of indices for each proc).
04495     size_t ntuple = 0;
04496     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
04497         ntuple += gsd->nlinfo->_nshared[p];
04498     std::vector< set_tuple > tuples;
04499     tuples.reserve( ntuple );
04500     size_t j = 0;
04501     for( unsigned p = 0; p < gsd->nlinfo->_np; p++ )
04502     {
04503         for( unsigned np = 0; np < gsd->nlinfo->_nshared[p]; np++ )
04504         {
04505             set_tuple t;
04506             t.idx    = gsd->nlinfo->_sh_ind[j];
04507             t.proc   = gsd->nlinfo->_target[p];
04508             t.handle = gsd->nlinfo->_ulabels[j];
04509             tuples.push_back( t );
04510             j++;
04511         }
04512     }
04513     std::sort( tuples.begin(), tuples.end() );
04514 
04515     // Release crystal router stuff
04516     gsd->reset();
04517     delete gsd;
04518 
04519     // Storing sharing data for each set
04520     size_t ti    = 0;
04521     unsigned idx = 0;
04522     std::vector< unsigned > procs;
04523     Range::iterator si = tmp_sets.begin();
04524     while( si != tmp_sets.end() && ti < tuples.size() )
04525     {
04526         assert( idx <= tuples[ti].idx );
04527         if( idx < tuples[ti].idx ) si += ( tuples[ti].idx - idx );
04528         idx = tuples[ti].idx;
04529 
04530         procs.clear();
04531         size_t ti_init = ti;
04532         while( ti < tuples.size() && tuples[ti].idx == idx )
04533         {
04534             procs.push_back( tuples[ti].proc );
04535             ++ti;
04536         }
04537         assert( is_sorted_unique( procs ) );
04538 
04539         result = sharedSetData->set_sharing_procs( *si, procs );
04540         if( MB_SUCCESS != result )
04541         {
04542             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04543             std::cerr.flush();
04544             MPI_Abort( cm, 1 );
04545         }
04546 
04547         // Add this proc to list of sharing procs in correct position
04548         // so that all procs select owner based on same list
04549         std::vector< unsigned >::iterator it = std::lower_bound( procs.begin(), procs.end(), rk );
04550         assert( it == procs.end() || *it > rk );
04551         procs.insert( it, rk );
04552         size_t owner_idx = choose_owner_idx( procs );
04553         EntityHandle owner_handle;
04554         if( procs[owner_idx] == rk )
04555             owner_handle = *si;
04556         else if( procs[owner_idx] > rk )
04557             owner_handle = tuples[ti_init + owner_idx - 1].handle;
04558         else
04559             owner_handle = tuples[ti_init + owner_idx].handle;
04560         result = sharedSetData->set_owner( *si, procs[owner_idx], owner_handle );
04561         if( MB_SUCCESS != result )
04562         {
04563             std::cerr << "Failure at " __FILE__ ":" << __LINE__ << std::endl;
04564             std::cerr.flush();
04565             MPI_Abort( cm, 1 );
04566         }
04567 
04568         ++si;
04569         ++idx;
04570     }
04571 
04572     return MB_SUCCESS;
04573 }
04574 // populate sets with ghost entities, if necessary
04575 ErrorCode ParallelComm::augment_default_sets_with_ghosts( EntityHandle file_set )
04576 {
04577     // Gather all the default sets we are interested in: material, Neumann, etc.
04578     // Geometry sets are skipped because their tag value does not identify them uniquely;
04579     // another tag (such as category) may be added for them in the future
04580 
04581     if( procConfig.proc_size() < 2 ) return MB_SUCCESS;  // nothing to do in a serial run
04582     const char* const shared_set_tag_names[] = { MATERIAL_SET_TAG_NAME, DIRICHLET_SET_TAG_NAME, NEUMANN_SET_TAG_NAME,
04583                                                  PARALLEL_PARTITION_TAG_NAME };
04584 
04585     int num_tags = sizeof( shared_set_tag_names ) / sizeof( shared_set_tag_names[0] );
04586 
04587     Range* rangeSets = new Range[num_tags];
04588     Tag* tags        = new Tag[num_tags + 1];  // one extra for global id tag, which is an int, so far
04589 
04590     int my_rank   = rank();
04591     int** tagVals = new int*[num_tags];
04592     for( int i = 0; i < num_tags; i++ )
04593         tagVals[i] = NULL;
04594     ErrorCode rval;
04595 
04596     // For each tag we keep a local map from the tag value to the set carrying that value.
04597     // We assume each tag value identifies a single set; otherwise we would not know
04598     // to which set to add the entity
04599 
04600     typedef std::map< int, EntityHandle > MVal;
04601     typedef std::map< int, EntityHandle >::iterator itMVal;
04602     MVal* localMaps = new MVal[num_tags];
04603 
04604     for( int i = 0; i < num_tags; i++ )
04605     {
04606 
04607         rval = mbImpl->tag_get_handle( shared_set_tag_names[i], 1, MB_TYPE_INTEGER, tags[i], MB_TAG_ANY );
04608         if( MB_SUCCESS != rval ) continue;
04609         rval = mbImpl->get_entities_by_type_and_tag( file_set, MBENTITYSET, &( tags[i] ), 0, 1, rangeSets[i],
04610                                                      Interface::UNION );MB_CHK_SET_ERR( rval, "can't get sets with a tag" );
04611 
04612         if( rangeSets[i].size() > 0 )
04613         {
04614             tagVals[i] = new int[rangeSets[i].size()];
04615             // fill up with the tag values
04616             rval = mbImpl->tag_get_data( tags[i], rangeSets[i], tagVals[i] );MB_CHK_SET_ERR( rval, "can't get set tag values" );
04617             // now for inverse mapping:
04618             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
04619             {
04620                 localMaps[i][tagVals[i][j]] = rangeSets[i][j];
04621             }
04622         }
04623     }
04624     // get the global id tag too
04625     tags[num_tags] = mbImpl->globalId_tag();
04626 
04627     TupleList remoteEnts;
04628     // Each tuple holds: processor to send to, tag type, tag value, remote handle
04629     //   tag type: 0 - material
04630     //             1 - Dirichlet
04631     //             2 - Neumann
04632     //             3 - partition
04633     int initialSize = (int)sharedEnts.size();  // estimate that, on average, each shared ent
04634     // will be sent to one processor, for one tag;
04635     // we will actually send only entities that are owned locally, and of those
04636     // only the ones that carry one of the special tags (material, Neumann, etc.);
04637     // if we exceed the capacity, the tuple list is resized
04638     remoteEnts.initialize( 3, 0, 1, 0, initialSize );
04639     remoteEnts.enableWriteAccess();
04640 
04641     // now, for each owned entity, get the remote handle(s) and Proc(s), and verify if it
04642     // belongs to one of the sets; if yes, create a tuple and append it
04643 
04644     std::set< EntityHandle > own_and_sha;
04645     int ir = 0, jr = 0;
04646     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
04647     {
04648         // ghosted eh
04649         EntityHandle geh = *vit;
04650         if( own_and_sha.find( geh ) != own_and_sha.end() )  // already encountered
04651             continue;
04652         int procs[MAX_SHARING_PROCS];
04653         EntityHandle handles[MAX_SHARING_PROCS];
04654         int nprocs;
04655         unsigned char pstat;
04656         rval = get_sharing_data( geh, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( rval, "Failed to get sharing data" );
04657         if( pstat & PSTATUS_NOT_OWNED ) continue;  // we will send info only for entities that we own
04658         own_and_sha.insert( geh );
04659         for( int i = 0; i < num_tags; i++ )
04660         {
04661             for( int j = 0; j < (int)rangeSets[i].size(); j++ )
04662             {
04663                 EntityHandle specialSet = rangeSets[i][j];  // this set has tag i, value tagVals[i][j];
04664                 if( mbImpl->contains_entities( specialSet, &geh, 1 ) )
04665                 {
04666                     // this ghosted entity is in a special set, so form the tuple
04667                     // to send to the processors that do not own this
04668                     for( int k = 0; k < nprocs; k++ )
04669                     {
04670                         if( procs[k] != my_rank )
04671                         {
04672                             if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
04673                             {
04674                                 // resize, so we do not overflow
04675                                 int oldSize = remoteEnts.get_max();
04676                                 // increase with 50% the capacity
04677                                 remoteEnts.resize( oldSize + oldSize / 2 + 1 );
04678                             }
04679                             remoteEnts.vi_wr[ir++]  = procs[k];       // send to proc
04680                             remoteEnts.vi_wr[ir++]  = i;              // for the tags [i] (0-3)
04681                             remoteEnts.vi_wr[ir++]  = tagVals[i][j];  // actual value of the tag
04682                             remoteEnts.vul_wr[jr++] = handles[k];
04683                             remoteEnts.inc_n();
04684                         }
04685                     }
04686                 }
04687             }
04688         }
04689         // if the local entity has a global id, send it too, so we avoid
04690         // another "exchange_tags" for global id
04691         int gid;
04692         rval = mbImpl->tag_get_data( tags[num_tags], &geh, 1, &gid );MB_CHK_SET_ERR( rval, "Failed to get global id" );
04693         if( gid != 0 )
04694         {
04695             for( int k = 0; k < nprocs; k++ )
04696             {
04697                 if( procs[k] != my_rank )
04698                 {
04699                     if( remoteEnts.get_n() >= remoteEnts.get_max() - 1 )
04700                     {
04701                         // resize, so we do not overflow
04702                         int oldSize = remoteEnts.get_max();
04703                         // increase with 50% the capacity
04704                         remoteEnts.resize( oldSize + oldSize / 2 + 1 );
04705                     }
04706                     remoteEnts.vi_wr[ir++]  = procs[k];  // send to proc
04707                     remoteEnts.vi_wr[ir++]  = num_tags;  // for the tags [j] (4)
04708                     remoteEnts.vi_wr[ir++]  = gid;       // actual value of the tag
04709                     remoteEnts.vul_wr[jr++] = handles[k];
04710                     remoteEnts.inc_n();
04711                 }
04712             }
04713         }
04714     }
04715 
04716 #ifndef NDEBUG
04717     if( my_rank == 1 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 1, before augment routing" );
04718     MPI_Barrier( procConfig.proc_comm() );
04719     int sentEnts = remoteEnts.get_n();
04720     assert( ( sentEnts == jr ) && ( 3 * sentEnts == ir ) );
04721 #endif
04722     // exchange the info now, and send to
04723     gs_data::crystal_data* cd = this->procConfig.crystal_router();
04724     // All communication happens here; no other mpi calls
04725     // Also, this is a collective call
04726     rval = cd->gs_transfer( 1, remoteEnts, 0 );MB_CHK_SET_ERR( rval, "Error in tuple transfer" );
04727 #ifndef NDEBUG
04728     if( my_rank == 0 && 1 == get_debug_verbosity() ) remoteEnts.print( " on rank 0, after augment routing" );
04729     MPI_Barrier( procConfig.proc_comm() );
04730 #endif
04731 
04732     // now process the data received from other processors
04733     int received = remoteEnts.get_n();
04734     for( int i = 0; i < received; i++ )
04735     {
04736         // int from = ents_to_delete.vi_rd[i];
04737         EntityHandle geh = (EntityHandle)remoteEnts.vul_rd[i];
04738         int from_proc    = remoteEnts.vi_rd[3 * i];
04739         if( my_rank == from_proc )
04740             std::cout << " unexpected receive from my rank " << my_rank << " during augmenting with ghosts\n ";
04741         int tag_type = remoteEnts.vi_rd[3 * i + 1];
04742         assert( ( 0 <= tag_type ) && ( tag_type <= num_tags ) );
04743         int value = remoteEnts.vi_rd[3 * i + 2];
04744         if( tag_type == num_tags )
04745         {
04746             // it is global id
04747             rval = mbImpl->tag_set_data( tags[num_tags], &geh, 1, &value );MB_CHK_SET_ERR( rval, "Error in setting gid tag" );
04748         }
04749         else
04750         {
04751             // now, based on value and tag type, see if we have that value in the map
04752             MVal& lmap = localMaps[tag_type];
04753             itMVal itm = lmap.find( value );
04754             if( itm == lmap.end() )
04755             {
04756                 // the value was not found yet in the local map, so we have to create the set
04757                 EntityHandle newSet;
04758                 rval = mbImpl->create_meshset( MESHSET_SET, newSet );MB_CHK_SET_ERR( rval, "can't create new set" );
04759                 lmap[value] = newSet;
04760                 // set the tag value
04761                 rval = mbImpl->tag_set_data( tags[tag_type], &newSet, 1, &value );MB_CHK_SET_ERR( rval, "can't set tag for new set" );
04762 
04763                 // we also need to add the new created set to the file set, if not null
04764                 if( file_set )
04765                 {
04766                     rval = mbImpl->add_entities( file_set, &newSet, 1 );MB_CHK_SET_ERR( rval, "can't add new set to the file set" );
04767                 }
04768             }
04769             // add the entity to the set pointed to by the map
04770             rval = mbImpl->add_entities( lmap[value], &geh, 1 );MB_CHK_SET_ERR( rval, "can't add ghost ent to the set" );
04771         }
04772     }
04773 
04774     for( int i = 0; i < num_tags; i++ )
04775         delete[] tagVals[i];
04776     delete[] tagVals;
04777     delete[] rangeSets;
04778     delete[] tags;
04779     delete[] localMaps;
04780     return MB_SUCCESS;
04781 }
04782 ErrorCode ParallelComm::create_interface_sets( EntityHandle this_set, int resolve_dim, int shared_dim )
04783 {
04784     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
04785 
04786     // Build up the list of shared entities
04787     int procs[MAX_SHARING_PROCS];
04788     EntityHandle handles[MAX_SHARING_PROCS];
04789     ErrorCode result;
04790     int nprocs;
04791     unsigned char pstat;
04792     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
04793     {
04794         if( shared_dim != -1 && mbImpl->dimension_from_handle( *vit ) > shared_dim ) continue;
04795         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
04796         std::sort( procs, procs + nprocs );
04797         std::vector< int > tmp_procs( procs, procs + nprocs );
04798         assert( tmp_procs.size() != 2 );
04799         proc_nvecs[tmp_procs].push_back( *vit );
04800     }
04801 
04802     Skinner skinner( mbImpl );
04803     Range skin_ents[4];
04804     result = mbImpl->get_entities_by_dimension( this_set, resolve_dim, skin_ents[resolve_dim] );MB_CHK_SET_ERR( result, "Failed to get skin entities by dimension" );
04805     result =
04806         skinner.find_skin( this_set, skin_ents[resolve_dim], false, skin_ents[resolve_dim - 1], 0, true, true, true );MB_CHK_SET_ERR( result, "Failed to find skin" );
04807     if( shared_dim > 1 )
04808     {
04809         result = mbImpl->get_adjacencies( skin_ents[resolve_dim - 1], resolve_dim - 2, true, skin_ents[resolve_dim - 2],
04810                                           Interface::UNION );MB_CHK_SET_ERR( result, "Failed to get skin adjacencies" );
04811     }
04812 
04813     result = get_proc_nvecs( resolve_dim, shared_dim, skin_ents, proc_nvecs );
04814 
04815     return create_interface_sets( proc_nvecs );
04816 }
04817 
04818 ErrorCode ParallelComm::create_interface_sets( std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
04819 {
04820     if( proc_nvecs.empty() ) return MB_SUCCESS;
04821 
04822     int proc_ids[MAX_SHARING_PROCS];
04823     EntityHandle proc_handles[MAX_SHARING_PROCS];
04824     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
04825     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in create_interface_sets" );
04826     Range::iterator rit;
04827 
04828     // Create interface sets, tag them, and tag their contents with iface set tag
04829     std::vector< unsigned char > pstatus;
04830     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator vit = proc_nvecs.begin();
04831          vit != proc_nvecs.end(); ++vit )
04832     {
04833         // Create the set
04834         EntityHandle new_set;
04835         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
04836         interfaceSets.insert( new_set );
04837 
04838         // Add entities
04839         assert( !vit->second.empty() );
04840         result = mbImpl->add_entities( new_set, &( vit->second )[0], ( vit->second ).size() );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
04841         // Tag set with the proc rank(s)
04842         if( vit->first.size() == 1 )
04843         {
04844             assert( ( vit->first )[0] != (int)procConfig.proc_rank() );
04845             result = mbImpl->tag_set_data( shp_tag, &new_set, 1, &( vit->first )[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04846             proc_handles[0] = 0;
04847             result          = mbImpl->tag_set_data( shh_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04848         }
04849         else
04850         {
04851             // Pad tag data out to MAX_SHARING_PROCS with -1
04852             if( vit->first.size() > MAX_SHARING_PROCS )
04853             {
04854                 std::cerr << "Exceeded MAX_SHARING_PROCS for " << CN::EntityTypeName( TYPE_FROM_HANDLE( new_set ) )
04855                           << ' ' << ID_FROM_HANDLE( new_set ) << " on process " << proc_config().proc_rank()
04856                           << std::endl;
04857                 std::cerr.flush();
04858                 MPI_Abort( proc_config().proc_comm(), 66 );
04859             }
04860             // assert(vit->first.size() <= MAX_SHARING_PROCS);
04861             std::copy( vit->first.begin(), vit->first.end(), proc_ids );
04862             std::fill( proc_ids + vit->first.size(), proc_ids + MAX_SHARING_PROCS, -1 );
04863             result = mbImpl->tag_set_data( shps_tag, &new_set, 1, proc_ids );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04864             unsigned int ind = std::find( proc_ids, proc_ids + vit->first.size(), procConfig.proc_rank() ) - proc_ids;
04865             assert( ind < vit->first.size() );
04866             std::fill( proc_handles, proc_handles + MAX_SHARING_PROCS, 0 );
04867             proc_handles[ind] = new_set;
04868             result            = mbImpl->tag_set_data( shhs_tag, &new_set, 1, proc_handles );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
04869         }
04870 
04871         // Get the owning proc, then set the pstatus tag on iface set
04872         int min_proc       = ( vit->first )[0];
04873         unsigned char pval = ( PSTATUS_SHARED | PSTATUS_INTERFACE );
04874         if( min_proc < (int)procConfig.proc_rank() ) pval |= PSTATUS_NOT_OWNED;
04875         if( vit->first.size() > 1 ) pval |= PSTATUS_MULTISHARED;
04876         result = mbImpl->tag_set_data( pstat_tag, &new_set, 1, &pval );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
04877 
04878         // Tag the vertices with the same thing
04879         pstatus.clear();
04880         std::vector< EntityHandle > verts;
04881         for( std::vector< EntityHandle >::iterator v2it = ( vit->second ).begin(); v2it != ( vit->second ).end();
04882              ++v2it )
04883             if( mbImpl->type_from_handle( *v2it ) == MBVERTEX ) verts.push_back( *v2it );
04884         pstatus.resize( verts.size(), pval );
04885         if( !verts.empty() )
04886         {
04887             result = mbImpl->tag_set_data( pstat_tag, &verts[0], verts.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to tag interface set vertices with pstatus" );
04888         }
04889     }
04890 
04891     return MB_SUCCESS;
04892 }
04893 
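// For reference, a small made-up example of the proc_nvecs map consumed above:
// the key is the sorted list of sharing processors and the value is the list of
// local entities shared with exactly that group; one interface set is created
// per key. On rank 0, with hypothetical handles:
//
//   std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
//   proc_nvecs[{ 1 }]       = { vert_a, vert_b };  // shared only with rank 1
//   proc_nvecs[{ 0, 1, 3 }] = { vert_c };          // multishared with ranks 1 and 3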
04894 ErrorCode ParallelComm::create_iface_pc_links()
04895 {
04896     // Now that we've resolved the entities in the iface sets,
04897     // set parent/child links between the iface sets
04898 
04899     // First tag all entities in the iface sets
04900     Tag tmp_iface_tag;
04901     EntityHandle tmp_iface_set = 0;
04902     ErrorCode result           = mbImpl->tag_get_handle( "__tmp_iface", 1, MB_TYPE_HANDLE, tmp_iface_tag,
04903                                                MB_TAG_DENSE | MB_TAG_CREAT, &tmp_iface_set );MB_CHK_SET_ERR( result, "Failed to create temporary interface set tag" );
04904 
04905     Range iface_ents;
04906     std::vector< EntityHandle > tag_vals;
04907     Range::iterator rit;
04908 
04909     for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
04910     {
04911         // tag entities with interface set
04912         iface_ents.clear();
04913         result = mbImpl->get_entities_by_handle( *rit, iface_ents );MB_CHK_SET_ERR( result, "Failed to get entities in interface set" );
04914 
04915         if( iface_ents.empty() ) continue;
04916 
04917         tag_vals.resize( iface_ents.size() );
04918         std::fill( tag_vals.begin(), tag_vals.end(), *rit );
04919         result = mbImpl->tag_set_data( tmp_iface_tag, iface_ents, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to tag iface entities with interface set" );
04920     }
04921 
04922     // Now go back through interface sets and add parent/child links
04923     Range tmp_ents2;
04924     for( int d = 2; d >= 0; d-- )
04925     {
04926         for( rit = interfaceSets.begin(); rit != interfaceSets.end(); ++rit )
04927         {
04928             // Get entities on this interface
04929             iface_ents.clear();
04930             result = mbImpl->get_entities_by_handle( *rit, iface_ents, true );MB_CHK_SET_ERR( result, "Failed to get entities by handle" );
04931             if( iface_ents.empty() || mbImpl->dimension_from_handle( *iface_ents.rbegin() ) != d ) continue;
04932 
04933             // Get higher-dimensional entities and their interface sets
04934             result = mbImpl->get_adjacencies( &( *iface_ents.begin() ), 1, d + 1, false, tmp_ents2 );MB_CHK_SET_ERR( result, "Failed to get adjacencies for interface sets" );
04935             tag_vals.resize( tmp_ents2.size() );
04936             result = mbImpl->tag_get_data( tmp_iface_tag, tmp_ents2, &tag_vals[0] );MB_CHK_SET_ERR( result, "Failed to get tmp iface tag for interface sets" );
04937 
04938             // Go through and for any on interface make it a parent
04939             EntityHandle last_set = 0;
04940             for( unsigned int i = 0; i < tag_vals.size(); i++ )
04941             {
04942                 if( tag_vals[i] && tag_vals[i] != last_set )
04943                 {
04944                     result = mbImpl->add_parent_child( tag_vals[i], *rit );MB_CHK_SET_ERR( result, "Failed to add parent/child link for interface set" );
04945                     last_set = tag_vals[i];
04946                 }
04947             }
04948         }
04949     }
04950 
04951     // Delete the temporary tag
04952     result = mbImpl->tag_delete( tmp_iface_tag );MB_CHK_SET_ERR( result, "Failed to delete tmp iface tag" );
04953 
04954     return MB_SUCCESS;
04955 }
04956 
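// Build a map from each (sorted) set of sharing processors to the skin entities
// shared by exactly that set of processors. Entities of dimension resolve_dim are
// skipped; sharing procs for the remaining skin entities are derived from the
// sharing data of their vertices (union or intersection, depending on whether
// resolve_dim < shared_dim).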
04957 ErrorCode ParallelComm::get_proc_nvecs( int resolve_dim, int shared_dim, Range* skin_ents,
04958                                         std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs )
04959 {
04960     // Set sharing procs tags on other skin ents
04961     ErrorCode result;
04962     const EntityHandle* connect;
04963     int num_connect;
04964     std::set< int > sharing_procs;
04965     std::vector< EntityHandle > dum_connect;
04966     std::vector< int > sp_vec;
04967 
04968     for( int d = 3; d > 0; d-- )
04969     {
04970         if( resolve_dim == d ) continue;
04971 
04972         for( Range::iterator rit = skin_ents[d].begin(); rit != skin_ents[d].end(); ++rit )
04973         {
04974             // Get connectivity
04975             result = mbImpl->get_connectivity( *rit, connect, num_connect, false, &dum_connect );MB_CHK_SET_ERR( result, "Failed to get connectivity on non-vertex skin entities" );
04976 
04977             int op = ( resolve_dim < shared_dim ? Interface::UNION : Interface::INTERSECT );
04978             result = get_sharing_data( connect, num_connect, sharing_procs, op );MB_CHK_SET_ERR( result, "Failed to get sharing data in get_proc_nvecs" );
04979             if( sharing_procs.empty() ||
04980                 ( sharing_procs.size() == 1 && *sharing_procs.begin() == (int)procConfig.proc_rank() ) )
04981                 continue;
04982 
04983             // Need to specify sharing data correctly for entities or they will
04984             // end up in a different interface set than corresponding vertices
04985             if( sharing_procs.size() == 2 )
04986             {
04987                 std::set< int >::iterator it = sharing_procs.find( proc_config().proc_rank() );
04988                 assert( it != sharing_procs.end() );
04989                 sharing_procs.erase( it );
04990             }
04991 
04992             // Intersection is the owning proc(s) for this skin ent
04993             sp_vec.clear();
04994             std::copy( sharing_procs.begin(), sharing_procs.end(), std::back_inserter( sp_vec ) );
04995             assert( sp_vec.size() != 2 );
04996             proc_nvecs[sp_vec].push_back( *rit );
04997         }
04998     }
04999 
05000 #ifndef NDEBUG
05001     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05002     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05003          mit != proc_nvecs.end(); ++mit )
05004     {
05005         std::vector< EntityHandle > tmp_vec = ( mit->second );
05006         std::sort( tmp_vec.begin(), tmp_vec.end() );
05007         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05008         assert( vit == tmp_vec.end() );
05009     }
05010 #endif
05011 
05012     return MB_SUCCESS;
05013 }
05014 
05015 // Overloaded form of tag_shared_verts
05016 // Tuple coming in is of the form (arbitrary value, remoteProc, localHandle, remoteHandle)
05017 // Also checks for duplicates in the list if the list is sorted
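// The integer fields of each tuple consist of i_extra leading values followed by
// remoteProc; the handle fields hold (localHandle, remoteHandle) pairs, which is
// why the loop below advances i by (1 + i_extra) and j by 2 per tuple.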
05018 ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents,
05019                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
05020                                           Range& /*proc_verts*/, unsigned int i_extra )
05021 {
05022     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
05023     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
05024 
05025     unsigned int j = 0, i = 0;
05026     std::vector< int > sharing_procs, sharing_procs2, tag_procs;
05027     std::vector< EntityHandle > sharing_handles, sharing_handles2, tag_lhandles, tag_rhandles;
05028     std::vector< unsigned char > pstatus;
05029 
05030     // We're on tuple j/2
05031     if( i_extra ) i += i_extra;
05032     while( j < 2 * shared_ents.get_n() )
05033     {
05034         // Count & accumulate sharing procs
05035         EntityHandle this_ent = shared_ents.vul_rd[j], other_ent = 0;
05036         int other_proc = -1;
05037         while( j < 2 * shared_ents.get_n() && shared_ents.vul_rd[j] == this_ent )
05038         {
05039             j++;
05040             // Shouldn't have same proc
05041             assert( shared_ents.vi_rd[i] != (int)procConfig.proc_rank() );
05042             // Grab the remote data if it's not a duplicate
05043             if( shared_ents.vul_rd[j] != other_ent || shared_ents.vi_rd[i] != other_proc )
05044             {
05045                 assert( 0 != shared_ents.vul_rd[j] );
05046                 sharing_procs.push_back( shared_ents.vi_rd[i] );
05047                 sharing_handles.push_back( shared_ents.vul_rd[j] );
05048             }
05049             other_proc = shared_ents.vi_rd[i];
05050             other_ent  = shared_ents.vul_rd[j];
05051             j++;
05052             i += 1 + i_extra;
05053         }
05054 
05055         if( sharing_procs.size() > 1 )
05056         {
05057             // Add current proc/handle to list
05058             sharing_procs.push_back( procConfig.proc_rank() );
05059             sharing_handles.push_back( this_ent );
05060 
05061             // Sort sharing_procs and sharing_handles such that
05062             // sharing_procs is in ascending order. Use temporary
05063             // lists and binary search to re-order sharing_handles.
05064             sharing_procs2 = sharing_procs;
05065             std::sort( sharing_procs2.begin(), sharing_procs2.end() );
05066             sharing_handles2.resize( sharing_handles.size() );
05067             for( size_t k = 0; k < sharing_handles.size(); k++ )
05068             {
05069                 size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
05070                              sharing_procs2.begin();
05071                 sharing_handles2[idx] = sharing_handles[k];
05072             }
05073             sharing_procs.swap( sharing_procs2 );
05074             sharing_handles.swap( sharing_handles2 );
05075         }
05076 
05077         assert( sharing_procs.size() != 2 );
05078         proc_nvecs[sharing_procs].push_back( this_ent );
05079 
05080         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
05081         if( sharing_procs.size() == 1 )
05082         {
05083             tag_procs.push_back( sharing_procs[0] );
05084             tag_lhandles.push_back( this_ent );
05085             tag_rhandles.push_back( sharing_handles[0] );
05086             pstatus.push_back( share_flag );
05087         }
05088         else
05089         {
05090             // Pad lists
05091             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
05092             if( sharing_procs.size() > MAX_SHARING_PROCS )
05093             {
05094                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
05095                           << proc_config().proc_rank() << std::endl;
05096                 std::cerr.flush();
05097                 MPI_Abort( proc_config().proc_comm(), 66 );
05098             }
05099             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
05100             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
05101             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
05102             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
05103             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05104             sharedEnts.insert( this_ent );
05105         }
05106 
05107         // Reset sharing proc(s) tags
05108         sharing_procs.clear();
05109         sharing_handles.clear();
05110     }
05111 
05112     if( !tag_procs.empty() )
05113     {
05114         result = mbImpl->tag_set_data( shp_tag, &tag_lhandles[0], tag_procs.size(), &tag_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
05115         result = mbImpl->tag_set_data( shh_tag, &tag_lhandles[0], tag_procs.size(), &tag_rhandles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
05116         result = mbImpl->tag_set_data( pstat_tag, &tag_lhandles[0], tag_procs.size(), &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05117         for( std::vector< EntityHandle >::iterator vvt = tag_lhandles.begin(); vvt != tag_lhandles.end(); vvt++ )
05118             sharedEnts.insert( *vvt );
05119     }
05120 
05121 #ifndef NDEBUG
05122     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05123     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05124          mit != proc_nvecs.end(); ++mit )
05125     {
05126         std::vector< EntityHandle > tmp_vec = ( mit->second );
05127         std::sort( tmp_vec.begin(), tmp_vec.end() );
05128         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05129         assert( vit == tmp_vec.end() );
05130     }
05131 #endif
05132 
05133     return MB_SUCCESS;
05134 }
05135 
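// Overloaded form of tag_shared_verts
// Each incoming tuple holds (local skin-vertex index, remoteProc) in the integer
// fields and the remote vertex handle in the handle field; the local vertex handle
// is looked up through skin_ents[0].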
05136 ErrorCode ParallelComm::tag_shared_verts( TupleList& shared_ents, Range* skin_ents,
05137                                           std::map< std::vector< int >, std::vector< EntityHandle > >& proc_nvecs,
05138                                           Range& /*proc_verts*/ )
05139 {
05140     Tag shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag;
05141     ErrorCode result = get_shared_proc_tags( shp_tag, shps_tag, shh_tag, shhs_tag, pstat_tag );MB_CHK_SET_ERR( result, "Failed to get shared proc tags in tag_shared_verts" );
05142 
05143     unsigned int j = 0, i = 0;
05144     std::vector< int > sharing_procs, sharing_procs2;
05145     std::vector< EntityHandle > sharing_handles, sharing_handles2, skin_verts( skin_ents[0].size() );
05146     for( Range::iterator rit = skin_ents[0].begin(); rit != skin_ents[0].end(); ++rit, i++ )
05147         skin_verts[i] = *rit;
05148     i = 0;
05149 
05150     while( j < 2 * shared_ents.get_n() )
05151     {
05152         // Count & accumulate sharing procs
05153         int this_idx          = shared_ents.vi_rd[j];
05154         EntityHandle this_ent = skin_verts[this_idx];
05155         while( j < 2 * shared_ents.get_n() && shared_ents.vi_rd[j] == this_idx )
05156         {
05157             j++;
05158             // Shouldn't have same proc
05159             assert( shared_ents.vi_rd[j] != (int)procConfig.proc_rank() );
05160             sharing_procs.push_back( shared_ents.vi_rd[j++] );
05161             sharing_handles.push_back( shared_ents.vul_rd[i++] );
05162         }
05163 
05164         if( sharing_procs.size() > 1 )
05165         {
05166             // Add current proc/handle to list
05167             sharing_procs.push_back( procConfig.proc_rank() );
05168             sharing_handles.push_back( this_ent );
05169         }
05170 
05171         // Sort sharing_procs and sharing_handles such that
05172         // sharing_procs is in ascending order. Use temporary
05173         // lists and binary search to re-order sharing_handles.
05174         sharing_procs2 = sharing_procs;
05175         std::sort( sharing_procs2.begin(), sharing_procs2.end() );
05176         sharing_handles2.resize( sharing_handles.size() );
05177         for( size_t k = 0; k < sharing_handles.size(); k++ )
05178         {
05179             size_t idx = std::lower_bound( sharing_procs2.begin(), sharing_procs2.end(), sharing_procs[k] ) -
05180                          sharing_procs2.begin();
05181             sharing_handles2[idx] = sharing_handles[k];
05182         }
05183         sharing_procs.swap( sharing_procs2 );
05184         sharing_handles.swap( sharing_handles2 );
05185 
05186         assert( sharing_procs.size() != 2 );
05187         proc_nvecs[sharing_procs].push_back( this_ent );
05188 
05189         unsigned char share_flag = PSTATUS_SHARED, ms_flag = ( PSTATUS_SHARED | PSTATUS_MULTISHARED );
05190         if( sharing_procs.size() == 1 )
05191         {
05192             result = mbImpl->tag_set_data( shp_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedp tag on shared vertex" );
05193             result = mbImpl->tag_set_data( shh_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedh tag on shared vertex" );
05194             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &share_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05195             sharedEnts.insert( this_ent );
05196         }
05197         else
05198         {
05199             // Pad lists
05200             // assert(sharing_procs.size() <= MAX_SHARING_PROCS);
05201             if( sharing_procs.size() > MAX_SHARING_PROCS )
05202             {
05203                 std::cerr << "MAX_SHARING_PROCS exceeded for vertex " << this_ent << " on process "
05204                           << proc_config().proc_rank() << std::endl;
05205                 std::cerr.flush();
05206                 MPI_Abort( proc_config().proc_comm(), 66 );
05207             }
05208             sharing_procs.resize( MAX_SHARING_PROCS, -1 );
05209             sharing_handles.resize( MAX_SHARING_PROCS, 0 );
05210             result = mbImpl->tag_set_data( shps_tag, &this_ent, 1, &sharing_procs[0] );MB_CHK_SET_ERR( result, "Failed to set sharedps tag on shared vertex" );
05211             result = mbImpl->tag_set_data( shhs_tag, &this_ent, 1, &sharing_handles[0] );MB_CHK_SET_ERR( result, "Failed to set sharedhs tag on shared vertex" );
05212             result = mbImpl->tag_set_data( pstat_tag, &this_ent, 1, &ms_flag );MB_CHK_SET_ERR( result, "Failed to set pstatus tag on shared vertex" );
05213             sharedEnts.insert( this_ent );
05214         }
05215 
05216         // Reset sharing proc(s) tags
05217         sharing_procs.clear();
05218         sharing_handles.clear();
05219     }
05220 
05221 #ifndef NDEBUG
05222     // Shouldn't be any repeated entities in any of the vectors in proc_nvecs
05223     for( std::map< std::vector< int >, std::vector< EntityHandle > >::iterator mit = proc_nvecs.begin();
05224          mit != proc_nvecs.end(); ++mit )
05225     {
05226         std::vector< EntityHandle > tmp_vec = ( mit->second );
05227         std::sort( tmp_vec.begin(), tmp_vec.end() );
05228         std::vector< EntityHandle >::iterator vit = std::unique( tmp_vec.begin(), tmp_vec.end() );
05229         assert( vit == tmp_vec.end() );
05230     }
05231 #endif
05232 
05233     return MB_SUCCESS;
05234 }
05235 
05236 //! Get processors with which this processor communicates; sets are sorted by processor
05237 ErrorCode ParallelComm::get_interface_procs( std::set< unsigned int >& procs_set, bool get_buffs )
05238 {
05239     // Make sure the sharing procs vector is empty
05240     procs_set.clear();
05241 
05242     // Pre-load vector of single-proc tag values
05243     unsigned int i, j;
05244     std::vector< int > iface_proc( interfaceSets.size() );
05245     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), interfaceSets, &iface_proc[0] );MB_CHK_SET_ERR( result, "Failed to get iface_proc for iface sets" );
05246 
05247     // Get sharing procs either from single-proc vector or by getting
05248     // multi-proc tag value
05249     int tmp_iface_procs[MAX_SHARING_PROCS];
05250     std::fill( tmp_iface_procs, tmp_iface_procs + MAX_SHARING_PROCS, -1 );
05251     Range::iterator rit;
05252     for( rit = interfaceSets.begin(), i = 0; rit != interfaceSets.end(); ++rit, i++ )
05253     {
05254         if( -1 != iface_proc[i] )
05255         {
05256             assert( iface_proc[i] != (int)procConfig.proc_rank() );
05257             procs_set.insert( (unsigned int)iface_proc[i] );
05258         }
05259         else
05260         {
05261             // Get the sharing_procs tag
05262             result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, tmp_iface_procs );MB_CHK_SET_ERR( result, "Failed to get iface_procs for iface set" );
05263             for( j = 0; j < MAX_SHARING_PROCS; j++ )
05264             {
05265                 if( -1 != tmp_iface_procs[j] && tmp_iface_procs[j] != (int)procConfig.proc_rank() )
05266                     procs_set.insert( (unsigned int)tmp_iface_procs[j] );
05267                 else if( -1 == tmp_iface_procs[j] )
05268                 {
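                    // Restore the scratch array to all -1 before moving on to the next set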
05269                     std::fill( tmp_iface_procs, tmp_iface_procs + j, -1 );
05270                     break;
05271                 }
05272             }
05273         }
05274     }
05275 
05276     if( get_buffs )
05277     {
05278         for( std::set< unsigned int >::iterator sit = procs_set.begin(); sit != procs_set.end(); ++sit )
05279             get_buffers( *sit );
05280     }
05281 
05282     return MB_SUCCESS;
05283 }
05284 
05285 ErrorCode ParallelComm::get_pstatus( EntityHandle entity, unsigned char& pstatus_val )
05286 {
05287     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), &entity, 1, &pstatus_val );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
05288     return result;
05289 }
05290 
05291 ErrorCode ParallelComm::get_pstatus_entities( int dim, unsigned char pstatus_val, Range& pstatus_ents )
05292 {
05293     Range ents;
05294     ErrorCode result;
05295 
05296     if( -1 == dim )
05297     {
05298         result = mbImpl->get_entities_by_handle( 0, ents );MB_CHK_SET_ERR( result, "Failed to get all entities" );
05299     }
05300     else
05301     {
05302         result = mbImpl->get_entities_by_dimension( 0, dim, ents );MB_CHK_SET_ERR( result, "Failed to get entities of dimension " << dim );
05303     }
05304 
05305     std::vector< unsigned char > pstatus( ents.size() );
05306     result = mbImpl->tag_get_data( pstatus_tag(), ents, &pstatus[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus tag data" );
05307     Range::iterator rit = ents.begin();
05308     int i               = 0;
05309     if( pstatus_val )
05310     {
05311         for( ; rit != ents.end(); i++, ++rit )
05312         {
05313             if( pstatus[i] & pstatus_val && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
05314                 pstatus_ents.insert( *rit );
05315         }
05316     }
05317     else
05318     {
05319         for( ; rit != ents.end(); i++, ++rit )
05320         {
05321             if( !pstatus[i] && ( -1 == dim || mbImpl->dimension_from_handle( *rit ) == dim ) )
05322                 pstatus_ents.insert( *rit );
05323         }
05324     }
05325 
05326     return MB_SUCCESS;
05327 }
05328 
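// Check whether any vertices in this_set still carry the default (-1) global id;
// if so, assign global ids over the requested dimensions.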
05329 ErrorCode ParallelComm::check_global_ids( EntityHandle this_set, const int dimension, const int start_id,
05330                                           const bool largest_dim_only, const bool parallel, const bool owned_only )
05331 {
05332     // Global id tag
05333     Tag gid_tag = mbImpl->globalId_tag();
05334     int def_val = -1;
05335     Range dum_range;
05336 
05337     void* tag_ptr    = &def_val;
05338     ErrorCode result = mbImpl->get_entities_by_type_and_tag( this_set, MBVERTEX, &gid_tag, &tag_ptr, 1, dum_range );MB_CHK_SET_ERR( result, "Failed to get entities by MBVERTEX type and gid tag" );
05339 
05340     if( !dum_range.empty() )
05341     {
05342         // Some vertices still have the default global id, so global ids need to be assigned
05343         result = assign_global_ids( this_set, dimension, start_id, largest_dim_only, parallel, owned_only );MB_CHK_SET_ERR( result, "Failed assigning global ids" );
05344     }
05345 
05346     return MB_SUCCESS;
05347 }
05348 
05349 bool ParallelComm::is_iface_proc( EntityHandle this_set, int to_proc )
05350 {
05351     int sharing_procs[MAX_SHARING_PROCS];
05352     std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
05353     ErrorCode result = mbImpl->tag_get_data( sharedp_tag(), &this_set, 1, sharing_procs );
05354     if( MB_SUCCESS == result && to_proc == sharing_procs[0] ) return true;
05355 
05356     result = mbImpl->tag_get_data( sharedps_tag(), &this_set, 1, sharing_procs );
05357     if( MB_SUCCESS != result ) return false;
05358 
05359     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
05360     {
05361         if( to_proc == sharing_procs[i] )
05362             return true;
05363         else if( -1 == sharing_procs[i] )
05364             return false;
05365     }
05366 
05367     return false;
05368 }
05369 
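// Filter ents by pstatus bits: PSTATUS_AND keeps entities whose status contains all
// bits in pstat, PSTATUS_OR keeps those with at least one of the bits, and
// PSTATUS_NOT keeps those with none of them. If to_proc is not -1, the result is
// further restricted to entities shared with that processor. The filtered range is
// returned in returned_ents if provided, otherwise ents is replaced in place.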
05370 ErrorCode ParallelComm::filter_pstatus( Range& ents, unsigned char pstat, unsigned char op, int to_proc,
05371                                         Range* returned_ents )
05372 {
05373     Range tmp_ents;
05374 
05375     // assert(!ents.empty());
05376     if( ents.empty() )
05377     {
05378         if( returned_ents ) returned_ents->clear();
05379         return MB_SUCCESS;
05380     }
05381 
05382     // Put into tmp_ents any entities which are not owned locally or
05383     // which are already shared with to_proc
05384     std::vector< unsigned char > shared_flags( ents.size() ), shared_flags2;
05385     ErrorCode result = mbImpl->tag_get_data( pstatus_tag(), ents, &shared_flags[0] );MB_CHK_SET_ERR( result, "Failed to get pstatus flag" );
05386     Range::const_iterator rit, hint = tmp_ents.begin();
05388     int i;
05389     if( op == PSTATUS_OR )
05390     {
05391         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05392         {
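            // This bit manipulation reduces to (shared_flags[i] & pstat): keep the
            // entity if any of the requested status bits is set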
05393             if( ( ( shared_flags[i] & ~pstat ) ^ shared_flags[i] ) & pstat )
05394             {
05395                 hint = tmp_ents.insert( hint, *rit );
05396                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05397             }
05398         }
05399     }
05400     else if( op == PSTATUS_AND )
05401     {
05402         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05403         {
05404             if( ( shared_flags[i] & pstat ) == pstat )
05405             {
05406                 hint = tmp_ents.insert( hint, *rit );
05407                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05408             }
05409         }
05410     }
05411     else if( op == PSTATUS_NOT )
05412     {
05413         for( rit = ents.begin(), i = 0; rit != ents.end(); ++rit, i++ )
05414         {
05415             if( !( shared_flags[i] & pstat ) )
05416             {
05417                 hint = tmp_ents.insert( hint, *rit );
05418                 if( -1 != to_proc ) shared_flags2.push_back( shared_flags[i] );
05419             }
05420         }
05421     }
05422     else
05423     {
05424         assert( false );
05425         return MB_FAILURE;
05426     }
05427 
05428     if( -1 != to_proc )
05429     {
05430         int sharing_procs[MAX_SHARING_PROCS];
05431         std::fill( sharing_procs, sharing_procs + MAX_SHARING_PROCS, -1 );
05432         Range tmp_ents2;
05433         hint = tmp_ents2.begin();
05434 
05435         for( rit = tmp_ents.begin(), i = 0; rit != tmp_ents.end(); ++rit, i++ )
05436         {
05437             // We need to check sharing procs
05438             if( shared_flags2[i] & PSTATUS_MULTISHARED )
05439             {
05440                 result = mbImpl->tag_get_data( sharedps_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedps tag" );
05441                 assert( -1 != sharing_procs[0] );
05442                 for( unsigned int j = 0; j < MAX_SHARING_PROCS; j++ )
05443                 {
05444                     // If to_proc shares this entity, add it to list
05445                     if( sharing_procs[j] == to_proc ) { hint = tmp_ents2.insert( hint, *rit ); }
05446                     else if( -1 == sharing_procs[j] )
05447                         break;
05448 
05449                     sharing_procs[j] = -1;
05450                 }
05451             }
05452             else if( shared_flags2[i] & PSTATUS_SHARED )
05453             {
05454                 result = mbImpl->tag_get_data( sharedp_tag(), &( *rit ), 1, sharing_procs );MB_CHK_SET_ERR( result, "Failed to get sharedp tag" );
05455                 assert( -1 != sharing_procs[0] );
05456                 if( sharing_procs[0] == to_proc ) hint = tmp_ents2.insert( hint, *rit );
05457                 sharing_procs[0] = -1;
05458             }
05459             else
05460                 assert( "should never get here" && false );
05461         }
05462 
05463         tmp_ents.swap( tmp_ents2 );
05464     }
05465 
05466     if( returned_ents )
05467         returned_ents->swap( tmp_ents );
05468     else
05469         ents.swap( tmp_ents );
05470 
05471     return MB_SUCCESS;
05472 }
05473 
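// Overall flow: post initial irecvs from all communicating procs, collect and pack
// the entities to send, exchange them (using the two-part send/ack protocol in
// send_buffer/recv_buffer for large messages), then, for ghost exchange, send local
// handles of the new ghosts back to their owners and unpack the returned remote handles.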
05474 ErrorCode ParallelComm::exchange_ghost_cells( int ghost_dim, int bridge_dim, int num_layers, int addl_ents,
05475                                               bool store_remote_handles, bool wait_all, EntityHandle* file_set )
05476 {
05477 #ifdef MOAB_HAVE_MPE
05478     if( myDebug->get_verbosity() == 2 )
05479     {
05480         if( !num_layers )
05481             MPE_Log_event( IFACE_START, procConfig.proc_rank(), "Starting interface exchange." );
05482         else
05483             MPE_Log_event( GHOST_START, procConfig.proc_rank(), "Starting ghost exchange." );
05484     }
05485 #endif
05486 
05487     myDebug->tprintf( 1, "Entering exchange_ghost_cells with num_layers = %d\n", num_layers );
05488     if( myDebug->get_verbosity() == 4 )
05489     {
05490         msgs.clear();
05491         msgs.reserve( MAX_SHARING_PROCS );
05492     }
05493 
05494     // If we're only finding out about existing ents, we have to be storing
05495     // remote handles too
05496     assert( num_layers > 0 || store_remote_handles );
05497 
05498     const bool is_iface = !num_layers;
05499 
05500     // Get the b-dimensional interface(s) with with_proc, where b = bridge_dim
05501 
05502     int success;
05503     ErrorCode result = MB_SUCCESS;
05504     int incoming1 = 0, incoming2 = 0;
05505 
05506     reset_all_buffers();
05507 
05508     // When this function is called, buffProcs should already have any
05509     // communicating procs
05510 
05511     //===========================================
05512     // Post ghost irecv's for ghost entities from all communicating procs
05513     //===========================================
05514 #ifdef MOAB_HAVE_MPE
05515     if( myDebug->get_verbosity() == 2 )
05516     {
05517         MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." );
05518     }
05519 #endif
05520 
05521     // Index reqs the same as buffer/sharing procs indices
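    // Three requests are kept per communicating proc: slot 3*p for the initial
    // message, 3*p+1 for the second part of a large message, and 3*p+2 for the ack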
05522     std::vector< MPI_Request > recv_ent_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL ),
05523         recv_remoteh_reqs( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05524     std::vector< unsigned int >::iterator proc_it;
05525     int ind, p;
05526     sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05527     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
05528     {
05529         incoming1++;
05530         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
05531                            MB_MESG_ENTS_SIZE, incoming1 );
05532         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
05533                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recv_ent_reqs[3 * ind] );
05534         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in ghost exchange" ); }
05535     }
05536 
05537     //===========================================
05538     // Get entities to be sent to neighbors
05539     //===========================================
05540     Range sent_ents[MAX_SHARING_PROCS], allsent, tmp_range;
05541     TupleList entprocs;
05542     int dum_ack_buff;
05543     result = get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents, allsent, entprocs );MB_CHK_SET_ERR( result, "get_sent_ents failed" );
05544 
05545     // augment file set with the entities to be sent
05546     // we might have created new entities if addl_ents>0, edges and/or faces
05547     if( addl_ents > 0 && file_set && !allsent.empty() )
05548     {
05549         result = mbImpl->add_entities( *file_set, allsent );MB_CHK_SET_ERR( result, "Failed to add new sub-entities to set" );
05550     }
05551     myDebug->tprintf( 1, "allsent ents compactness (size) = %f (%lu)\n", allsent.compactness(),
05552                       (unsigned long)allsent.size() );
05553 
05554     //===========================================
05555     // Pack and send ents from this proc to others
05556     //===========================================
05557     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
05558     {
05559         myDebug->tprintf( 1, "Sent ents compactness (size) = %f (%lu)\n", sent_ents[p].compactness(),
05560                           (unsigned long)sent_ents[p].size() );
05561 
05562         // Reserve space on front for size and for initial buff size
05563         localOwnedBuffs[p]->reset_buffer( sizeof( int ) );
05564 
05565         // Entities
05566         result = pack_entities( sent_ents[p], localOwnedBuffs[p], store_remote_handles, buffProcs[p], is_iface,
05567                                 &entprocs, &allsent );MB_CHK_SET_ERR( result, "Packing entities failed" );
05568 
05569         if( myDebug->get_verbosity() == 4 )
05570         {
05571             msgs.resize( msgs.size() + 1 );
05572             msgs.back() = new Buffer( *localOwnedBuffs[p] );
05573         }
05574 
05575         // Send the buffer (size stored in front in send_buffer)
05576         result = send_buffer( *proc_it, localOwnedBuffs[p], MB_MESG_ENTS_SIZE, sendReqs[3 * p],
05577                               recv_ent_reqs[3 * p + 2], &dum_ack_buff, incoming1, MB_MESG_REMOTEH_SIZE,
05578                               ( !is_iface && store_remote_handles ?  // this used for ghosting only
05579                                     localOwnedBuffs[p]
05580                                                                   : NULL ),
05581                               &recv_remoteh_reqs[3 * p], &incoming2 );MB_CHK_SET_ERR( result, "Failed to Isend in ghost exchange" );
05582     }
05583 
05584     entprocs.reset();
05585 
05586     //===========================================
05587     // Receive/unpack new entities
05588     //===========================================
05589     // Number of incoming messages for ghosts is the number of procs we
05590     // communicate with; for iface, it's the number of those with lower rank
05591     MPI_Status status;
05592     std::vector< std::vector< EntityHandle > > recd_ents( buffProcs.size() );
05593     std::vector< std::vector< EntityHandle > > L1hloc( buffProcs.size() ), L1hrem( buffProcs.size() );
05594     std::vector< std::vector< int > > L1p( buffProcs.size() );
05595     std::vector< EntityHandle > L2hloc, L2hrem;
05596     std::vector< unsigned int > L2p;
05597     std::vector< EntityHandle > new_ents;
05598 
05599     while( incoming1 )
05600     {
05601         // Wait for all recvs of ghost ents before proceeding to sending remote handles,
05602         // b/c some procs may have sent to a 3rd proc ents owned by me;
05603         PRINT_DEBUG_WAITANY( recv_ent_reqs, MB_MESG_ENTS_SIZE, procConfig.proc_rank() );
05604 
05605         success = MPI_Waitany( 3 * buffProcs.size(), &recv_ent_reqs[0], &ind, &status );
05606         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" ); }
05607 
05608         PRINT_DEBUG_RECD( status );
05609 
05610         // OK, received something; decrement incoming counter
05611         incoming1--;
05612         bool done = false;
05613 
05614         // ind may point at the second-message or ack slot; compute the base (first) slot index for this proc
05615         unsigned int base_ind = 3 * ( ind / 3 );
05616         result = recv_buffer( MB_MESG_ENTS_SIZE, status, remoteOwnedBuffs[ind / 3], recv_ent_reqs[base_ind + 1],
05617                               recv_ent_reqs[base_ind + 2], incoming1, localOwnedBuffs[ind / 3], sendReqs[base_ind + 1],
05618                               sendReqs[base_ind + 2], done,
05619                               ( !is_iface && store_remote_handles ? localOwnedBuffs[ind / 3] : NULL ),
05620                               MB_MESG_REMOTEH_SIZE,  // maybe base_ind+1?
05621                               &recv_remoteh_reqs[base_ind + 1], &incoming2 );MB_CHK_SET_ERR( result, "Failed to receive buffer" );
05622 
05623         if( done )
05624         {
05625             if( myDebug->get_verbosity() == 4 )
05626             {
05627                 msgs.resize( msgs.size() + 1 );
05628                 msgs.back() = new Buffer( *remoteOwnedBuffs[ind / 3] );
05629             }
05630 
05631             // Message completely received - process buffer that was sent
05632             remoteOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
05633             result = unpack_entities( remoteOwnedBuffs[ind / 3]->buff_ptr, store_remote_handles, ind / 3, is_iface,
05634                                       L1hloc, L1hrem, L1p, L2hloc, L2hrem, L2p, new_ents );
05635             if( MB_SUCCESS != result )
05636             {
05637                 std::cout << "Failed to unpack entities. Buffer contents:" << std::endl;
05638                 print_buffer( remoteOwnedBuffs[ind / 3]->mem_ptr, MB_MESG_ENTS_SIZE, buffProcs[ind / 3], false );
05639                 return result;
05640             }
05641 
05642             if( recv_ent_reqs.size() != 3 * buffProcs.size() )
05643             {
05644                 // Post irecv's for remote handles from new proc; shouldn't be iface,
05645                 // since we know about all procs we share with
05646                 assert( !is_iface );
05647                 recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05648                 for( unsigned int i = recv_ent_reqs.size(); i < 3 * buffProcs.size(); i += 3 )
05649                 {
05650                     localOwnedBuffs[i / 3]->reset_buffer();
05651                     incoming2++;
05652                     PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[i / 3], localOwnedBuffs[i / 3]->mem_ptr,
05653                                        INITIAL_BUFF_SIZE, MB_MESG_REMOTEH_SIZE, incoming2 );
05654                     success = MPI_Irecv( localOwnedBuffs[i / 3]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR,
05655                                          buffProcs[i / 3], MB_MESG_REMOTEH_SIZE, procConfig.proc_comm(),
05656                                          &recv_remoteh_reqs[i] );
05657                     if( success != MPI_SUCCESS )
05658                     {
05659                         MB_SET_ERR( MB_FAILURE, "Failed to post irecv for remote handles in ghost exchange" );
05660                     }
05661                 }
05662                 recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05663                 sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
05664             }
05665         }
05666     }
05667 
05668     // Add requests for any new addl procs
05669     if( recv_ent_reqs.size() != 3 * buffProcs.size() )
05670     {
05671         // Shouldn't get here...
05672         MB_SET_ERR( MB_FAILURE, "Requests length doesn't match proc count in ghost exchange" );
05673     }
05674 
05675 #ifdef MOAB_HAVE_MPE
05676     if( myDebug->get_verbosity() == 2 )
05677     {
05678         MPE_Log_event( ENTITIES_END, procConfig.proc_rank(), "Ending entity exchange." );
05679     }
05680 #endif
05681 
05682     if( is_iface )
05683     {
05684         // Need to check over entities I sent and make sure I received
05685         // handles for them from all expected procs; if not, need to clean
05686         // them up
05687         result = check_clean_iface( allsent );
05688         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05689 
05690         // Now set the shared/interface tag on non-vertex entities on interface
05691         result = tag_iface_entities();MB_CHK_SET_ERR( result, "Failed to tag iface entities" );
05692 
05693 #ifndef NDEBUG
05694         result = check_sent_ents( allsent );
05695         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05696         result = check_all_shared_handles( true );
05697         if( MB_SUCCESS != result ) std::cout << "Failed check." << std::endl;
05698 #endif
05699 
05700 #ifdef MOAB_HAVE_MPE
05701         if( myDebug->get_verbosity() == 2 )
05702         {
05703             MPE_Log_event( IFACE_END, procConfig.proc_rank(), "Ending interface exchange." );
05704         }
05705 #endif
05706 
05707         //===========================================
05708         // Wait if requested
05709         //===========================================
05710         if( wait_all )
05711         {
05712             if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
05713             else
05714             {
05715                 MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05716                 success = MPI_Waitall( 3 * buffProcs.size(), &recv_ent_reqs[0], mult_status );
05717                 if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05718                 success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05719                 if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05720                 /*success = MPI_Waitall(3*buffProcs.size(), &recv_remoteh_reqs[0], mult_status);
05721                 if (MPI_SUCCESS != success) {
05722                   MB_SET_ERR(MB_FAILURE, "Failed in waitall in ghost exchange");
05723                 }*/
05724             }
05725         }
05726 
05727         myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
05728         myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==true \n" );
05729 
05730         return MB_SUCCESS;
05731     }
05732 
05733     // we still need to wait on sendReqs, if they are not fulfilled yet
05734     if( wait_all )
05735     {
05736         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
05737         else
05738         {
05739             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05740             success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05741             if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05742         }
05743     }
05744     //===========================================
05745     // Send local handles for new ghosts to owner, then add
05746     // those to ghost list for that owner
05747     //===========================================
05748     for( p = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, p++ )
05749     {
05750 
05751         // Reserve space on front for size and for initial buff size
05752         remoteOwnedBuffs[p]->reset_buffer( sizeof( int ) );
05753 
05754         result = pack_remote_handles( L1hloc[p], L1hrem[p], L1p[p], *proc_it, remoteOwnedBuffs[p] );MB_CHK_SET_ERR( result, "Failed to pack remote handles" );
05755         remoteOwnedBuffs[p]->set_stored_size();
05756 
05757         if( myDebug->get_verbosity() == 4 )
05758         {
05759             msgs.resize( msgs.size() + 1 );
05760             msgs.back() = new Buffer( *remoteOwnedBuffs[p] );
05761         }
05762         result = send_buffer( buffProcs[p], remoteOwnedBuffs[p], MB_MESG_REMOTEH_SIZE, sendReqs[3 * p],
05763                               recv_remoteh_reqs[3 * p + 2], &dum_ack_buff, incoming2 );MB_CHK_SET_ERR( result, "Failed to send remote handles" );
05764     }
05765 
05766     //===========================================
05767     // Process remote handles of my ghosteds
05768     //===========================================
05769     while( incoming2 )
05770     {
05771         PRINT_DEBUG_WAITANY( recv_remoteh_reqs, MB_MESG_REMOTEH_SIZE, procConfig.proc_rank() );
05772         success = MPI_Waitany( 3 * buffProcs.size(), &recv_remoteh_reqs[0], &ind, &status );
05773         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitany in ghost exchange" ); }
05774 
05775         // OK, received something; decrement incoming counter
05776         incoming2--;
05777 
05778         PRINT_DEBUG_RECD( status );
05779 
05780         bool done             = false;
05781         unsigned int base_ind = 3 * ( ind / 3 );
05782         result = recv_buffer( MB_MESG_REMOTEH_SIZE, status, localOwnedBuffs[ind / 3], recv_remoteh_reqs[base_ind + 1],
05783                               recv_remoteh_reqs[base_ind + 2], incoming2, remoteOwnedBuffs[ind / 3],
05784                               sendReqs[base_ind + 1], sendReqs[base_ind + 2], done );MB_CHK_SET_ERR( result, "Failed to receive remote handles" );
05785         if( done )
05786         {
05787             // Incoming remote handles
05788             if( myDebug->get_verbosity() == 4 )
05789             {
05790                 msgs.resize( msgs.size() + 1 );
05791                 msgs.back() = new Buffer( *localOwnedBuffs[ind / 3] );
05792             }
05793             localOwnedBuffs[ind / 3]->reset_ptr( sizeof( int ) );
05794             result =
05795                 unpack_remote_handles( buffProcs[ind / 3], localOwnedBuffs[ind / 3]->buff_ptr, L2hloc, L2hrem, L2p );MB_CHK_SET_ERR( result, "Failed to unpack remote handles" );
05796         }
05797     }
05798 
05799 #ifdef MOAB_HAVE_MPE
05800     if( myDebug->get_verbosity() == 2 )
05801     {
05802         MPE_Log_event( RHANDLES_END, procConfig.proc_rank(), "Ending remote handles." );
05803         MPE_Log_event( GHOST_END, procConfig.proc_rank(), "Ending ghost exchange (still doing checks)." );
05804     }
05805 #endif
05806 
05807     //===========================================
05808     // Wait if requested
05809     //===========================================
05810     if( wait_all )
05811     {
05812         if( myDebug->get_verbosity() == 5 ) { success = MPI_Barrier( procConfig.proc_comm() ); }
05813         else
05814         {
05815             MPI_Status mult_status[3 * MAX_SHARING_PROCS];
05816             success = MPI_Waitall( 3 * buffProcs.size(), &recv_remoteh_reqs[0], mult_status );
05817             if( MPI_SUCCESS == success ) success = MPI_Waitall( 3 * buffProcs.size(), &sendReqs[0], mult_status );
05818         }
05819         if( MPI_SUCCESS != success ) { MB_SET_ERR( MB_FAILURE, "Failed in waitall in ghost exchange" ); }
05820     }
05821 
05822 #ifndef NDEBUG
05823     result = check_sent_ents( allsent );MB_CHK_SET_ERR( result, "Failed check on shared entities" );
05824     result = check_all_shared_handles( true );MB_CHK_SET_ERR( result, "Failed check on all shared handles" );
05825 #endif
05826 
05827     if( file_set && !new_ents.empty() )
05828     {
05829         result = mbImpl->add_entities( *file_set, &new_ents[0], new_ents.size() );MB_CHK_SET_ERR( result, "Failed to add new entities to set" );
05830     }
05831 
05832     myDebug->tprintf( 1, "Total number of shared entities = %lu.\n", (unsigned long)sharedEnts.size() );
05833     myDebug->tprintf( 1, "Exiting exchange_ghost_cells for is_iface==false \n" );
05834 
05835     return MB_SUCCESS;
05836 }
05837 
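// Send a buffer to to_proc. If the stored size fits in INITIAL_BUFF_SIZE, a single
// Isend with tag mesg_tag is posted (and, if next_recv_buff is given, an irecv for
// the follow-up message with tag next_mesg_tag). If the buffer is larger, an irecv
// for an ack (tag mesg_tag - 1) is posted first and only the first INITIAL_BUFF_SIZE
// bytes are sent now; the remainder is sent from recv_buffer once the ack arrives.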
05838 ErrorCode ParallelComm::send_buffer( const unsigned int to_proc, Buffer* send_buff, int mesg_tag, MPI_Request& send_req,
05839                                      MPI_Request& ack_req, int* ack_buff, int& this_incoming, int next_mesg_tag,
05840                                      Buffer* next_recv_buff, MPI_Request* next_recv_req, int* next_incoming )
05841 {
05842     ErrorCode result = MB_SUCCESS;
05843     int success;
05844 
05845     // If small message, post recv for remote handle message
05846     if( send_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE && next_recv_buff )
05847     {
05848         ( *next_incoming )++;
05849         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, next_mesg_tag,
05850                            *next_incoming );
05851         success = MPI_Irecv( next_recv_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, to_proc, next_mesg_tag,
05852                              procConfig.proc_comm(), next_recv_req );
05853         if( success != MPI_SUCCESS )
05854         {
05855             MB_SET_ERR( MB_FAILURE, "Failed to post irecv for next message in ghost exchange" );
05856         }
05857     }
05858     // If large, we'll need an ack before sending the rest
05859     else if( send_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
05860     {
05861         this_incoming++;
05862         PRINT_DEBUG_IRECV( procConfig.proc_rank(), to_proc, (unsigned char*)ack_buff, sizeof( int ), mesg_tag - 1,
05863                            this_incoming );
05864         success = MPI_Irecv( (void*)ack_buff, sizeof( int ), MPI_UNSIGNED_CHAR, to_proc, mesg_tag - 1,
05865                              procConfig.proc_comm(), &ack_req );
05866         if( success != MPI_SUCCESS )
05867         {
05868             MB_SET_ERR( MB_FAILURE, "Failed to post irecv for entity ack in ghost exchange" );
05869         }
05870     }
05871 
05872     // Send the buffer
05873     PRINT_DEBUG_ISEND( procConfig.proc_rank(), to_proc, send_buff->mem_ptr, mesg_tag,
05874                        std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ) );
05875     assert( 0 <= send_buff->get_stored_size() && send_buff->get_stored_size() <= (int)send_buff->alloc_size );
05876     success = MPI_Isend( send_buff->mem_ptr, std::min( send_buff->get_stored_size(), (int)INITIAL_BUFF_SIZE ),
05877                          MPI_UNSIGNED_CHAR, to_proc, mesg_tag, procConfig.proc_comm(), &send_req );
05878     if( success != MPI_SUCCESS ) return MB_FAILURE;
05879 
05880     return result;
05881 }
05882 
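// Handle one received message in the two-part protocol. Three cases:
//  - first part of a large message (tag == mesg_tag_expected, stored size >
//    INITIAL_BUFF_SIZE): grow the buffer, post an irecv for the second part
//    (tag + 1) and send an ack (tag - 1);
//  - ack received (tag == mesg_tag_expected - 1): optionally post the irecv for the
//    next message, then Isend the remainder of send_buff with tag + 1;
//  - small message or second part received: set done = true.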
05883 ErrorCode ParallelComm::recv_buffer( int mesg_tag_expected, const MPI_Status& mpi_status, Buffer* recv_buff,
05884                                      MPI_Request& recv_req, MPI_Request& /*ack_recvd_req*/, int& this_incoming,
05885                                      Buffer* send_buff, MPI_Request& send_req, MPI_Request& sent_ack_req, bool& done,
05886                                      Buffer* next_buff, int next_tag, MPI_Request* next_req, int* next_incoming )
05887 {
05888     // Process a received message; if there will be more coming,
05889     // post a receive for 2nd part then send an ack message
05890     int from_proc = mpi_status.MPI_SOURCE;
05891     int success;
05892 
05893     // Set the buff_ptr on the recv_buffer; needs to point beyond any
05894     // valid data already in the buffer
05895     recv_buff->reset_ptr( std::min( recv_buff->get_stored_size(), (int)recv_buff->alloc_size ) );
05896 
05897     if( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() > (int)INITIAL_BUFF_SIZE )
05898     {
05899         // 1st message & large - allocate buffer, post irecv for 2nd message,
05900         // then send ack
05901         recv_buff->reserve( recv_buff->get_stored_size() );
05902         assert( recv_buff->alloc_size > INITIAL_BUFF_SIZE );
05903 
05904         // Will expect a 2nd message
05905         this_incoming++;
05906 
05907         PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr + INITIAL_BUFF_SIZE,
05908                            recv_buff->get_stored_size() - INITIAL_BUFF_SIZE, mesg_tag_expected + 1, this_incoming );
05909         success = MPI_Irecv( recv_buff->mem_ptr + INITIAL_BUFF_SIZE, recv_buff->get_stored_size() - INITIAL_BUFF_SIZE,
05910                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &recv_req );
05911         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post 2nd iRecv in ghost exchange" ); }
05912 
05913         // Send ack, doesn't matter what data actually is
05914         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, recv_buff->mem_ptr, mesg_tag_expected - 1,
05915                            sizeof( int ) );
05916         success = MPI_Isend( recv_buff->mem_ptr, sizeof( int ), MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected - 1,
05917                              procConfig.proc_comm(), &sent_ack_req );
05918         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to send ack in ghost exchange" ); }
05919     }
05920     else if( mpi_status.MPI_TAG == mesg_tag_expected - 1 )
05921     {
05922         // Got an ack back, send the 2nd half of message
05923 
05924         // Should be a large message if we got this
05925         assert( *( (size_t*)send_buff->mem_ptr ) > INITIAL_BUFF_SIZE );
05926 
05927         // Post irecv for next message, then send 2nd message
05928         if( next_buff )
05929         {
05930             // We'll expect a return message
05931             ( *next_incoming )++;
05932             PRINT_DEBUG_IRECV( procConfig.proc_rank(), from_proc, next_buff->mem_ptr, INITIAL_BUFF_SIZE, next_tag,
05933                                *next_incoming );
05934 
05935             success = MPI_Irecv( next_buff->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, from_proc, next_tag,
05936                                  procConfig.proc_comm(), next_req );
05937             if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post next irecv in ghost exchange" ); }
05938         }
05939 
05940         // Send 2nd message
05941         PRINT_DEBUG_ISEND( procConfig.proc_rank(), from_proc, send_buff->mem_ptr + INITIAL_BUFF_SIZE,
05942                            mesg_tag_expected + 1, send_buff->get_stored_size() - INITIAL_BUFF_SIZE );
05943 
05944         assert( send_buff->get_stored_size() - INITIAL_BUFF_SIZE < send_buff->alloc_size &&
05945                 0 <= send_buff->get_stored_size() );
05946         success = MPI_Isend( send_buff->mem_ptr + INITIAL_BUFF_SIZE, send_buff->get_stored_size() - INITIAL_BUFF_SIZE,
05947                              MPI_UNSIGNED_CHAR, from_proc, mesg_tag_expected + 1, procConfig.proc_comm(), &send_req );
05948         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to send 2nd message in ghost exchange" ); }
05949     }
05950     else if( ( mpi_status.MPI_TAG == mesg_tag_expected && recv_buff->get_stored_size() <= (int)INITIAL_BUFF_SIZE ) ||
05951              mpi_status.MPI_TAG == mesg_tag_expected + 1 )
05952     {
05953         // Message completely received - signal that we're done
05954         done = true;
05955     }
05956 
05957     return MB_SUCCESS;
05958 }
05959 
05960 struct ProcList
05961 {
05962     int procs[MAX_SHARING_PROCS];
05963 };
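// Lexicographic ordering on the fixed-size proc arrays; a -1 entry terminates the
// comparison, so two lists compare equal once both reach their -1 padding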
05964 static bool operator<( const ProcList& a, const ProcList& b )
05965 {
05966     for( int i = 0; i < MAX_SHARING_PROCS; i++ )
05967     {
05968         if( a.procs[i] < b.procs[i] )
05969             return true;
05970         else if( b.procs[i] < a.procs[i] )
05971             return false;
05972         else if( a.procs[i] < 0 )
05973             return false;
05974     }
05975     return false;
05976 }
05977 
05978 ErrorCode ParallelComm::check_clean_iface( Range& allsent )
05979 {
05980     // allsent is all entities I think are on interface; go over them, looking
05981     // for zero-valued handles, and fix any I find
05982 
05983     // Keep lists of entities for which the sharing data changed, grouped
05984     // by set of sharing procs.
05985     typedef std::map< ProcList, Range > procmap_t;
05986     procmap_t old_procs, new_procs;
05987 
05988     ErrorCode result = MB_SUCCESS;
05989     Range::iterator rit;
05990     Range::reverse_iterator rvit;
05991     unsigned char pstatus;
05992     int nump;
05993     ProcList sharedp;
05994     EntityHandle sharedh[MAX_SHARING_PROCS];
05995     for( rvit = allsent.rbegin(); rvit != allsent.rend(); ++rvit )
05996     {
05997         result = get_sharing_data( *rvit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data" );
05998         assert( "Should be shared with at least one other proc" &&
05999                 ( nump > 1 || sharedp.procs[0] != (int)procConfig.proc_rank() ) );
06000         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
06001 
06002         // Look for first null handle in list
06003         int idx = std::find( sharedh, sharedh + nump, (EntityHandle)0 ) - sharedh;
06004         if( idx == nump ) continue;  // All handles are valid
06005 
06006         ProcList old_list( sharedp );
06007         std::sort( old_list.procs, old_list.procs + nump );
06008         old_procs[old_list].insert( *rvit );
06009 
06010         // Remove null handles and corresponding proc ranks from lists
06011         int new_nump       = idx;
06012         bool removed_owner = !idx;
06013         for( ++idx; idx < nump; ++idx )
06014         {
06015             if( sharedh[idx] )
06016             {
06017                 sharedh[new_nump]       = sharedh[idx];
06018                 sharedp.procs[new_nump] = sharedp.procs[idx];
06019                 ++new_nump;
06020             }
06021         }
06022         sharedp.procs[new_nump] = -1;
06023 
06024         if( removed_owner && new_nump > 1 )
06025         {
06026             // The proc that we choose as the entity owner isn't sharing the
06027             // entity (doesn't have a copy of it). We need to pick a different
06028             // owner. Choose the proc with lowest rank.
06029             idx = std::min_element( sharedp.procs, sharedp.procs + new_nump ) - sharedp.procs;
06030             std::swap( sharedp.procs[0], sharedp.procs[idx] );
06031             std::swap( sharedh[0], sharedh[idx] );
06032             if( sharedp.procs[0] == (int)proc_config().proc_rank() ) pstatus &= ~PSTATUS_NOT_OWNED;
06033         }
06034 
06035         result = set_sharing_data( *rvit, pstatus, nump, new_nump, sharedp.procs, sharedh );MB_CHK_SET_ERR( result, "Failed to set sharing data in check_clean_iface" );
06036 
06037         if( new_nump > 1 )
06038         {
06039             if( new_nump == 2 )
06040             {
06041                 if( sharedp.procs[1] != (int)proc_config().proc_rank() )
06042                 {
06043                     assert( sharedp.procs[0] == (int)proc_config().proc_rank() );
06044                     sharedp.procs[0] = sharedp.procs[1];
06045                 }
06046                 sharedp.procs[1] = -1;
06047             }
06048             else
06049             {
06050                 std::sort( sharedp.procs, sharedp.procs + new_nump );
06051             }
06052             new_procs[sharedp].insert( *rvit );
06053         }
06054     }
06055 
06056     if( old_procs.empty() )
06057     {
06058         assert( new_procs.empty() );
06059         return MB_SUCCESS;
06060     }
06061 
06062     // Update interface sets
06063     procmap_t::iterator pmit;
06064     // std::vector<unsigned char> pstatus_list;
06065     rit = interface_sets().begin();
06066     while( rit != interface_sets().end() )
06067     {
06068         result = get_sharing_data( *rit, sharedp.procs, sharedh, pstatus, nump );MB_CHK_SET_ERR( result, "Failed to get sharing data for interface set" );
06069         assert( nump != 2 );
06070         std::sort( sharedp.procs, sharedp.procs + nump );
06071         assert( nump == MAX_SHARING_PROCS || sharedp.procs[nump] == -1 );
06072 
06073         pmit = old_procs.find( sharedp );
06074         if( pmit != old_procs.end() )
06075         {
06076             result = mbImpl->remove_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to remove entities from interface set" );
06077         }
06078 
06079         pmit = new_procs.find( sharedp );
06080         if( pmit == new_procs.end() )
06081         {
06082             int count;
06083             result = mbImpl->get_number_entities_by_handle( *rit, count );MB_CHK_SET_ERR( result, "Failed to get number of entities in interface set" );
06084             if( !count )
06085             {
06086                 result = mbImpl->delete_entities( &*rit, 1 );MB_CHK_SET_ERR( result, "Failed to delete entities from interface set" );
06087                 rit = interface_sets().erase( rit );
06088             }
06089             else
06090             {
06091                 ++rit;
06092             }
06093         }
06094         else
06095         {
06096             result = mbImpl->add_entities( *rit, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
06097 
06098             // Remove those that we've processed so that we know which ones
06099             // are new.
06100             new_procs.erase( pmit );
06101             ++rit;
06102         }
06103     }
06104 
06105     // Create interface sets for new proc id combinations
06106     std::fill( sharedh, sharedh + MAX_SHARING_PROCS, 0 );
06107     for( pmit = new_procs.begin(); pmit != new_procs.end(); ++pmit )
06108     {
06109         EntityHandle new_set;
06110         result = mbImpl->create_meshset( MESHSET_SET, new_set );MB_CHK_SET_ERR( result, "Failed to create interface set" );
06111         interfaceSets.insert( new_set );
06112 
06113         // Add entities
06114         result = mbImpl->add_entities( new_set, pmit->second );MB_CHK_SET_ERR( result, "Failed to add entities to interface set" );
06115         // Tag set with the proc rank(s)
06116         assert( pmit->first.procs[0] >= 0 );
06117         pstatus = PSTATUS_SHARED | PSTATUS_INTERFACE;
06118         if( pmit->first.procs[1] == -1 )
06119         {
06120             int other = pmit->first.procs[0];
06121             assert( other != (int)procConfig.proc_rank() );
06122             result = mbImpl->tag_set_data( sharedp_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06123             sharedh[0] = 0;
06124             result     = mbImpl->tag_set_data( sharedh_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with handles" );
06125             if( other < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
06126         }
06127         else
06128         {
06129             result = mbImpl->tag_set_data( sharedps_tag(), &new_set, 1, pmit->first.procs );MB_CHK_SET_ERR( result, "Failed to tag interface set with procs" );
06130             result = mbImpl->tag_set_data( sharedhs_tag(), &new_set, 1, sharedh );MB_CHK_SET_ERR( result, "Failed to tag interface set with handles" );
06131             pstatus |= PSTATUS_MULTISHARED;
06132             if( pmit->first.procs[0] < (int)proc_config().proc_rank() ) pstatus |= PSTATUS_NOT_OWNED;
06133         }
06134 
06135         result = mbImpl->tag_set_data( pstatus_tag(), &new_set, 1, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface set with pstatus" );
06136 
06137         // Set pstatus on all interface entities in set
06138         result = mbImpl->tag_clear_data( pstatus_tag(), pmit->second, &pstatus );MB_CHK_SET_ERR( result, "Failed to tag interface entities with pstatus" );
06139     }
06140 
06141     return MB_SUCCESS;
06142 }
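// In short, check_clean_iface prunes procs that never returned a handle for an
// entity we thought was shared with them, re-picks the owner (lowest remaining
// rank) if the owner slot was pruned, and moves the entity between interface
// sets keyed by the sorted sharing-proc list, creating new sets for proc
// combinations that did not exist before.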
06143 
06144 ErrorCode ParallelComm::set_sharing_data( EntityHandle ent, unsigned char pstatus, int old_nump, int new_nump, int* ps,
06145                                           EntityHandle* hs )
06146 {
06147     // If new_nump is less than 3, the entity is no longer multishared
06148     if( old_nump > 2 && ( pstatus & PSTATUS_MULTISHARED ) && new_nump < 3 )
06149     {
06150         // Unset multishared flag
06151         pstatus ^= PSTATUS_MULTISHARED;
06152     }
06153 
06154     // Check for consistency in input data
06155     // DBG
06156     /*  bool con1 = ((new_nump == 2 && pstatus&PSTATUS_SHARED && !(pstatus&PSTATUS_MULTISHARED)) ||
06157                      (new_nump > 2 && pstatus&PSTATUS_SHARED && pstatus&PSTATUS_MULTISHARED));
06158         bool con2 = (!(pstatus&PSTATUS_GHOST) || pstatus&PSTATUS_SHARED);
06159         bool con3 = (new_nump < 3 || (pstatus&PSTATUS_NOT_OWNED && ps[0] != (int)rank()) ||
06160                      (!(pstatus&PSTATUS_NOT_OWNED) && ps[0] == (int)rank()));
06161         std::cout << "current rank = " << rank() << std::endl;
06162         std::cout << "condition 1::" << con1 << std::endl; std::cout << "condition 2::" << con2 << std::endl; std::cout << "condition 3::" << con3 << std::endl; */
06163 
06164     // DBG
06165 
06166     assert( new_nump > 1 &&
06167             ( ( new_nump == 2 && pstatus & PSTATUS_SHARED &&
06168                 !( pstatus & PSTATUS_MULTISHARED ) ) ||  // If <= 2 must not be multishared
06169               ( new_nump > 2 && pstatus & PSTATUS_SHARED &&
06170                 pstatus & PSTATUS_MULTISHARED ) ) &&                         // If > 2 procs, must be multishared
06171             ( !( pstatus & PSTATUS_GHOST ) || pstatus & PSTATUS_SHARED ) &&  // If ghost, it must also be shared
06172             ( new_nump < 3 ||
06173               ( pstatus & PSTATUS_NOT_OWNED && ps[0] != (int)rank() ) ||      // I'm not owner and first proc not me
06174               ( !( pstatus & PSTATUS_NOT_OWNED ) && ps[0] == (int)rank() ) )  // I'm owner and first proc is me
06175     );
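    // In words: with exactly two sharers the entity must be SHARED but not
    // MULTISHARED; with three or more it must be both; a GHOST must also be
    // SHARED; and for multishared entities ps[0] is this rank exactly when
    // this rank owns the entity.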
06176 
06177 #ifndef NDEBUG
06178     {
06179         // Check for duplicates in proc list
06180         std::set< unsigned int > dumprocs;
06181         int dp = 0;
06182         for( ; dp < old_nump && -1 != ps[dp]; dp++ )
06183             dumprocs.insert( ps[dp] );
06184         assert( dp == (int)dumprocs.size() );
06185     }
06186 #endif
06187 
06188     ErrorCode result;
06189     // Reset any old data that needs to be
06190     if( old_nump > 2 && new_nump < 3 )
06191     {
06192         // Need to remove multishared tags
06193         result = mbImpl->tag_delete_data( sharedps_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:1" );
06194         result = mbImpl->tag_delete_data( sharedhs_tag(), &ent, 1 );MB_CHK_SET_ERR( result, "set_sharing_data:2" );
06195         //    if (new_nump < 2)
06196         //      pstatus = 0x0;
06197         //    else if (ps[0] != (int)proc_config().proc_rank())
06198         //      pstatus |= PSTATUS_NOT_OWNED;
06199     }
06200     else if( ( old_nump < 3 && new_nump > 2 ) || ( old_nump > 1 && new_nump == 1 ) )
06201     {
06202         // Reset sharedp and sharedh tags
06203         int tmp_p          = -1;
06204         EntityHandle tmp_h = 0;
06205         result             = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, &tmp_p );MB_CHK_SET_ERR( result, "set_sharing_data:3" );
06206         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, &tmp_h );MB_CHK_SET_ERR( result, "set_sharing_data:4" );
06207     }
06208 
06209     assert( "check for multishared/owner I'm first proc" &&
06210             ( !( pstatus & PSTATUS_MULTISHARED ) || ( pstatus & ( PSTATUS_NOT_OWNED | PSTATUS_GHOST ) ) ||
06211               ( ps[0] == (int)rank() ) ) &&
06212             "interface entities should have > 1 proc" && ( !( pstatus & PSTATUS_INTERFACE ) || new_nump > 1 ) &&
06213             "ghost entities should have > 1 proc" && ( !( pstatus & PSTATUS_GHOST ) || new_nump > 1 ) );
06214 
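    // Tag layout note (matching the code below): a bi-shared entity stores only
    // the *other* proc and handle in the single-valued sharedp/sharedh tags,
    // while a multishared entity stores the full lists (owner first, this proc
    // included) in the sharedps/sharedhs tags.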
06215     // Now set new data
06216     if( new_nump > 2 )
06217     {
06218         result = mbImpl->tag_set_data( sharedps_tag(), &ent, 1, ps );MB_CHK_SET_ERR( result, "set_sharing_data:5" );
06219         result = mbImpl->tag_set_data( sharedhs_tag(), &ent, 1, hs );MB_CHK_SET_ERR( result, "set_sharing_data:6" );
06220     }
06221     else
06222     {
06223         unsigned int j = ( ps[0] == (int)procConfig.proc_rank() ? 1 : 0 );
06224         assert( -1 != ps[j] );
06225         result = mbImpl->tag_set_data( sharedp_tag(), &ent, 1, ps + j );MB_CHK_SET_ERR( result, "set_sharing_data:7" );
06226         result = mbImpl->tag_set_data( sharedh_tag(), &ent, 1, hs + j );MB_CHK_SET_ERR( result, "set_sharing_data:8" );
06227     }
06228 
06229     result = mbImpl->tag_set_data( pstatus_tag(), &ent, 1, &pstatus );MB_CHK_SET_ERR( result, "set_sharing_data:9" );
06230 
06231     if( old_nump > 1 && new_nump < 2 ) sharedEnts.erase( ent );
06232 
06233     return result;
06234 }
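// Example transition handled above: an entity multishared with {me, 3, 7}
// (old_nump = 3) that drops proc 7 becomes bi-shared (new_nump = 2): the
// sharedps/sharedhs tags are deleted, PSTATUS_MULTISHARED is cleared, and
// sharedp/sharedh are set to proc 3 and its remote handle.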
06235 
06236 ErrorCode ParallelComm::get_sent_ents( const bool is_iface, const int bridge_dim, const int ghost_dim,
06237                                        const int num_layers, const int addl_ents, Range* sent_ents, Range& allsent,
06238                                        TupleList& entprocs )
06239 {
06240     ErrorCode result;
06241     unsigned int ind;
06242     std::vector< unsigned int >::iterator proc_it;
06243     Range tmp_range;
06244 
06245     // Done in a separate loop over procs because sometimes later procs
06246     // need to add info to earlier procs' messages
06247     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
06248     {
06249         if( !is_iface )
06250         {
06251             result =
06252                 get_ghosted_entities( bridge_dim, ghost_dim, buffProcs[ind], num_layers, addl_ents, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get ghost layers" );
06253         }
06254         else
06255         {
06256             result = get_iface_entities( buffProcs[ind], -1, sent_ents[ind] );MB_CHK_SET_ERR( result, "Failed to get interface layers" );
06257         }
06258 
06259         // Filter out entities already shared with destination
06260         tmp_range.clear();
06261         result = filter_pstatus( sent_ents[ind], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
06262         if( !tmp_range.empty() ) sent_ents[ind] = subtract( sent_ents[ind], tmp_range );
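        // (filter_pstatus with PSTATUS_SHARED/PSTATUS_AND and a target proc puts
        // into tmp_range the entities already shared with buffProcs[ind], so they
        // are not packed and sent a second time)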
06263 
06264         allsent.merge( sent_ents[ind] );
06265     }
06266 
06267     //===========================================
06268     // Need to get procs each entity is sent to
06269     //===========================================
06270 
06271     // Get the total # of proc/handle pairs
06272     int npairs = 0;
06273     for( ind = 0; ind < buffProcs.size(); ind++ )
06274         npairs += sent_ents[ind].size();
06275 
06276     // Allocate a TupleList of that size
06277     entprocs.initialize( 1, 0, 1, 0, npairs );
06278     entprocs.enableWriteAccess();
06279 
06280     // Put the proc/handle pairs in the list
06281     for( ind = 0, proc_it = buffProcs.begin(); proc_it != buffProcs.end(); ++proc_it, ind++ )
06282     {
06283         for( Range::iterator rit = sent_ents[ind].begin(); rit != sent_ents[ind].end(); ++rit )
06284         {
06285             entprocs.vi_wr[entprocs.get_n()]  = *proc_it;
06286             entprocs.vul_wr[entprocs.get_n()] = *rit;
06287             entprocs.inc_n();
06288         }
06289     }
06290     // Sort by handle
06291     moab::TupleList::buffer sort_buffer;
06292     sort_buffer.buffer_init( npairs );
06293     entprocs.sort( 1, &sort_buffer );
06294 
06295     entprocs.disableWriteAccess();
06296     sort_buffer.reset();
06297 
06298     return MB_SUCCESS;
06299 }
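// The entprocs TupleList built above holds one (destination proc, entity handle)
// pair per send: the int slot is the proc and the unsigned-long slot is the
// handle. Sorting on the handle groups all destinations of a given entity
// together, so they can be looked up with a contiguous scan during packing.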
06300 
06301 ErrorCode ParallelComm::exchange_ghost_cells( ParallelComm** pcs, unsigned int num_procs, int ghost_dim, int bridge_dim,
06302                                               int num_layers, int addl_ents, bool store_remote_handles,
06303                                               EntityHandle* file_sets )
06304 {
06305     // Static version of function, exchanging info through buffers rather
06306     // than through messages
06307 
06308     // If we're only finding out about existing ents, we have to be storing
06309     // remote handles too
06310     assert( num_layers > 0 || store_remote_handles );
06311 
06312     const bool is_iface = !num_layers;
06313 
06314     unsigned int ind;
06315     ParallelComm* pc;
06316     ErrorCode result = MB_SUCCESS;
06317 
06318     std::vector< Error* > ehs( num_procs );
06319     for( unsigned int i = 0; i < num_procs; i++ )
06320     {
06321         result = pcs[i]->get_moab()->query_interface( ehs[i] );
06322         assert( MB_SUCCESS == result );
06323     }
06324 
06325     // When this function is called, buffProcs should already have any
06326     // communicating procs
06327 
06328     //===========================================
06329     // Get entities to be sent to neighbors
06330     //===========================================
06331 
06332     // Done in a separate loop over procs because sometimes later procs
06333     // need to add info to earlier procs' messages
06334     Range sent_ents[MAX_SHARING_PROCS][MAX_SHARING_PROCS], allsent[MAX_SHARING_PROCS];
06335 
06336     //===========================================
06337     // Get sent entities and pack them into per-proc buffers
06338     //===========================================
06339     TupleList entprocs[MAX_SHARING_PROCS];
06340     for( unsigned int p = 0; p < num_procs; p++ )
06341     {
06342         pc     = pcs[p];
06343         result = pc->get_sent_ents( is_iface, bridge_dim, ghost_dim, num_layers, addl_ents, sent_ents[p], allsent[p],
06344                                     entprocs[p] );MB_CHK_SET_ERR( result, "p = " << p << ", get_sent_ents failed" );
06345 
06346         //===========================================
06347         // Pack entities into buffers
06348         //===========================================
06349         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
06350         {
06351             // Entities
06352             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06353             result = pc->pack_entities( sent_ents[p][ind], pc->localOwnedBuffs[ind], store_remote_handles,
06354                                         pc->buffProcs[ind], is_iface, &entprocs[p], &allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", packing entities failed" );
06355         }
06356 
06357         entprocs[p].reset();
06358     }
06359 
06360     //===========================================
06361     // Receive/unpack new entities
06362     //===========================================
06363     // Number of incoming messages for ghosts is the number of procs we
06364     // communicate with; for iface, it's the number of those with lower rank
06365     std::vector< std::vector< EntityHandle > > L1hloc[MAX_SHARING_PROCS], L1hrem[MAX_SHARING_PROCS];
06366     std::vector< std::vector< int > > L1p[MAX_SHARING_PROCS];
06367     std::vector< EntityHandle > L2hloc[MAX_SHARING_PROCS], L2hrem[MAX_SHARING_PROCS];
06368     std::vector< unsigned int > L2p[MAX_SHARING_PROCS];
06369     std::vector< EntityHandle > new_ents[MAX_SHARING_PROCS];
06370 
06371     for( unsigned int p = 0; p < num_procs; p++ )
06372     {
06373         L1hloc[p].resize( pcs[p]->buffProcs.size() );
06374         L1hrem[p].resize( pcs[p]->buffProcs.size() );
06375         L1p[p].resize( pcs[p]->buffProcs.size() );
06376     }
06377 
06378     for( unsigned int p = 0; p < num_procs; p++ )
06379     {
06380         pc = pcs[p];
06381 
06382         for( ind = 0; ind < pc->buffProcs.size(); ind++ )
06383         {
06384             // Incoming ghost entities; unpack; returns entities received
06385             // both from sending proc and from owning proc (which may be different)
06386 
06387             // Buffer could be empty, which means there isn't any message to
06388             // unpack (due to this comm proc getting added as a result of indirect
06389             // communication); just skip this unpack
06390             if( pc->localOwnedBuffs[ind]->get_stored_size() == 0 ) continue;
06391 
06392             unsigned int to_p = pc->buffProcs[ind];
06393             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06394             result = pcs[to_p]->unpack_entities( pc->localOwnedBuffs[ind]->buff_ptr, store_remote_handles, ind,
06395                                                  is_iface, L1hloc[to_p], L1hrem[to_p], L1p[to_p], L2hloc[to_p],
06396                                                  L2hrem[to_p], L2p[to_p], new_ents[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack entities" );
06397         }
06398     }
06399 
06400     if( is_iface )
06401     {
06402         // Need to check over entities I sent and make sure I received
06403         // handles for them from all expected procs; if not, need to clean
06404         // them up
06405         for( unsigned int p = 0; p < num_procs; p++ )
06406         {
06407             result = pcs[p]->check_clean_iface( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06408         }
06409 
06410 #ifndef NDEBUG
06411         for( unsigned int p = 0; p < num_procs; p++ )
06412         {
06413             result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06414         }
06415         result = check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
06416 #endif
06417         return MB_SUCCESS;
06418     }
06419 
06420     //===========================================
06421     // Send local handles for new ghosts to owner, then add
06422     // those to ghost list for that owner
06423     //===========================================
06424     std::vector< unsigned int >::iterator proc_it;
06425     for( unsigned int p = 0; p < num_procs; p++ )
06426     {
06427         pc = pcs[p];
06428 
06429         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
06430         {
06431             // Skip if iface layer and higher-rank proc
06432             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06433             result = pc->pack_remote_handles( L1hloc[p][ind], L1hrem[p][ind], L1p[p][ind], *proc_it,
06434                                               pc->localOwnedBuffs[ind] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to pack remote handles" );
06435         }
06436     }
06437 
06438     //===========================================
06439     // Process remote handles of my ghosteds
06440     //===========================================
06441     for( unsigned int p = 0; p < num_procs; p++ )
06442     {
06443         pc = pcs[p];
06444 
06445         for( ind = 0, proc_it = pc->buffProcs.begin(); proc_it != pc->buffProcs.end(); ++proc_it, ind++ )
06446         {
06447             // Incoming remote handles
06448             unsigned int to_p = pc->buffProcs[ind];
06449             pc->localOwnedBuffs[ind]->reset_ptr( sizeof( int ) );
06450             result = pcs[to_p]->unpack_remote_handles( p, pc->localOwnedBuffs[ind]->buff_ptr, L2hloc[to_p],
06451                                                        L2hrem[to_p], L2p[to_p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to unpack remote handles" );
06452         }
06453     }
06454 
06455 #ifndef NDEBUG
06456     for( unsigned int p = 0; p < num_procs; p++ )
06457     {
06458         result = pcs[p]->check_sent_ents( allsent[p] );MB_CHK_SET_ERR( result, "p = " << p << ", failed to check on shared entities" );
06459     }
06460 
06461     result = ParallelComm::check_all_shared_handles( pcs, num_procs );MB_CHK_SET_ERR( result, "Failed to check on all shared handles" );
06462 #endif
06463 
06464     if( file_sets )
06465     {
06466         for( unsigned int p = 0; p < num_procs; p++ )
06467         {
06468             if( new_ents[p].empty() ) continue;
06469             result = pcs[p]->get_moab()->add_entities( file_sets[p], &new_ents[p][0], new_ents[p].size() );MB_CHK_SET_ERR( result, "p = " << p << ", failed to add new entities to set" );
06470         }
06471     }
06472 
06473     return MB_SUCCESS;
06474 }
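// Minimal usage sketch for the static overload above (as exercised by serial,
// multi-instance test drivers; the names pc0, pc1, set0, set1 below are
// illustrative only, not part of this file):
//
//   ParallelComm* pcs[2] = { &pc0, &pc1 };       // one ParallelComm per "rank"
//   EntityHandle file_sets[2] = { set0, set1 };  // optional output sets
//   ErrorCode rval = ParallelComm::exchange_ghost_cells( pcs, 2, /*ghost_dim*/ 3,
//                        /*bridge_dim*/ 0, /*num_layers*/ 1, /*addl_ents*/ 0,
//                        /*store_remote_handles*/ true, file_sets );
//
// Because all buffers live in the same address space, packing and unpacking
// happen directly on localOwnedBuffs, with no MPI traffic involved.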
06475 
06476 ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& exchange_procs )
06477 {
06478     // Set buffers
06479     int n_proc = exchange_procs.size();
06480     for( int i = 0; i < n_proc; i++ )
06481         get_buffers( exchange_procs[i] );
06482     reset_all_buffers();
06483 
06484     // Post ghost irecv's for entities from all communicating procs
06485     // Index requests the same as buffer/sharing procs indices
06486     int success;
06487     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06488     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06489     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06490 
06491     int incoming = 0;
06492     for( int i = 0; i < n_proc; i++ )
06493     {
06494         int ind = get_buffers( exchange_procs[i] );
06495         incoming++;
06496         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
06497                            MB_MESG_ENTS_SIZE, incoming );
06498         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06499                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
06500         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
06501     }
06502 
06503     return MB_SUCCESS;
06504 }
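// Two request slots are reserved per communicating proc: the irecv for the
// initial fixed-size buffer posted above lands in recvReqs[2*ind], leaving the
// adjacent slot for the follow-up receive that is posted later when a sender's
// message exceeds INITIAL_BUFF_SIZE.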
06505 
06506 ErrorCode ParallelComm::post_irecv( std::vector< unsigned int >& shared_procs, std::set< unsigned int >& recv_procs )
06507 {
06508     // Set buffers
06509     int num = shared_procs.size();
06510     for( int i = 0; i < num; i++ )
06511         get_buffers( shared_procs[i] );
06512     reset_all_buffers();
06513     num = remoteOwnedBuffs.size();
06514     for( int i = 0; i < num; i++ )
06515         remoteOwnedBuffs[i]->set_stored_size();
06516     num = localOwnedBuffs.size();
06517     for( int i = 0; i < num; i++ )
06518         localOwnedBuffs[i]->set_stored_size();
06519 
06520     // Post ghost irecv's for entities from all communicating procs
06521     // Index requests the same as buffer/sharing procs indices
06522     int success;
06523     recvReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06524     recvRemotehReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06525     sendReqs.resize( 2 * buffProcs.size(), MPI_REQUEST_NULL );
06526 
06527     int incoming                           = 0;
06528     std::set< unsigned int >::iterator it  = recv_procs.begin();
06529     std::set< unsigned int >::iterator eit = recv_procs.end();
06530     for( ; it != eit; ++it )
06531     {
06532         int ind = get_buffers( *it );
06533         incoming++;
06534         PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE,
06535                            MB_MESG_ENTS_SIZE, incoming );
06536         success = MPI_Irecv( remoteOwnedBuffs[ind]->mem_ptr, INITIAL_BUFF_SIZE, MPI_UNSIGNED_CHAR, buffProcs[ind],
06537                              MB_MESG_ENTS_SIZE, procConfig.proc_comm(), &recvReqs[2 * ind] );
06538         if( success != MPI_SUCCESS ) { MB_SET_ERR( MB_FAILURE, "Failed to post irecv in owned entity exchange" ); }
06539     }
06540 
06541     return MB_SUCCESS;
06542 }
06543 
06544 ErrorCode ParallelComm::exchange_owned_meshs( std::vector< unsigned int >& exchange_procs,
06545                                               std::vector< Range* >& exchange_ents,
06546                                               std::vector< MPI_Request >& recv_ent_reqs,
06547                                               std::vector< MPI_Request >& recv_remoteh_reqs, bool store_remote_handles,
06548                                               bool wait_all, bool migrate, int dim )
06549 {
06550     // Split mesh sets out of each exchange list, then exchange twice:
06551     // first the entities themselves, then the sets
06552     ErrorCode result;
06553     std::vector< unsigned int > exchange_procs_sets;
06554     std::vector< Range* > exchange_sets;
06555     int n_proc = exchange_procs.size();
06556     for( int i = 0; i < n_proc; i++ )
06557     {
06558         Range set_range   = exchange_ents[i]->subset_by_type( MBENTITYSET );
06559         *exchange_ents[i] = subtract( *exchange_ents[i], set_range );
06560         Range* tmp_range  = new Range( set_range );
06561         exchange_sets.push_back( tmp_range );
06562         exchange_procs_sets.push_back( exchange_procs[i] );
06563     }
06564 
06565     if( dim == 2 )
06566     {
06567         // Exchange entities first
06568         result = exchange_owned_mesh( exchange_procs, exchange_ents, recvReqs, recvRemotehReqs, true,
06569                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
06570 
06571         // Exchange sets
06572         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recvReqs, recvRemotehReqs, false,
06573                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh sets" );
06574     }
06575     else
06576     {
06577         // Exchange entities first
06578         result = exchange_owned_mesh( exchange_procs, exchange_ents, recv_ent_reqs, recv_remoteh_reqs, false,
06579                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh entities" );
06580 
06581         // Exchange sets
06582         result = exchange_owned_mesh( exchange_procs_sets, exchange_sets, recv_ent_reqs, recv_remoteh_reqs, false,
06583                                       store_remote_handles, wait_all, migrate );MB_CHK_SET_ERR( result, "Failed to exchange owned mesh sets" );
06584     }
06585 
06586     for( int i = 0; i < n_proc; i++ )
06587         delete exchange_sets[i];
06588 
06589     // Build up the list of shared entities
06590     std::map< std::vector< int >, std::vector< EntityHandle > > proc_nvecs;
06591     int procs[MAX_SHARING_PROCS];
06592     EntityHandle handles[MAX_SHARING_PROCS];
06593     int nprocs;
06594     unsigned char pstat;
06595     for( std::set< EntityHandle >::iterator vit = sharedEnts.begin(); vit != sharedEnts.end(); ++vit )
06596     {
06597         if( mbImpl->dimension_from_handle( *vit ) > 2 ) continue;
06598         result = get_sharing_data( *vit, procs, handles, pstat, nprocs );MB_CHK_SET_ERR( result, "Failed to get sharing data in exchange_owned_meshs" );
06599         std::sort( procs, procs + nprocs );
06600         std::vector< int > tmp_procs( procs, procs + nprocs );
06601         assert( tmp_procs.size() != 2 );
06602         proc_nvecs[tmp_procs].push_back( *vit );
06603     }
06604 
06605     // Create interface sets from shared entities
06606     result = create_interface_sets( proc_nvecs );MB_CHK_SET_ERR( result, "Failed to create interface sets" );
06607 
06608     return MB_SUCCESS;
06609 }
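// exchange_owned_meshs ships entity sets in a second exchange_owned_mesh pass,
// after the entities themselves; presumably this ensures set contents already
// exist on the destination when the sets are unpacked. Interface sets are then
// rebuilt from the updated sharing data.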
06610 
06611 ErrorCode ParallelComm::exchange_owned_mesh( std::vector< unsigned int >& exchange_procs,
06612                                              std::vector< Range* >& exchange_ents,
06613                                              std::vector< MPI_Request >& recv_ent_reqs,
06614                                              std::vector< MPI_Request >& recv_remoteh_reqs, const bool recv_posted,
06615                                              bool store_remote_handles, bool wait_all, bool migrate )
06616 {
06617 #ifdef MOAB_HAVE_MPE
06618     if( myDebug->get_verbosity() == 2 )
06619     {
06620         MPE_Log_event( OWNED_START, procConfig.proc_rank(), "Starting owned ents exchange." );
06621     }
06622 #endif
06623 
06624     myDebug->tprintf( 1, "Entering exchange_owned_mesh\n" );
06625     if( myDebug->get_verbosity() == 4 )
06626     {
06627         msgs.clear();
06628         msgs.reserve( MAX_SHARING_PROCS );
06629     }
06630     unsigned int i;
06631     int ind, success;
06632     ErrorCode result = MB_SUCCESS;
06633     int incoming1 = 0, incoming2 = 0;
06634 
06635     // Set buffProcs with communicating procs
06636     unsigned int n_proc = exchange_procs.size();
06637     for( i = 0; i < n_proc; i++ )
06638     {
06639         ind    = get_buffers( exchange_procs[i] );
06640         result = add_verts( *exchange_ents[i] );MB_CHK_SET_ERR( result, "Failed to add verts" );
06641 
06642         // Filter out entities already shared with destination
06643         Range tmp_range;
06644         result = filter_pstatus( *exchange_ents[i], PSTATUS_SHARED, PSTATUS_AND, buffProcs[ind], &tmp_range );MB_CHK_SET_ERR( result, "Failed to filter on owner" );
06645         if( !tmp_range.empty() ) { *exchange_ents[i] = subtract( *exchange_ents[i], tmp_range ); }
06646     }
06647 
06648     //===========================================
06649     // Post ghost irecv's for entities from all communicating procs
06650     //===========================================
06651 #ifdef MOAB_HAVE_MPE
06652     if( myDebug->get_verbosity() == 2 )
06653     {
06654         MPE_Log_event( ENTITIES_START, procConfig.proc_rank(), "Starting entity exchange." );
06655     }
06656 #endif
06657 
06658     // Index reqs the same as buffer/sharing procs indices
06659     if( !recv_posted )
06660     {
06661         reset_all_buffers();
06662         recv_ent_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06663         recv_remoteh_reqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06664         sendReqs.resize( 3 * buffProcs.size(), MPI_REQUEST_NULL );
06665 
06666         for( i = 0; i < n_proc; i++ )
06667         {
06668             ind = get_buffers( exchange_procs[i] );
06669             incoming1++;
06670             PRINT_DEBUG_IRECV( procConfig.proc_rank(), buffProcs[ind], remoteOwnedBuffs[ind]->mem_ptr,
06671